]> bbs.cooldavid.org Git - net-next-2.6.git/blame_incremental - fs/ufs/balloc.c
[PATCH] remove many unneeded #includes of sched.h
[net-next-2.6.git] / fs / ufs / balloc.c
... / ...
CommitLineData
1/*
2 * linux/fs/ufs/balloc.c
3 *
4 * Copyright (C) 1998
5 * Daniel Pirkl <daniel.pirkl@email.cz>
6 * Charles University, Faculty of Mathematics and Physics
7 *
8 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
9 */
10
11#include <linux/fs.h>
12#include <linux/ufs_fs.h>
13#include <linux/stat.h>
14#include <linux/time.h>
15#include <linux/string.h>
16#include <linux/quotaops.h>
17#include <linux/buffer_head.h>
18#include <linux/capability.h>
19#include <linux/bitops.h>
20#include <asm/byteorder.h>
21
22#include "swab.h"
23#include "util.h"
24
/* Sentinel "no block" value returned by the allocation helpers below. */
#define INVBLOCK ((u64)-1L)

/* Forward declarations for the static allocation helpers in this file. */
static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned, int *);
static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
/* Lookup tables mapping a bitmap byte to the free-fragment runs it contains. */
static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
33
34/*
35 * Free 'count' fragments from fragment number 'fragment'
36 */
37void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
38{
39 struct super_block * sb;
40 struct ufs_sb_private_info * uspi;
41 struct ufs_super_block_first * usb1;
42 struct ufs_cg_private_info * ucpi;
43 struct ufs_cylinder_group * ucg;
44 unsigned cgno, bit, end_bit, bbase, blkmap, i;
45 u64 blkno;
46
47 sb = inode->i_sb;
48 uspi = UFS_SB(sb)->s_uspi;
49 usb1 = ubh_get_usb_first(uspi);
50
51 UFSD("ENTER, fragment %llu, count %u\n",
52 (unsigned long long)fragment, count);
53
54 if (ufs_fragnum(fragment) + count > uspi->s_fpg)
55 ufs_error (sb, "ufs_free_fragments", "internal error");
56
57 lock_super(sb);
58
59 cgno = ufs_dtog(uspi, fragment);
60 bit = ufs_dtogd(uspi, fragment);
61 if (cgno >= uspi->s_ncg) {
62 ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
63 goto failed;
64 }
65
66 ucpi = ufs_load_cylinder (sb, cgno);
67 if (!ucpi)
68 goto failed;
69 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
70 if (!ufs_cg_chkmagic(sb, ucg)) {
71 ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
72 goto failed;
73 }
74
75 end_bit = bit + count;
76 bbase = ufs_blknum (bit);
77 blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
78 ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
79 for (i = bit; i < end_bit; i++) {
80 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
81 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
82 else
83 ufs_error (sb, "ufs_free_fragments",
84 "bit already cleared for fragment %u", i);
85 }
86
87 DQUOT_FREE_BLOCK (inode, count);
88
89
90 fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
91 uspi->cs_total.cs_nffree += count;
92 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
93 blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
94 ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
95
96 /*
97 * Trying to reassemble free fragments into block
98 */
99 blkno = ufs_fragstoblks (bbase);
100 if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
101 fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
102 uspi->cs_total.cs_nffree -= uspi->s_fpb;
103 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
104 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
105 ufs_clusteracct (sb, ucpi, blkno, 1);
106 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
107 uspi->cs_total.cs_nbfree++;
108 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
109 if (uspi->fs_magic != UFS2_MAGIC) {
110 unsigned cylno = ufs_cbtocylno (bbase);
111
112 fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
113 ufs_cbtorpos(bbase)), 1);
114 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
115 }
116 }
117
118 ubh_mark_buffer_dirty (USPI_UBH(uspi));
119 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
120 if (sb->s_flags & MS_SYNCHRONOUS) {
121 ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
122 ubh_wait_on_buffer (UCPI_UBH(ucpi));
123 }
124 sb->s_dirt = 1;
125
126 unlock_super (sb);
127 UFSD("EXIT\n");
128 return;
129
130failed:
131 unlock_super (sb);
132 UFSD("EXIT (FAILED)\n");
133 return;
134}
135
136/*
137 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
138 */
/*
 * Free 'count' fragments starting at 'fragment', where both are known to
 * be whole-block aligned (checked against s_fpbmask below).  Frees one
 * block at a time, updating bitmaps and counters; if the range crosses a
 * cylinder-group boundary the tail is handled by looping via 'do_more'.
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	/* Both start and length must be multiples of a full block. */
	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %llu, count %u\n",
			   (unsigned long long)fragment, count);
		goto failed;
	}

	lock_super(sb);

do_more:
	overflow = 0;
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		/* Range spills into the next cg; defer the excess. */
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	/* Free each whole block in this cylinder group. */
	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		DQUOT_FREE_BLOCK(inode, uspi->s_fpb);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

		if (uspi->fs_magic != UFS2_MAGIC) {
			/* UFS1 per-cylinder rotational-position accounting. */
			unsigned cylno = ufs_cbtocylno(i);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(i)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}

	if (overflow) {
		/* Continue with the part that fell into the next cg. */
		fragment += count;
		count = overflow;
		goto do_more;
	}

	sb->s_dirt = 1;
	unlock_super (sb);
	UFSD("EXIT\n");
	return;

failed_unlock:
	unlock_super (sb);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}
236
237/*
238 * Modify inode page cache in such way:
239 * have - blocks with b_blocknr equal to oldb...oldb+count-1
240 * get - blocks with b_blocknr equal to newb...newb+count-1
241 * also we suppose that oldb...oldb+count-1 blocks
242 * situated at the end of file.
243 *
244 * We can come here from ufs_writepage or ufs_prepare_write,
245 * locked_page is argument of these functions, so we already lock it.
246 */
static void ufs_change_blocknr(struct inode *inode, unsigned int beg,
			       unsigned int count, unsigned int oldb,
			       unsigned int newb, struct page *locked_page)
{
	/* Buffers-per-page minus one; used to mask fragment offsets. */
	const unsigned mask = (1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1;
	struct address_space * const mapping = inode->i_mapping;
	pgoff_t index, cur_index;
	unsigned end, pos, j;
	struct page *page;
	struct buffer_head *head, *bh;

	UFSD("ENTER, ino %lu, count %u, oldb %u, newb %u\n",
	      inode->i_ino, count, oldb, newb);

	BUG_ON(!locked_page);
	BUG_ON(!PageLocked(locked_page));

	cur_index = locked_page->index;

	/* Walk page by page; 'beg = (beg | mask) + 1' jumps to next page. */
	for (end = count + beg; beg < end; beg = (beg | mask) + 1) {
		index = beg >> (PAGE_CACHE_SHIFT - inode->i_blkbits);

		if (likely(cur_index != index)) {
			page = ufs_get_locked_page(mapping, index);
			if (!page || IS_ERR(page)) /* it was truncated or EIO */
				continue;
		} else
			/* Caller already holds the lock on this page. */
			page = locked_page;

		head = page_buffers(page);
		bh = head;
		/* Skip to the buffer corresponding to fragment 'beg'. */
		pos = beg & mask;
		for (j = 0; j < pos; ++j)
			bh = bh->b_this_page;
		j = 0;		/* counts buffers actually renumbered */
		do {
			if (buffer_mapped(bh)) {
				pos = bh->b_blocknr - oldb;
				if (pos < count) {
					/* Buffer lies in the moved range. */
					UFSD(" change from %llu to %llu\n",
					     (unsigned long long)pos + oldb,
					     (unsigned long long)pos + newb);
					bh->b_blocknr = newb + pos;
					unmap_underlying_metadata(bh->b_bdev,
								  bh->b_blocknr);
					mark_buffer_dirty(bh);
					++j;
				}
			}

			bh = bh->b_this_page;
		} while (bh != head);

		if (j)
			set_page_dirty(page);

		if (likely(cur_index != index))
			ufs_put_locked_page(page);
	}
	UFSD("EXIT\n");
}
308
309static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
310 int sync)
311{
312 struct buffer_head *bh;
313 sector_t end = beg + n;
314
315 for (; beg < end; ++beg) {
316 bh = sb_getblk(inode->i_sb, beg);
317 lock_buffer(bh);
318 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
319 set_buffer_uptodate(bh);
320 mark_buffer_dirty(bh);
321 unlock_buffer(bh);
322 if (IS_SYNC(inode) || sync)
323 sync_dirty_buffer(bh);
324 brelse(bh);
325 }
326}
327
328u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
329 u64 goal, unsigned count, int *err,
330 struct page *locked_page)
331{
332 struct super_block * sb;
333 struct ufs_sb_private_info * uspi;
334 struct ufs_super_block_first * usb1;
335 unsigned cgno, oldcount, newcount;
336 u64 tmp, request, result;
337
338 UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
339 inode->i_ino, (unsigned long long)fragment,
340 (unsigned long long)goal, count);
341
342 sb = inode->i_sb;
343 uspi = UFS_SB(sb)->s_uspi;
344 usb1 = ubh_get_usb_first(uspi);
345 *err = -ENOSPC;
346
347 lock_super (sb);
348 tmp = ufs_data_ptr_to_cpu(sb, p);
349
350 if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
351 ufs_warning(sb, "ufs_new_fragments", "internal warning"
352 " fragment %llu, count %u",
353 (unsigned long long)fragment, count);
354 count = uspi->s_fpb - ufs_fragnum(fragment);
355 }
356 oldcount = ufs_fragnum (fragment);
357 newcount = oldcount + count;
358
359 /*
360 * Somebody else has just allocated our fragments
361 */
362 if (oldcount) {
363 if (!tmp) {
364 ufs_error(sb, "ufs_new_fragments", "internal error, "
365 "fragment %llu, tmp %llu\n",
366 (unsigned long long)fragment,
367 (unsigned long long)tmp);
368 unlock_super(sb);
369 return INVBLOCK;
370 }
371 if (fragment < UFS_I(inode)->i_lastfrag) {
372 UFSD("EXIT (ALREADY ALLOCATED)\n");
373 unlock_super (sb);
374 return 0;
375 }
376 }
377 else {
378 if (tmp) {
379 UFSD("EXIT (ALREADY ALLOCATED)\n");
380 unlock_super(sb);
381 return 0;
382 }
383 }
384
385 /*
386 * There is not enough space for user on the device
387 */
388 if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
389 unlock_super (sb);
390 UFSD("EXIT (FAILED)\n");
391 return 0;
392 }
393
394 if (goal >= uspi->s_size)
395 goal = 0;
396 if (goal == 0)
397 cgno = ufs_inotocg (inode->i_ino);
398 else
399 cgno = ufs_dtog(uspi, goal);
400
401 /*
402 * allocate new fragment
403 */
404 if (oldcount == 0) {
405 result = ufs_alloc_fragments (inode, cgno, goal, count, err);
406 if (result) {
407 ufs_cpu_to_data_ptr(sb, p, result);
408 *err = 0;
409 UFS_I(inode)->i_lastfrag =
410 max_t(u32, UFS_I(inode)->i_lastfrag,
411 fragment + count);
412 ufs_clear_frags(inode, result + oldcount,
413 newcount - oldcount, locked_page != NULL);
414 }
415 unlock_super(sb);
416 UFSD("EXIT, result %llu\n", (unsigned long long)result);
417 return result;
418 }
419
420 /*
421 * resize block
422 */
423 result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
424 if (result) {
425 *err = 0;
426 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
427 ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
428 locked_page != NULL);
429 unlock_super(sb);
430 UFSD("EXIT, result %llu\n", (unsigned long long)result);
431 return result;
432 }
433
434 /*
435 * allocate new block and move data
436 */
437 switch (fs32_to_cpu(sb, usb1->fs_optim)) {
438 case UFS_OPTSPACE:
439 request = newcount;
440 if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
441 > uspi->s_dsize * uspi->s_minfree / (2 * 100))
442 break;
443 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
444 break;
445 default:
446 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
447
448 case UFS_OPTTIME:
449 request = uspi->s_fpb;
450 if (uspi->cs_total.cs_nffree < uspi->s_dsize *
451 (uspi->s_minfree - 2) / 100)
452 break;
453 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
454 break;
455 }
456 result = ufs_alloc_fragments (inode, cgno, goal, request, err);
457 if (result) {
458 ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
459 locked_page != NULL);
460 ufs_change_blocknr(inode, fragment - oldcount, oldcount, tmp,
461 result, locked_page);
462 ufs_cpu_to_data_ptr(sb, p, result);
463 *err = 0;
464 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
465 unlock_super(sb);
466 if (newcount < request)
467 ufs_free_fragments (inode, result + newcount, request - newcount);
468 ufs_free_fragments (inode, tmp, oldcount);
469 UFSD("EXIT, result %llu\n", (unsigned long long)result);
470 return result;
471 }
472
473 unlock_super(sb);
474 UFSD("EXIT (FAILED)\n");
475 return 0;
476}
477
/*
 * Try to extend an existing fragment run in place from 'oldcount' to
 * 'newcount' fragments.  Returns 'fragment' on success, 0 if the
 * following fragments are not free (caller then relocates the data).
 */
static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
			     unsigned oldcount, unsigned newcount, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;

	UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
	     (unsigned long long)fragment, oldcount, newcount);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first (uspi);
	count = newcount - oldcount;

	cgno = ufs_dtog(uspi, fragment);
	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
		return 0;
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			"internal error, bad magic number on cg %u", cgno);
		return 0;
	}

	fragno = ufs_dtogd(uspi, fragment);
	fragoff = ufs_fragnum (fragno);
	/* Every fragment we would grow into must currently be free. */
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			return 0;
	/*
	 * Block can be extended
	 */
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
	/* Measure the free run following the current allocation. */
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			break;
	fragsize = i - oldcount;
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			"internal error or corrupted bitmap on cg %u", cgno);
	/* Re-account the fragment-run histogram for the shrunken run. */
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
	/*
	 * NOTE(review): the quota charge happens after the bitmap and
	 * frsum were already modified; on -EDQUOT those updates are not
	 * rolled back here — confirm against the full tree.
	 */
	if(DQUOT_ALLOC_BLOCK(inode, count)) {
		*err = -EDQUOT;
		return 0;
	}

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	sb->s_dirt = 1;

	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);

	return fragment;
}
553
/*
 * Test cylinder group 'cgno' for usable space: jump to cg_found in the
 * caller if it has a whole free block, or any free-fragment run of at
 * least 'count' fragments.  Expands in ufs_alloc_fragments and relies on
 * its locals sb, uspi, ucg, cgno, count and k.
 */
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found;
561
/*
 * Allocate 'count' fragments, preferring cylinder group 'cgno' near
 * 'goal'.  Searches cgs in the classic FFS order: preferred cg, then
 * quadratic rehash, then linear scan.  Returns the filesystem-wide
 * fragment number, or 0 on failure (with *err possibly set).
 */
static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
			       u64 goal, unsigned count, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, allocsize;
	u64 result;

	UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
	     inode->i_ino, cgno, (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	oldcg = cgno;

	/*
	 * 1. searching on preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg)
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * We start at i = 2 ( 0 is checked at 1.step, 1 at 2.step )
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}

	UFSD("EXIT (FAILED)\n");
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_alloc_fragments",
			"internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());

	/* Whole-block request: allocate a block directly. */
	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goto succed;
	}

	/* Find the smallest free run >= count recorded in the histogram. */
	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;

	if (allocsize == uspi->s_fpb) {
		/* No suitable run: break a whole block into fragments. */
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goal = ufs_dtogd(uspi, result);
		/* Mark the unused tail of the block free again. */
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;
		DQUOT_FREE_BLOCK(inode, i);

		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succed;
	}

	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == INVBLOCK)
		return 0;
	if(DQUOT_ALLOC_BLOCK(inode, count)) {
		*err = -EDQUOT;
		return 0;
	}
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	/* Found run shrank; update the fragment-run histogram. */
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succed:
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	sb->s_dirt = 1;

	/* Convert the cg-relative result into a filesystem-wide address. */
	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %llu\n", (unsigned long long)result);
	return result;
}
680
/*
 * Allocate one whole block within cylinder group 'ucpi', at 'goal' if
 * that block is free, else via bitmap search starting at the cg rotor.
 * Returns the cg-relative fragment number, or INVBLOCK on failure.
 */
static u64 ufs_alloccg_block(struct inode *inode,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cylinder_group * ucg;
	u64 result, blkno;

	UFSD("ENTER, goal %llu\n", (unsigned long long)goal);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal == 0) {
		/* No hint: continue from where we last allocated. */
		goal = ucpi->c_rotor;
		goto norot;
	}
	goal = ufs_blknum (goal);
	goal = ufs_dtogd(uspi, goal);

	/*
	 * If the requested block is available, use it.
	 */
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
		result = goal;
		goto gotit;
	}

norot:
	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
	if (result == INVBLOCK)
		return INVBLOCK;
	ucpi->c_rotor = result;
gotit:
	blkno = ufs_fragstoblks(result);
	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
		ufs_clusteracct (sb, ucpi, blkno, -1);
	/*
	 * NOTE(review): quota is charged after the bitmap was already
	 * cleared; on -EDQUOT the bitmap change is not undone here —
	 * confirm against the full tree.
	 */
	if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
		*err = -EDQUOT;
		return INVBLOCK;
	}

	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
	uspi->cs_total.cs_nbfree--;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);

	if (uspi->fs_magic != UFS2_MAGIC) {
		/* UFS1 per-cylinder rotational-position accounting. */
		unsigned cylno = ufs_cbtocylno((unsigned)result);

		fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
					  ufs_cbtorpos((unsigned)result)), 1);
		fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}

	UFSD("EXIT, result %llu\n", (unsigned long long)result);

	return result;
}
744
/*
 * Scan 'size' bytes of a multi-buffer bitmap starting at byte offset
 * 'begin', looking for the first byte whose table[] entry has a bit of
 * 'mask' set (i.e. a byte containing a suitable free-fragment run).
 * Returns the number of bytes not yet scanned when the match was found
 * (0 if nothing matched) — the BSD scanc() convention.
 */
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
			  struct ufs_buffer_head *ubh,
			  unsigned begin, unsigned size,
			  unsigned char *table, unsigned char mask)
{
	unsigned rest, offset;
	unsigned char *cp;


	/* Split 'begin' into buffer index and byte offset within it. */
	offset = begin & ~uspi->s_fmask;
	begin >>= uspi->s_fshift;
	for (;;) {
		/* Bytes to scan in this buffer before crossing into the next. */
		if ((offset + size) < uspi->s_fsize)
			rest = size;
		else
			rest = uspi->s_fsize - offset;
		size -= rest;
		cp = ubh->bh[begin]->b_data + offset;
		while ((table[*cp++] & mask) == 0 && --rest)
			;
		if (rest || !size)
			break;
		begin++;
		offset = 0;
	}
	return (size + rest);
}
772
773/*
774 * Find a block of the specified size in the specified cylinder group.
775 * @sp: pointer to super block
776 * @ucpi: pointer to cylinder group info
777 * @goal: near which block we want find new one
778 * @count: specified size
779 */
/*
 * Find a block of the specified size in the specified cylinder group.
 * @sb: pointer to super block
 * @ucpi: pointer to cylinder group info
 * @goal: near which block we want find new one
 * @count: specified size
 *
 * Returns the cg-relative fragment number of a free run of 'count'
 * fragments, or INVBLOCK if the bitmap is corrupted / nothing found.
 */
static u64 ufs_bitmap_search(struct super_block *sb,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, unsigned count)
{
	/*
	 * Bit patterns for identifying fragments in the block map
	 * used as ((map & mask_arr) == want_arr)
	 */
	static const int mask_arr[9] = {
		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
	};
	static const int want_arr[9] = {
		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
	};
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct ufs_super_block_first *usb1;
	struct ufs_cylinder_group *ucg;
	unsigned start, length, loc;
	unsigned pos, want, blockmap, mask, end;
	u64 result;

	UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
	     (unsigned long long)goal, count);

	usb1 = ubh_get_usb_first (uspi);
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	/* Start the byte-wise scan at the goal, else at the cg rotor. */
	if (goal)
		start = ufs_dtogd(uspi, goal) >> 3;
	else
		start = ucpi->c_frotor >> 3;

	/* First scan from 'start' to the end of the group ... */
	length = ((uspi->s_fpg + 7) >> 3) - start;
	loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
		(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
		1 << (count - 1 + (uspi->s_fpb & 7)));
	if (loc == 0) {
		/* ... then wrap around and scan the beginning. */
		length = start + 1;
		loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
				(uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
				ufs_fragtable_other,
				1 << (count - 1 + (uspi->s_fpb & 7)));
		if (loc == 0) {
			ufs_error(sb, "ufs_bitmap_search",
			    "bitmap corrupted on cg %u, start %u,"
			    " length %u, count %u, freeoff %u\n",
			    ucpi->c_cgx, start, length, count,
			    ucpi->c_freeoff);
			return INVBLOCK;
		}
		start = 0;
	}
	/* Convert "bytes left unscanned" into the matching bit position. */
	result = (start + length - loc) << 3;
	ucpi->c_frotor = result;

	/*
	 * found the byte in the map
	 */

	/* Slide the count-sized pattern across the byte to find the run. */
	for (end = result + 8; result < end; result += uspi->s_fpb) {
		blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
		blockmap <<= 1;
		mask = mask_arr[count];
		want = want_arr[count];
		for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
			if ((blockmap & mask) == want) {
				UFSD("EXIT, result %llu\n",
				     (unsigned long long)result);
				return result + pos;
			}
			mask <<= 1;
			want <<= 1;
		}
	}

	ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
		  ucpi->c_cgx);
	UFSD("EXIT (FAILED)\n");
	return INVBLOCK;
}
860
/*
 * Maintain the 44BSD cluster summary counts when block 'blkno' is freed
 * (cnt > 0) or allocated (cnt < 0): update the cluster bitmap, measure
 * the free cluster the block joins or splits, and adjust the per-size
 * cluster summary array accordingly.
 */
static void ufs_clusteracct(struct super_block * sb,
	struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
	struct ufs_sb_private_info * uspi;
	int i, start, end, forw, back;

	uspi = UFS_SB(sb)->s_uspi;
	/* Clustering disabled on this filesystem. */
	if (uspi->s_contigsumsize <= 0)
		return;

	if (cnt > 0)
		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
	else
		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + uspi->s_contigsumsize;
	if ( end >= ucpi->c_nclusterblks)
		end = ucpi->c_nclusterblks;
	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
	if (i > end)
		i = end;
	forw = i - start;

	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - uspi->s_contigsumsize;
	if (end < 0 )
		end = -1;
	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
	if ( i < end)
		i = end;
	back = start - i;

	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > uspi->s_contigsumsize)
		i = uspi->s_contigsumsize;
	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
	if (back > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
	if (forw > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}
913
914
/*
 * Free-fragment run table for 8 fragments per block: indexed by a byte
 * of the free bitmap, entry has bit (n-1) set for every length-n run of
 * free fragments present in that byte (used via ufs_bitmap_search with
 * mask 1 << (count - 1)).
 */
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};
933
/*
 * Free-fragment run table for block sizes other than 8 fragments per
 * block; the run-length bits are offset so the lookup mask is
 * 1 << (count - 1 + (s_fpb & 7)) — see ufs_bitmap_search.
 */
static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};