/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
 */


/*
 * mballoc.c contains the multiblocks allocation routines
 */

#include "mballoc.h"
/*
 * MUSTDO:
 *   - test ext4_ext_search_left() and ext4_ext_search_right()
 *   - search for metadata in a few groups
 *
 * TODO v4:
 *   - normalization should take into account whether file is still open
 *   - discard preallocations if no free space left (policy?)
 *   - don't normalize tails
 *   - quota
 *   - reservation for superuser
 *
 * TODO v3:
 *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *   - track min/max extents in each group for better group selection
 *   - mb_mark_used() may allocate chunk right after splitting buddy
 *   - tree of groups sorted by number of free blocks
 *   - error handling
 */

/*
 * An allocation request involves a request for multiple blocks
 * near to the goal (block) value specified.
 *
 * During the initialization phase of the allocator we decide to use the
 * group preallocation or inode preallocation depending on the size of
 * the file. The size of the file could be the resulting file size we
 * would have after allocation, or the current file size, whichever is
 * larger. If the size is less than sbi->s_mb_stream_request we select
 * the group preallocation. The default value of s_mb_stream_request is
 * 16 blocks. This can also be tuned via
 * /proc/fs/ext4/<partition>/stream_req. The value is represented in
 * terms of number of blocks.
 *
 * The main motivation for having small files use group preallocation is
 * to ensure that small files are placed close together on the disk.
 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains the list of prealloc
 * spaces for this particular inode. The inode prealloc space is
 * represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length of this prealloc space
 * pa_free   -> free space available in this prealloc space
 *
 * The inode preallocation space is used looking at the _logical_ start
 * block. If the logical file block falls within the range of a prealloc
 * space, we consume that particular prealloc space. This makes sure
 * that we have contiguous physical blocks representing the file blocks.
 *
 * The important thing to be noted in the case of inode prealloc space
 * is that we don't modify the values associated with the inode prealloc
 * space except pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and if
 * we have the group allocation flag set then we look at the locality
 * group prealloc space. These are per-CPU prealloc lists represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-CPU locality group is to reduce the
 * contention between CPUs. It is possible to get scheduled at this
 * point.
 *
 * The locality group prealloc space is used looking at whether we have
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode and/or locality group prealloc
 * then we look at the buddy cache. The buddy cache is represented by
 * ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
 * mapped to the buddy and bitmap information regarding the different
 * groups. The buddy information is attached to the buddy cache inode so
 * that we can access it through the page cache. The information
 * regarding each group is loaded via ext4_mb_load_buddy. The
 * information involves the block bitmap and buddy information. The
 * information is stored in the inode as:
 *
 * {                        page                        }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information. So for each group we
 * take up 2 blocks. A page can contain blocks_per_page
 * (PAGE_CACHE_SIZE / blocksize) blocks. So it can have information
 * regarding groups_per_page, which is blocks_per_page/2.
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for count number of blocks in the buddy cache. If we were
 * able to locate that many free blocks we return with additional
 * information regarding the rest of the contiguous physical blocks
 * available.
 *
 * Before allocating blocks via the buddy cache we normalize the request
 * blocks. This ensures we ask for more blocks than we need. The extra
 * blocks that we get after allocation are added to the respective
 * prealloc list. In case of inode preallocation we follow a list of
 * heuristics based on file size. This can be found in
 * ext4_mb_normalize_request. If we are doing a group prealloc we try to
 * normalize the request to sbi->s_mb_group_prealloc. The default value
 * of s_mb_group_prealloc is 512 blocks. This can be tuned via
 * /proc/fs/ext4/<partition>/group_prealloc. The value is represented in
 * terms of number of blocks. If we have mounted the file system with
 * the -o stripe=<value> option the group prealloc request is normalized
 * to the stripe value (sbi->s_stripe).
 *
 * The regular allocator (using the buddy cache) supports a few tunables:
 *
 * /proc/fs/ext4/<partition>/min_to_scan
 * /proc/fs/ext4/<partition>/max_to_scan
 * /proc/fs/ext4/<partition>/order2_req
 *
 * The regular allocator uses a buddy scan only if the request length is
 * a power of 2 blocks and the order of allocation is >=
 * sbi->s_mb_order2_reqs. The value of s_mb_order2_reqs can be tuned via
 * /proc/fs/ext4/<partition>/order2_req. If the request length is equal
 * to the stripe size (sbi->s_stripe), we search for contiguous blocks
 * in stripe-size units. This should result in better allocation on RAID
 * setups. If not, we search in the specific group using the bitmap for
 * best extents. The tunables min_to_scan and max_to_scan control the
 * behaviour here. min_to_scan indicates how long mballoc __must__ look
 * for a best extent and max_to_scan indicates how long mballoc __can__
 * look for a best extent among the found extents. Searching for blocks
 * starts with the group specified as the goal value in the allocation
 * context via ac_g_ex. Each group is first checked based on the
 * criteria for whether it can be used for allocation.
 * ext4_mb_good_group explains how the groups are checked.
 *
 * Both of the prealloc spaces are populated as described above. So for
 * the first request we will hit the buddy cache, which results in this
 * prealloc space getting filled. The prealloc space is then later used
 * for subsequent requests.
 */
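
/*
 * Worked example of the layout above (illustrative, not from the
 * original source): with PAGE_CACHE_SIZE = 4096 and blocksize = 1024,
 * blocks_per_page = 4096 / 1024 = 4 and groups_per_page = 4 / 2 = 2,
 * so page 0 of the buddy cache inode holds
 *
 * [ group 0 bitmap ][ group 0 buddy ][ group 1 bitmap ][ group 1 buddy ]
 *
 * and, in general, group G's bitmap lives at logical block 2*G of the
 * buddy cache inode and its buddy data at block 2*G + 1.
 */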

/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to a specific inode and can be used for this inode only.
 *    it describes a part of the inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks the number of blocks
 *    left unused. so, before taking some block from a descriptor, one
 *    must make sure the corresponding logical block isn't allocated
 *    yet. this also means that freeing any block within a descriptor's
 *    range must discard all preallocated blocks.
 *  - locality group
 *    assigned to a specific locality group which does not translate to
 *    a permanent set of inodes: an inode can join and leave a group.
 *    space from this type of preallocation can be used for any inode.
 *    thus it's consumed from the beginning to the end.
 *
 * the relation between them can be expressed as:
 *    in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means the blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in the mballoc world means that at any time a block is
 * either free or used in ALL structures. notice: "any time" should not
 * be read literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count the
 * number of blocks: how many blocks are marked used/free in the
 * on-disk bitmap, buddy and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:			buddy = on-disk + PAs
 *  - new PA:				buddy += N; PA = N
 *  - use inode PA:			on-disk += N; PA -= N
 *  - discard inode PA:			buddy -= on-disk - PA; PA = 0
 *  - use locality group PA:		on-disk += N; PA -= N
 *  - discard locality group PA:	buddy -= PA; PA = 0
 * note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
 *       is used in the real operation, because we can't know the actual
 *       used bits from the PA, only from the on-disk bitmap
 *
 * if we follow this strict logic, then all operations above should be
 * atomic. given that some of them can block, we'd have to use something
 * like semaphores, killing performance on high-end SMP hardware. let's
 * try to relax this using the following knowledge:
 *  1) if a buddy is referenced, it's already initialized
 *  2) while a block is used in the buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk
 *     has a bit set and a PA claims the same block, it's OK. IOW, one
 *     can set a bit in the on-disk bitmap if the buddy has the same bit
 *     set and/or a PA covers the corresponding block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for a PA are allocated in the buddy, so the buddy must be
 *      referenced until the PA is linked to the allocation group, to
 *      avoid concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either the on-disk bitmap or the PA
 *      has uptodate data. given (3), we care that the PA-=N operation
 *      doesn't interfere with init
 *    - discard inode PA
 *      the simplest way would be to have the buddy initialized by the
 *      discard
 *    - use locality group PA
 *      again, PA-=N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have the buddy initialized by the
 *      discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *  - use inode PA vs.
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different
 *      blocks
 *    - discard locality group PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *
 * now we're ready to draw a few conclusions:
 *  - a PA is referenced, and while it is, no discard is possible
 *  - a PA is referenced until the corresponding blocks are marked in
 *    the on-disk bitmap
 *  - a PA changes only after the on-disk bitmap does
 *  - discard must not compete with init. either init is done before
 *    any discard, or they're serialized somehow
 *  - buddy init as the sum of on-disk bitmap and PAs is done atomically
 *
 * a special case is when we've used a PA to emptiness. no need to
 * modify the buddy in this case, but we should care about concurrent
 * init.
 *
 */
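
/*
 * Numeric example of the accounting rules above (illustrative): take a
 * fresh inode PA of N = 16 blocks.  On 'new PA' the buddy marks 16
 * blocks used while the on-disk bitmap is untouched.  Writing 4 file
 * blocks from it ('use inode PA') sets 4 bits on disk and drops pa_free
 * from 16 to 12.  On 'discard inode PA' the buddy gives back
 * 16 - 4 = 12 blocks, and it must consult the on-disk bitmap to learn
 * which 4 of the 16 bits are really set, exactly as the
 * 'buddy -= on-disk - PA' note says.
 */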

/*
 * Logic in a few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group	(group)
 *  - object (inode/locality)	(object)
 *  - per-pa lock		(pa)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *    pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *    pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *    pa
 *    group
 *    object
 *
 */

static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
#if BITS_PER_LONG == 64
	*bit += ((unsigned long) addr & 7UL) << 3;
	addr = (void *) ((unsigned long) addr & ~7UL);
#elif BITS_PER_LONG == 32
	*bit += ((unsigned long) addr & 3UL) << 3;
	addr = (void *) ((unsigned long) addr & ~3UL);
#else
#error "how many bits you are?!"
#endif
	return addr;
}
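
/*
 * Example (illustrative): on a 64-bit machine, calling this with
 * addr = 0x1003 and *bit = 5 rounds addr down to 0x1000 and adds
 * (0x1003 & 7) << 3 = 24 to the bit index, so *bit becomes 29.  The
 * pair still names the same bit in memory, but addr is now aligned the
 * way ext4_test_bit() and friends require.
 */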

static inline int mb_test_bit(int bit, void *addr)
{
	/*
	 * ext4_test_bit on architectures like powerpc
	 * needs an unsigned long aligned address
	 */
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_bit(bit, addr);
}

static inline void mb_set_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_set_bit(bit, addr);
}

static inline void mb_set_bit_atomic(spinlock_t *lock, int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_set_bit_atomic(lock, bit, addr);
}

static inline void mb_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_clear_bit(bit, addr);
}

static inline void mb_clear_bit_atomic(spinlock_t *lock, int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_clear_bit_atomic(lock, bit, addr);
}

static inline int mb_find_next_zero_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static inline int mb_find_next_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
{
	char *bb;

	BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b));
	BUG_ON(max == NULL);

	if (order > e4b->bd_blkbits + 1) {
		*max = 0;
		return NULL;
	}

	/* at order 0 we see each particular block */
	*max = 1 << (e4b->bd_blkbits + 3);
	if (order == 0)
		return EXT4_MB_BITMAP(e4b);

	bb = EXT4_MB_BUDDY(e4b) + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];

	return bb;
}
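
/*
 * Layout example (illustrative): for a 4KB blocksize, bd_blkbits = 12,
 * so order 0 is the block bitmap itself with *max = 1 << 15 = 32768
 * bits.  Higher orders live inside the buddy block at s_mb_offsets[],
 * each level half the size of the one below it: order 1 has 16384 bits
 * (one per pair of blocks), order 2 has 8192 bits (one per aligned
 * 4-block chunk), and so on up to order bd_blkbits + 1.
 */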

#ifdef DOUBLE_CHECK
static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
				  int first, int count)
{
	int i;
	struct super_block *sb = e4b->bd_sb;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	BUG_ON(!ext4_is_group_locked(sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
			ext4_fsblk_t blocknr;
			blocknr = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb);
			blocknr += first + i;
			blocknr +=
			    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);

			ext4_error(sb, __func__, "double-free of inode"
				   " %lu's block %llu(bit %u in group %lu)\n",
				   inode ? inode->i_ino : 0, blocknr,
				   first + i, e4b->bd_group);
		}
		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
{
	int i;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	BUG_ON(!ext4_is_group_locked(e4b->bd_sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
		unsigned char *b1, *b2;
		int i;
		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
		b2 = (unsigned char *) bitmap;
		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
			if (b1[i] != b2[i]) {
				printk(KERN_ERR "corruption in group %lu "
				       "at byte %u(%u): %x in copy != %x "
				       "on disk/prealloc\n",
				       e4b->bd_group, i, i * 8, b1[i], b2[i]);
				BUG();
			}
		}
	}
}

#else
static inline void mb_free_blocks_double(struct inode *inode,
				struct ext4_buddy *e4b, int first, int count)
{
	return;
}
static inline void mb_mark_used_double(struct ext4_buddy *e4b,
						int first, int count)
{
	return;
}
static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	return;
}
#endif

#ifdef AGGRESSIVE_CHECK

#define MB_CHECK_ASSERT(assert)						\
do {									\
	if (!(assert)) {						\
		printk(KERN_EMERG					\
			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
			function, file, line, # assert);		\
		BUG();							\
	}								\
} while (0)

static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
				const char *function, int line)
{
	struct super_block *sb = e4b->bd_sb;
	int order = e4b->bd_blkbits + 1;
	int max;
	int max2;
	int i;
	int j;
	int k;
	int count;
	struct ext4_group_info *grp;
	int fragments = 0;
	int fstart;
	struct list_head *cur;
	void *buddy;
	void *buddy2;

	{
		static int mb_check_counter;
		if (mb_check_counter++ % 100 != 0)
			return 0;
	}

	while (order > 1) {
		buddy = mb_find_buddy(e4b, order, &max);
		MB_CHECK_ASSERT(buddy);
		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
		MB_CHECK_ASSERT(buddy2);
		MB_CHECK_ASSERT(buddy != buddy2);
		MB_CHECK_ASSERT(max * 2 == max2);

		count = 0;
		for (i = 0; i < max; i++) {

			if (mb_test_bit(i, buddy)) {
				/* only single bit in buddy2 may be 1 */
				if (!mb_test_bit(i << 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit((i<<1)+1, buddy2));
				} else if (!mb_test_bit((i << 1) + 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit(i << 1, buddy2));
				}
				continue;
			}

			/* both bits in buddy2 must be 0 */
			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));

			for (j = 0; j < (1 << order); j++) {
				k = (i * (1 << order)) + j;
				MB_CHECK_ASSERT(
					!mb_test_bit(k, EXT4_MB_BITMAP(e4b)));
			}
			count++;
		}
		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
		order--;
	}

	fstart = -1;
	buddy = mb_find_buddy(e4b, 0, &max);
	for (i = 0; i < max; i++) {
		if (!mb_test_bit(i, buddy)) {
			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
			if (fstart == -1) {
				fragments++;
				fstart = i;
			}
			continue;
		}
		fstart = -1;
		/* check used bits only */
		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
			buddy2 = mb_find_buddy(e4b, j, &max2);
			k = i >> j;
			MB_CHECK_ASSERT(k < max2);
			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
		}
	}
	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);

	grp = ext4_get_group_info(sb, e4b->bd_group);
	buddy = mb_find_buddy(e4b, 0, &max);
	list_for_each(cur, &grp->bb_prealloc_list) {
		ext4_group_t groupnr;
		struct ext4_prealloc_space *pa;
		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
		for (i = 0; i < pa->pa_len; i++)
			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
	}
	return 0;
}
#undef MB_CHECK_ASSERT
#define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
					__FILE__, __func__, __LINE__)
#else
#define mb_check_buddy(e4b)
#endif

/* FIXME!! need more doc */
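/*
 * Worked example (illustrative): marking first = 6, len = 6 free
 * proceeds in power-of-two chunks bounded by both the alignment of
 * 'first' (ffs) and the remaining length (fls):
 *
 *	first = 6, len = 6: max = 1, min = 2, clamped to 1 -> chunk = 2
 *	first = 8, len = 4: max = 3, min = 2               -> chunk = 4
 *
 * i.e. the order-1 bit for blocks 6-7 and the order-2 bit for blocks
 * 8-11 are cleared in the buddy, and bb_counters[1] and bb_counters[2]
 * are each incremented once.
 */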
static void ext4_mb_mark_free_simple(struct super_block *sb,
				void *buddy, unsigned first, int len,
					struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned short min;
	unsigned short max;
	unsigned short chunk;
	unsigned short border;

	BUG_ON(len > EXT4_BLOCKS_PER_GROUP(sb));

	border = 2 << sb->s_blocksize_bits;

	while (len > 0) {
		/* find how many blocks can be covered since this position */
		max = ffs(first | border) - 1;

		/* find how many blocks of power 2 we need to mark */
		min = fls(len) - 1;

		if (max < min)
			min = max;
		chunk = 1 << min;

		/* mark multiblock chunks only */
		grp->bb_counters[min]++;
		if (min > 0)
			mb_clear_bit(first >> min,
				     buddy + sbi->s_mb_offsets[min]);

		len -= chunk;
		first += chunk;
	}
}

static void ext4_mb_generate_buddy(struct super_block *sb,
				void *buddy, void *bitmap, ext4_group_t group)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	unsigned short max = EXT4_BLOCKS_PER_GROUP(sb);
	unsigned short i = 0;
	unsigned short first;
	unsigned short len;
	unsigned free = 0;
	unsigned fragments = 0;
	unsigned long long period = get_cycles();

	/* initialize buddy from bitmap which is aggregation
	 * of on-disk bitmap and preallocations */
	i = mb_find_next_zero_bit(bitmap, max, 0);
	grp->bb_first_free = i;
	while (i < max) {
		fragments++;
		first = i;
		i = mb_find_next_bit(bitmap, max, i);
		len = i - first;
		free += len;
		if (len > 1)
			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
		else
			grp->bb_counters[0]++;
		if (i < max)
			i = mb_find_next_zero_bit(bitmap, max, i);
	}
	grp->bb_fragments = fragments;

	if (free != grp->bb_free) {
		ext4_error(sb, __func__,
			"EXT4-fs: group %lu: %u blocks in bitmap, %u in gd\n",
			group, free, grp->bb_free);
		/*
		 * If we intend to continue, we consider the group descriptor
		 * corrupt and update bb_free using the bitmap value
		 */
		grp->bb_free = free;
	}

	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));

	period = get_cycles() - period;
	spin_lock(&EXT4_SB(sb)->s_bal_lock);
	EXT4_SB(sb)->s_mb_buddies_generated++;
	EXT4_SB(sb)->s_mb_generation_time += period;
	spin_unlock(&EXT4_SB(sb)->s_bal_lock);
}

/* The buddy information is attached to the buddy cache inode
 * for convenience. The information regarding each group
 * is loaded via ext4_mb_load_buddy. The information involves the
 * block bitmap and buddy information. The information is
 * stored in the inode as
 *
 * {                        page                        }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information.
 * So for each group we take up 2 blocks. A page can
 * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks.
 * So it can have information regarding groups_per_page, which
 * is blocks_per_page/2.
 */

static int ext4_mb_init_cache(struct page *page, char *incore)
{
	int blocksize;
	int blocks_per_page;
	int groups_per_page;
	int err = 0;
	int i;
	ext4_group_t first_group;
	int first_block;
	struct super_block *sb;
	struct buffer_head *bhs;
	struct buffer_head **bh;
	struct inode *inode;
	char *data;
	char *bitmap;

	mb_debug("init page %lu\n", page->index);

	inode = page->mapping->host;
	sb = inode->i_sb;
	blocksize = 1 << inode->i_blkbits;
	blocks_per_page = PAGE_CACHE_SIZE / blocksize;

	groups_per_page = blocks_per_page >> 1;
	if (groups_per_page == 0)
		groups_per_page = 1;

	/* allocate buffer_heads to read bitmaps */
	if (groups_per_page > 1) {
		err = -ENOMEM;
		i = sizeof(struct buffer_head *) * groups_per_page;
		bh = kzalloc(i, GFP_NOFS);
		if (bh == NULL)
			goto out;
	} else
		bh = &bhs;

	first_group = page->index * blocks_per_page / 2;

	/* read all groups the page covers into the cache */
	for (i = 0; i < groups_per_page; i++) {
		struct ext4_group_desc *desc;

		if (first_group + i >= EXT4_SB(sb)->s_groups_count)
			break;

		err = -EIO;
		desc = ext4_get_group_desc(sb, first_group + i, NULL);
		if (desc == NULL)
			goto out;

		err = -ENOMEM;
		bh[i] = sb_getblk(sb, ext4_block_bitmap(sb, desc));
		if (bh[i] == NULL)
			goto out;

		if (bh_uptodate_or_lock(bh[i]))
			continue;

		spin_lock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
		if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
			ext4_init_block_bitmap(sb, bh[i],
						first_group + i, desc);
			set_buffer_uptodate(bh[i]);
			unlock_buffer(bh[i]);
			spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
			continue;
		}
		spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
		get_bh(bh[i]);
		bh[i]->b_end_io = end_buffer_read_sync;
		submit_bh(READ, bh[i]);
		mb_debug("read bitmap for group %lu\n", first_group + i);
	}

	/* wait for I/O completion */
	for (i = 0; i < groups_per_page && bh[i]; i++)
		wait_on_buffer(bh[i]);

	err = -EIO;
	for (i = 0; i < groups_per_page && bh[i]; i++)
		if (!buffer_uptodate(bh[i]))
			goto out;

	err = 0;
	first_block = page->index * blocks_per_page;
	for (i = 0; i < blocks_per_page; i++) {
		int group;
		struct ext4_group_info *grinfo;

		group = (first_block + i) >> 1;
		if (group >= EXT4_SB(sb)->s_groups_count)
			break;

		/*
		 * data carries information regarding this
		 * particular group in the format specified
		 * above
		 *
		 */
		data = page_address(page) + (i * blocksize);
		bitmap = bh[group - first_group]->b_data;

		/*
		 * We place the buddy block and bitmap block
		 * close together
		 */
		if ((first_block + i) & 1) {
			/* this is a buddy block */
			BUG_ON(incore == NULL);
			mb_debug("put buddy for group %u in page %lu/%x\n",
				group, page->index, i * blocksize);
			memset(data, 0xff, blocksize);
			grinfo = ext4_get_group_info(sb, group);
			grinfo->bb_fragments = 0;
			memset(grinfo->bb_counters, 0,
			       sizeof(unsigned short)*(sb->s_blocksize_bits+2));
			/*
			 * incore got set to the group block bitmap below
			 */
			ext4_mb_generate_buddy(sb, data, incore, group);
			incore = NULL;
		} else {
			/* this is a bitmap block */
			BUG_ON(incore != NULL);
			mb_debug("put bitmap for group %u in page %lu/%x\n",
				group, page->index, i * blocksize);

			/* see comments in ext4_mb_put_pa() */
			ext4_lock_group(sb, group);
			memcpy(data, bitmap, blocksize);

			/* mark all preallocated blks used in in-core bitmap */
			ext4_mb_generate_from_pa(sb, data, group);
			ext4_unlock_group(sb, group);

			/* set incore so that the buddy information can be
			 * generated using this
			 */
			incore = data;
		}
	}
	SetPageUptodate(page);

out:
	if (bh) {
		for (i = 0; i < groups_per_page && bh[i]; i++)
			brelse(bh[i]);
		if (bh != &bhs)
			kfree(bh);
	}
	return err;
}

static noinline_for_stack int
ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
					struct ext4_buddy *e4b)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct inode *inode = sbi->s_buddy_cache;
	int blocks_per_page;
	int block;
	int pnum;
	int poff;
	struct page *page;
	int ret;

	mb_debug("load group %lu\n", group);

	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;

	e4b->bd_blkbits = sb->s_blocksize_bits;
	e4b->bd_info = ext4_get_group_info(sb, group);
	e4b->bd_sb = sb;
	e4b->bd_group = group;
	e4b->bd_buddy_page = NULL;
	e4b->bd_bitmap_page = NULL;

	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

	/* we could use find_or_create_page(), but it locks the page,
	 * which we'd like to avoid in the fast path ... */
	page = find_get_page(inode->i_mapping, pnum);
	if (page == NULL || !PageUptodate(page)) {
		if (page)
			page_cache_release(page);
		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
		if (page) {
			BUG_ON(page->mapping != inode->i_mapping);
			if (!PageUptodate(page)) {
				ret = ext4_mb_init_cache(page, NULL);
				if (ret) {
					unlock_page(page);
					goto err;
				}
				mb_cmp_bitmaps(e4b, page_address(page) +
					       (poff * sb->s_blocksize));
			}
			unlock_page(page);
		}
	}
	if (page == NULL || !PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	e4b->bd_bitmap_page = page;
	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
	mark_page_accessed(page);

	block++;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

	page = find_get_page(inode->i_mapping, pnum);
	if (page == NULL || !PageUptodate(page)) {
		if (page)
			page_cache_release(page);
		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
		if (page) {
			BUG_ON(page->mapping != inode->i_mapping);
			if (!PageUptodate(page)) {
				ret = ext4_mb_init_cache(page, e4b->bd_bitmap);
				if (ret) {
					unlock_page(page);
					goto err;
				}
			}
			unlock_page(page);
		}
	}
	if (page == NULL || !PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	e4b->bd_buddy_page = page;
	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
	mark_page_accessed(page);

	BUG_ON(e4b->bd_bitmap_page == NULL);
	BUG_ON(e4b->bd_buddy_page == NULL);

	return 0;

err:
	if (e4b->bd_bitmap_page)
		page_cache_release(e4b->bd_bitmap_page);
	if (e4b->bd_buddy_page)
		page_cache_release(e4b->bd_buddy_page);
	e4b->bd_buddy = NULL;
	e4b->bd_bitmap = NULL;
	return ret;
}

static void ext4_mb_release_desc(struct ext4_buddy *e4b)
{
	if (e4b->bd_bitmap_page)
		page_cache_release(e4b->bd_bitmap_page);
	if (e4b->bd_buddy_page)
		page_cache_release(e4b->bd_buddy_page);
}

static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
{
	int order = 1;
	void *bb;

	BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b));
	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));

	bb = EXT4_MB_BUDDY(e4b);
	while (order <= e4b->bd_blkbits + 1) {
		block = block >> 1;
		if (!mb_test_bit(block, bb)) {
			/* this block is part of buddy of order 'order' */
			return order;
		}
		bb += 1 << (e4b->bd_blkbits - order);
		order++;
	}
	return 0;
}
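
/*
 * Example (illustrative): if the aligned 4-block chunk containing
 * 'block' is free as a unit, its bit at position block >> 2 in the
 * order-2 bitmap is clear and the function returns 2.  A return of 0
 * means no enclosing buddy chunk of order >= 1 is free, so only the
 * order-0 bitmap describes this block.
 */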

static void mb_clear_bits(spinlock_t *lock, void *bm, int cur, int len)
{
	__u32 *addr;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: clear whole word at once */
			addr = bm + (cur >> 3);
			*addr = 0;
			cur += 32;
			continue;
		}
		mb_clear_bit_atomic(lock, cur, bm);
		cur++;
	}
}

static void mb_set_bits(spinlock_t *lock, void *bm, int cur, int len)
{
	__u32 *addr;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: set whole word at once */
			addr = bm + (cur >> 3);
			*addr = 0xffffffff;
			cur += 32;
			continue;
		}
		mb_set_bit_atomic(lock, cur, bm);
		cur++;
	}
}

static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
			  int first, int count)
{
	int block = 0;
	int max = 0;
	int order;
	void *buddy;
	void *buddy2;
	struct super_block *sb = e4b->bd_sb;

	BUG_ON(first + count > (sb->s_blocksize << 3));
	BUG_ON(!ext4_is_group_locked(sb, e4b->bd_group));
	mb_check_buddy(e4b);
	mb_free_blocks_double(inode, e4b, first, count);

	e4b->bd_info->bb_free += count;
	if (first < e4b->bd_info->bb_first_free)
		e4b->bd_info->bb_first_free = first;

	/* let's maintain fragments counter */
	if (first != 0)
		block = !mb_test_bit(first - 1, EXT4_MB_BITMAP(e4b));
	if (first + count < EXT4_SB(sb)->s_mb_maxs[0])
		max = !mb_test_bit(first + count, EXT4_MB_BITMAP(e4b));
	if (block && max)
		e4b->bd_info->bb_fragments--;
	else if (!block && !max)
		e4b->bd_info->bb_fragments++;

	/* let's maintain buddy itself */
	while (count-- > 0) {
		block = first++;
		order = 0;

		if (!mb_test_bit(block, EXT4_MB_BITMAP(e4b))) {
			ext4_fsblk_t blocknr;
			blocknr = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb);
			blocknr += block;
			blocknr +=
			    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
			ext4_unlock_group(sb, e4b->bd_group);
			ext4_error(sb, __func__, "double-free of inode"
				   " %lu's block %llu(bit %u in group %lu)\n",
				   inode ? inode->i_ino : 0, blocknr, block,
				   e4b->bd_group);
			ext4_lock_group(sb, e4b->bd_group);
		}
		mb_clear_bit(block, EXT4_MB_BITMAP(e4b));
		e4b->bd_info->bb_counters[order]++;

		/* start of the buddy */
		buddy = mb_find_buddy(e4b, order, &max);

		do {
			block &= ~1UL;
			if (mb_test_bit(block, buddy) ||
					mb_test_bit(block + 1, buddy))
				break;

			/* both the buddies are free, try to coalesce them */
			buddy2 = mb_find_buddy(e4b, order + 1, &max);

			if (!buddy2)
				break;

			if (order > 0) {
				/* for special purposes, we don't set
				 * free bits in bitmap */
				mb_set_bit(block, buddy);
				mb_set_bit(block + 1, buddy);
			}
			e4b->bd_info->bb_counters[order]--;
			e4b->bd_info->bb_counters[order]--;

			block = block >> 1;
			order++;
			e4b->bd_info->bb_counters[order]++;

			mb_clear_bit(block, buddy2);
			buddy = buddy2;
		} while (1);
	}
	mb_check_buddy(e4b);
}
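
/*
 * Coalescing example (illustrative): freeing block 5 while block 4 is
 * already free leaves both order-0 bits clear (the order-0 bitmap
 * always reflects true block state) and clears the order-1 bit
 * covering blocks 4-5, so bb_counters[0] drops by 2 and bb_counters[1]
 * grows by 1.  If blocks 6-7 are free at order 1 too, the merge repeats
 * one level up: both order-1 bits are set back to "used" and the
 * order-2 bit for blocks 4-7 is cleared instead.
 */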

static int mb_find_extent(struct ext4_buddy *e4b, int order, int block,
				int needed, struct ext4_free_extent *ex)
{
	int next = block;
	int max;
	int ord;
	void *buddy;

	BUG_ON(!ext4_is_group_locked(e4b->bd_sb, e4b->bd_group));
	BUG_ON(ex == NULL);

	buddy = mb_find_buddy(e4b, order, &max);
	BUG_ON(buddy == NULL);
	BUG_ON(block >= max);
	if (mb_test_bit(block, buddy)) {
		ex->fe_len = 0;
		ex->fe_start = 0;
		ex->fe_group = 0;
		return 0;
	}

	/* FIXME drop order completely ? */
	if (likely(order == 0)) {
		/* find actual order */
		order = mb_find_order_for_block(e4b, block);
		block = block >> order;
	}

	ex->fe_len = 1 << order;
	ex->fe_start = block << order;
	ex->fe_group = e4b->bd_group;

	/* calc difference from given start */
	next = next - ex->fe_start;
	ex->fe_len -= next;
	ex->fe_start += next;

	while (needed > ex->fe_len &&
	       (buddy = mb_find_buddy(e4b, order, &max))) {

		if (block + 1 >= max)
			break;

		next = (block + 1) * (1 << order);
		if (mb_test_bit(next, EXT4_MB_BITMAP(e4b)))
			break;

		ord = mb_find_order_for_block(e4b, next);

		order = ord;
		block = next >> order;
		ex->fe_len += 1 << order;
	}

	BUG_ON(ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3)));
	return ex->fe_len;
}

static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
{
	int ord;
	int mlen = 0;
	int max = 0;
	int cur;
	int start = ex->fe_start;
	int len = ex->fe_len;
	unsigned ret = 0;
	int len0 = len;
	void *buddy;

	BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
	BUG_ON(e4b->bd_group != ex->fe_group);
	BUG_ON(!ext4_is_group_locked(e4b->bd_sb, e4b->bd_group));
	mb_check_buddy(e4b);
	mb_mark_used_double(e4b, start, len);

	e4b->bd_info->bb_free -= len;
	if (e4b->bd_info->bb_first_free == start)
		e4b->bd_info->bb_first_free += len;

	/* let's maintain fragments counter */
	if (start != 0)
		mlen = !mb_test_bit(start - 1, EXT4_MB_BITMAP(e4b));
	if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
		max = !mb_test_bit(start + len, EXT4_MB_BITMAP(e4b));
	if (mlen && max)
		e4b->bd_info->bb_fragments++;
	else if (!mlen && !max)
		e4b->bd_info->bb_fragments--;

	/* let's maintain buddy itself */
	while (len) {
		ord = mb_find_order_for_block(e4b, start);

		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
			/* the whole chunk may be allocated at once! */
			mlen = 1 << ord;
			buddy = mb_find_buddy(e4b, ord, &max);
			BUG_ON((start >> ord) >= max);
			mb_set_bit(start >> ord, buddy);
			e4b->bd_info->bb_counters[ord]--;
			start += mlen;
			len -= mlen;
			BUG_ON(len < 0);
			continue;
		}

		/* store for history */
		if (ret == 0)
			ret = len | (ord << 16);

		/* we have to split large buddy */
		BUG_ON(ord <= 0);
		buddy = mb_find_buddy(e4b, ord, &max);
		mb_set_bit(start >> ord, buddy);
		e4b->bd_info->bb_counters[ord]--;

		ord--;
		cur = (start >> ord) & ~1U;
		buddy = mb_find_buddy(e4b, ord, &max);
		mb_clear_bit(cur, buddy);
		mb_clear_bit(cur + 1, buddy);
		e4b->bd_info->bb_counters[ord]++;
		e4b->bd_info->bb_counters[ord]++;
	}

	mb_set_bits(sb_bgl_lock(EXT4_SB(e4b->bd_sb), ex->fe_group),
		    EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
	mb_check_buddy(e4b);

	return ret;
}
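
/*
 * Splitting example (illustrative): allocating 3 blocks at start = 8
 * out of a free order-2 chunk (blocks 8-11) first marks the order-2
 * bit used and splits it into two order-1 halves (8-9 and 10-11).
 * The 8-9 half is consumed whole at order 1; the 10-11 half is split
 * again so block 10 is taken at order 0 while block 11 stays free.
 * The final mb_set_bits() then sets bits 8-10 in the order-0 bitmap.
 */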

/*
 * Must be called under group lock!
 */
static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int ret;

	BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
	BUG_ON(ac->ac_status == AC_STATUS_FOUND);

	ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
	ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
	ret = mb_mark_used(e4b, &ac->ac_b_ex);

	/* preallocation can change ac_b_ex, thus we store the actually
	 * allocated blocks for history */
	ac->ac_f_ex = ac->ac_b_ex;

	ac->ac_status = AC_STATUS_FOUND;
	ac->ac_tail = ret & 0xffff;
	ac->ac_buddy = ret >> 16;

	/* XXXXXXX: SUCH A HORRIBLE **CK */
	/* FIXME!! Why ? */
	ac->ac_bitmap_page = e4b->bd_bitmap_page;
	get_page(ac->ac_bitmap_page);
	ac->ac_buddy_page = e4b->bd_buddy_page;
	get_page(ac->ac_buddy_page);

	/* store last allocated for subsequent stream allocation */
	if ((ac->ac_flags & EXT4_MB_HINT_DATA)) {
		spin_lock(&sbi->s_md_lock);
		sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
		sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
		spin_unlock(&sbi->s_md_lock);
	}
}

/*
 * regular allocator, for general purposes allocation
 */

static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b,
					int finish_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_free_extent *bex = &ac->ac_b_ex;
	struct ext4_free_extent *gex = &ac->ac_g_ex;
	struct ext4_free_extent ex;
	int max;

	/*
	 * We don't want to scan for a whole year
	 */
	if (ac->ac_found > sbi->s_mb_max_to_scan &&
			!(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		ac->ac_status = AC_STATUS_BREAK;
		return;
	}

	/*
	 * Haven't found a good chunk so far, let's continue
	 */
	if (bex->fe_len < gex->fe_len)
		return;

	if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
			&& bex->fe_group == e4b->bd_group) {
		/* recheck chunk's availability - we don't know
		 * when it was found (within this lock-unlock
		 * period or not) */
		max = mb_find_extent(e4b, 0, bex->fe_start, gex->fe_len, &ex);
		if (max >= gex->fe_len) {
			ext4_mb_use_best_found(ac, e4b);
			return;
		}
	}
}

/*
 * The routine checks whether the found extent is good enough. If it is,
 * then the extent gets marked used and the flag is set in the context
 * to stop scanning. Otherwise, the extent is compared with the
 * previously found extent and if the new one is better, then it's
 * stored in the context. Later, the best found extent will be used, if
 * mballoc can't find a good enough extent.
 *
 * FIXME: real allocation policy is to be designed yet!
 */
static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
					struct ext4_free_extent *ex,
					struct ext4_buddy *e4b)
{
	struct ext4_free_extent *bex = &ac->ac_b_ex;
	struct ext4_free_extent *gex = &ac->ac_g_ex;

	BUG_ON(ex->fe_len <= 0);
	BUG_ON(ex->fe_len >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
	BUG_ON(ex->fe_start >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
	BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);

	ac->ac_found++;

	/*
	 * The special case - take what you catch first
	 */
	if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		*bex = *ex;
		ext4_mb_use_best_found(ac, e4b);
		return;
	}

	/*
	 * Let's check whether the chunk is good enough
	 */
	if (ex->fe_len == gex->fe_len) {
		*bex = *ex;
		ext4_mb_use_best_found(ac, e4b);
		return;
	}

	/*
	 * If this is the first found extent, just store it in the context
	 */
	if (bex->fe_len == 0) {
		*bex = *ex;
		return;
	}

	/*
	 * If the new found extent is better, store it in the context
	 */
	if (bex->fe_len < gex->fe_len) {
		/* if the request isn't satisfied, any found extent
		 * larger than the previous best one is better */
		if (ex->fe_len > bex->fe_len)
			*bex = *ex;
	} else if (ex->fe_len > gex->fe_len) {
		/* if the request is satisfied, then we try to find
		 * an extent that still satisfies the request, but is
		 * smaller than the previous one */
		if (ex->fe_len < bex->fe_len)
			*bex = *ex;
	}

	ext4_mb_check_limits(ac, e4b, 0);
}

static int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct ext4_free_extent ex = ac->ac_b_ex;
	ext4_group_t group = ex.fe_group;
	int max;
	int err;

	BUG_ON(ex.fe_len <= 0);
	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
	if (err)
		return err;

	ext4_lock_group(ac->ac_sb, group);
	max = mb_find_extent(e4b, 0, ex.fe_start, ex.fe_len, &ex);

	if (max > 0) {
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	}

	ext4_unlock_group(ac->ac_sb, group);
	ext4_mb_release_desc(e4b);

	return 0;
}

static int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
				struct ext4_buddy *e4b)
{
	ext4_group_t group = ac->ac_g_ex.fe_group;
	int max;
	int err;
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_free_extent ex;

	if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
		return 0;

	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
	if (err)
		return err;

	ext4_lock_group(ac->ac_sb, group);
	max = mb_find_extent(e4b, 0, ac->ac_g_ex.fe_start,
			     ac->ac_g_ex.fe_len, &ex);

	if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
		ext4_fsblk_t start;

		start = (e4b->bd_group * EXT4_BLOCKS_PER_GROUP(ac->ac_sb)) +
			ex.fe_start + le32_to_cpu(es->s_first_data_block);
		/* use do_div to get remainder (would be 64-bit modulo) */
		if (do_div(start, sbi->s_stripe) == 0) {
			ac->ac_found++;
			ac->ac_b_ex = ex;
			ext4_mb_use_best_found(ac, e4b);
		}
	} else if (max >= ac->ac_g_ex.fe_len) {
		BUG_ON(ex.fe_len <= 0);
		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
		ac->ac_found++;
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	} else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
		/* Sometimes, the caller may want to merge even a small
		 * number of blocks to an existing extent */
		BUG_ON(ex.fe_len <= 0);
		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
		ac->ac_found++;
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	}
	ext4_unlock_group(ac->ac_sb, group);
	ext4_mb_release_desc(e4b);

	return 0;
}

/*
 * The routine scans buddy structures (not the bitmap!) from the given
 * order up to the max order and tries to find a big enough chunk to
 * satisfy the request
 */
static void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_group_info *grp = e4b->bd_info;
	void *buddy;
	int i;
	int k;
	int max;

	BUG_ON(ac->ac_2order <= 0);
	for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
		if (grp->bb_counters[i] == 0)
			continue;

		buddy = mb_find_buddy(e4b, i, &max);
		BUG_ON(buddy == NULL);

		k = mb_find_next_zero_bit(buddy, max, 0);
		BUG_ON(k >= max);

		ac->ac_found++;

		ac->ac_b_ex.fe_len = 1 << i;
		ac->ac_b_ex.fe_start = k << i;
		ac->ac_b_ex.fe_group = e4b->bd_group;

		ext4_mb_use_best_found(ac, e4b);

		BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);

		if (EXT4_SB(sb)->s_mb_stats)
			atomic_inc(&EXT4_SB(sb)->s_bal_2orders);

		break;
	}
}

/*
 * The routine scans the group and measures all found extents.
 * In order to optimize scanning, the caller must pass the number of
 * free blocks in the group, so the routine can know the upper limit.
 */
static void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct super_block *sb = ac->ac_sb;
	void *bitmap = EXT4_MB_BITMAP(e4b);
	struct ext4_free_extent ex;
	int i;
	int free;

	free = e4b->bd_info->bb_free;
	BUG_ON(free <= 0);

	i = e4b->bd_info->bb_first_free;

	while (free && ac->ac_status == AC_STATUS_CONTINUE) {
		i = mb_find_next_zero_bit(bitmap,
						EXT4_BLOCKS_PER_GROUP(sb), i);
		if (i >= EXT4_BLOCKS_PER_GROUP(sb)) {
			/*
			 * If we have a corrupt bitmap, we won't find any
			 * free blocks even though the group info says we
			 * have free blocks
			 */
			ext4_error(sb, __func__, "%d free blocks as per "
					"group info. But bitmap says 0\n",
					free);
			break;
		}

		mb_find_extent(e4b, 0, i, ac->ac_g_ex.fe_len, &ex);
		BUG_ON(ex.fe_len <= 0);
		if (free < ex.fe_len) {
			ext4_error(sb, __func__, "%d free blocks as per "
					"group info. But got %d blocks\n",
					free, ex.fe_len);
			/*
			 * The number of free blocks differs. This mostly
			 * indicates that the bitmap is corrupt. So exit
			 * without claiming the space.
			 */
			break;
		}

		ext4_mb_measure_extent(ac, &ex, e4b);

		i += ex.fe_len;
		free -= ex.fe_len;
	}

	ext4_mb_check_limits(ac, e4b, 1);
}

/*
 * This is a special case for storage like raid5:
 * we try to find stripe-aligned chunks for stripe-size requests
 * XXX should do so at least for multiples of stripe size as well
 */
static void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
				 struct ext4_buddy *e4b)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	void *bitmap = EXT4_MB_BITMAP(e4b);
	struct ext4_free_extent ex;
	ext4_fsblk_t first_group_block;
	ext4_fsblk_t a;
	ext4_grpblk_t i;
	int max;

	BUG_ON(sbi->s_stripe == 0);

	/* find first stripe-aligned block in group */
	first_group_block = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb)
		+ le32_to_cpu(sbi->s_es->s_first_data_block);
	a = first_group_block + sbi->s_stripe - 1;
	do_div(a, sbi->s_stripe);
	i = (a * sbi->s_stripe) - first_group_block;

	while (i < EXT4_BLOCKS_PER_GROUP(sb)) {
		if (!mb_test_bit(i, bitmap)) {
			max = mb_find_extent(e4b, 0, i, sbi->s_stripe, &ex);
			if (max >= sbi->s_stripe) {
				ac->ac_found++;
				ac->ac_b_ex = ex;
				ext4_mb_use_best_found(ac, e4b);
				break;
			}
		}
		i += sbi->s_stripe;
	}
}
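
/*
 * Alignment example (illustrative): with s_stripe = 16 and a group
 * whose first filesystem block is 8193, the first stripe-aligned block
 * is 8208, so the scan starts at group offset i = 8208 - 8193 = 15 and
 * then advances in steps of 16 blocks.
 */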

static int ext4_mb_good_group(struct ext4_allocation_context *ac,
				ext4_group_t group, int cr)
{
	unsigned free, fragments;
	unsigned i, bits;
	struct ext4_group_desc *desc;
	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);

	BUG_ON(cr < 0 || cr >= 4);
	BUG_ON(EXT4_MB_GRP_NEED_INIT(grp));

	free = grp->bb_free;
	fragments = grp->bb_fragments;
	if (free == 0)
		return 0;
	if (fragments == 0)
		return 0;

	switch (cr) {
	case 0:
		BUG_ON(ac->ac_2order == 0);
		/* If this group is uninitialized, skip it initially */
		desc = ext4_get_group_desc(ac->ac_sb, group, NULL);
		if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))
			return 0;

		bits = ac->ac_sb->s_blocksize_bits + 1;
		for (i = ac->ac_2order; i <= bits; i++)
			if (grp->bb_counters[i] > 0)
				return 1;
		break;
	case 1:
		if ((free / fragments) >= ac->ac_g_ex.fe_len)
			return 1;
		break;
	case 2:
		if (free >= ac->ac_g_ex.fe_len)
			return 1;
		break;
	case 3:
		return 1;
	default:
		BUG();
	}

	return 0;
}
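
/*
 * Example of the criteria above (illustrative): for a request of
 * fe_len = 8 blocks, a group with free = 100 and fragments = 20 has an
 * average extent of 5 blocks and is rejected at cr == 1, but accepted
 * at cr == 2 since 100 >= 8; at cr == 3 any group with free blocks
 * passes.
 */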
1692
4ddfef7b
ES
1693static noinline_for_stack int
1694ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
c9de560d
AT
1695{
1696 ext4_group_t group;
1697 ext4_group_t i;
1698 int cr;
1699 int err = 0;
1700 int bsbits;
1701 struct ext4_sb_info *sbi;
1702 struct super_block *sb;
1703 struct ext4_buddy e4b;
1704 loff_t size, isize;
1705
1706 sb = ac->ac_sb;
1707 sbi = EXT4_SB(sb);
1708 BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1709
1710 /* first, try the goal */
1711 err = ext4_mb_find_by_goal(ac, &e4b);
1712 if (err || ac->ac_status == AC_STATUS_FOUND)
1713 goto out;
1714
1715 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
1716 goto out;
1717
1718 /*
1719 * ac->ac2_order is set only if the fe_len is a power of 2
1720 * if ac2_order is set we also set criteria to 0 so that we
1721 * try exact allocation using buddy.
1722 */
1723 i = fls(ac->ac_g_ex.fe_len);
1724 ac->ac_2order = 0;
1725 /*
1726 * We search using buddy data only if the order of the request
1727 * is greater than equal to the sbi_s_mb_order2_reqs
1728 * You can tune it via /proc/fs/ext4/<partition>/order2_req
1729 */
1730 if (i >= sbi->s_mb_order2_reqs) {
1731 /*
1732 * This should tell if fe_len is exactly power of 2
1733 */
1734 if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
1735 ac->ac_2order = i - 1;
1736 }
1737
1738 bsbits = ac->ac_sb->s_blocksize_bits;
1739 /* if stream allocation is enabled, use global goal */
1740 size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
1741 isize = i_size_read(ac->ac_inode) >> bsbits;
1742 if (size < isize)
1743 size = isize;
1744
1745 if (size < sbi->s_mb_stream_request &&
1746 (ac->ac_flags & EXT4_MB_HINT_DATA)) {
1747 /* TBD: may be hot point */
1748 spin_lock(&sbi->s_md_lock);
1749 ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
1750 ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
1751 spin_unlock(&sbi->s_md_lock);
1752 }
c9de560d
AT
1753 /* Let's just scan groups to find more-less suitable blocks */
1754 cr = ac->ac_2order ? 0 : 1;
1755 /*
1756 * cr == 0 try to get exact allocation,
1757 * cr == 3 try to get anything
1758 */
1759repeat:
1760 for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
1761 ac->ac_criteria = cr;
ed8f9c75
AK
1762 /*
1763 * searching for the right group start
1764 * from the goal value specified
1765 */
1766 group = ac->ac_g_ex.fe_group;
1767
c9de560d
AT
1768 for (i = 0; i < EXT4_SB(sb)->s_groups_count; group++, i++) {
1769 struct ext4_group_info *grp;
1770 struct ext4_group_desc *desc;
1771
1772 if (group == EXT4_SB(sb)->s_groups_count)
1773 group = 0;
1774
1775 /* quick check to skip empty groups */
1776 grp = ext4_get_group_info(ac->ac_sb, group);
1777 if (grp->bb_free == 0)
1778 continue;
1779
1780 /*
1781 * if the group is already init we check whether it is
1782 * a good group and if not we don't load the buddy
1783 */
1784 if (EXT4_MB_GRP_NEED_INIT(grp)) {
1785 /*
1786 * we need full data about the group
1787 * to make a good selection
1788 */
1789 err = ext4_mb_load_buddy(sb, group, &e4b);
1790 if (err)
1791 goto out;
1792 ext4_mb_release_desc(&e4b);
1793 }
1794
1795 /*
1796 * If the particular group doesn't satisfy our
1797 * criteria we continue with the next group
1798 */
1799 if (!ext4_mb_good_group(ac, group, cr))
1800 continue;
1801
1802 err = ext4_mb_load_buddy(sb, group, &e4b);
1803 if (err)
1804 goto out;
1805
1806 ext4_lock_group(sb, group);
1807 if (!ext4_mb_good_group(ac, group, cr)) {
1808 /* someone did allocation from this group */
1809 ext4_unlock_group(sb, group);
1810 ext4_mb_release_desc(&e4b);
1811 continue;
1812 }
1813
1814 ac->ac_groups_scanned++;
1815 desc = ext4_get_group_desc(sb, group, NULL);
1816 if (cr == 0 || (desc->bg_flags &
1817 cpu_to_le16(EXT4_BG_BLOCK_UNINIT) &&
1818 ac->ac_2order != 0))
1819 ext4_mb_simple_scan_group(ac, &e4b);
1820 else if (cr == 1 &&
1821 ac->ac_g_ex.fe_len == sbi->s_stripe)
1822 ext4_mb_scan_aligned(ac, &e4b);
1823 else
1824 ext4_mb_complex_scan_group(ac, &e4b);
1825
1826 ext4_unlock_group(sb, group);
1827 ext4_mb_release_desc(&e4b);
1828
1829 if (ac->ac_status != AC_STATUS_CONTINUE)
1830 break;
1831 }
1832 }
1833
1834 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
1835 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1836 /*
1837 * We've been searching too long. Let's try to allocate
1838 * the best chunk we've found so far
1839 */
1840
1841 ext4_mb_try_best_found(ac, &e4b);
1842 if (ac->ac_status != AC_STATUS_FOUND) {
1843 /*
1844			 * Someone luckier has already allocated it.
1845			 * The only thing we can do is just take the first
1846			 * found block(s)
1847 printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n");
1848 */
1849 ac->ac_b_ex.fe_group = 0;
1850 ac->ac_b_ex.fe_start = 0;
1851 ac->ac_b_ex.fe_len = 0;
1852 ac->ac_status = AC_STATUS_CONTINUE;
1853 ac->ac_flags |= EXT4_MB_HINT_FIRST;
1854 cr = 3;
1855 atomic_inc(&sbi->s_mb_lost_chunks);
1856 goto repeat;
1857 }
1858 }
1859out:
1860 return err;
1861}
1862
1863#ifdef EXT4_MB_HISTORY
1864struct ext4_mb_proc_session {
1865 struct ext4_mb_history *history;
1866 struct super_block *sb;
1867 int start;
1868 int max;
1869};
1870
1871static void *ext4_mb_history_skip_empty(struct ext4_mb_proc_session *s,
1872 struct ext4_mb_history *hs,
1873 int first)
1874{
1875 if (hs == s->history + s->max)
1876 hs = s->history;
1877 if (!first && hs == s->history + s->start)
1878 return NULL;
1879 while (hs->orig.fe_len == 0) {
1880 hs++;
1881 if (hs == s->history + s->max)
1882 hs = s->history;
1883 if (hs == s->history + s->start)
1884 return NULL;
1885 }
1886 return hs;
1887}
1888
1889static void *ext4_mb_seq_history_start(struct seq_file *seq, loff_t *pos)
1890{
1891 struct ext4_mb_proc_session *s = seq->private;
1892 struct ext4_mb_history *hs;
1893 int l = *pos;
1894
1895 if (l == 0)
1896 return SEQ_START_TOKEN;
1897 hs = ext4_mb_history_skip_empty(s, s->history + s->start, 1);
1898 if (!hs)
1899 return NULL;
1900 while (--l && (hs = ext4_mb_history_skip_empty(s, ++hs, 0)) != NULL);
1901 return hs;
1902}
1903
1904static void *ext4_mb_seq_history_next(struct seq_file *seq, void *v,
1905 loff_t *pos)
1906{
1907 struct ext4_mb_proc_session *s = seq->private;
1908 struct ext4_mb_history *hs = v;
1909
1910 ++*pos;
1911 if (v == SEQ_START_TOKEN)
1912 return ext4_mb_history_skip_empty(s, s->history + s->start, 1);
1913 else
1914 return ext4_mb_history_skip_empty(s, ++hs, 0);
1915}
1916
1917static int ext4_mb_seq_history_show(struct seq_file *seq, void *v)
1918{
1919 char buf[25], buf2[25], buf3[25], *fmt;
1920 struct ext4_mb_history *hs = v;
1921
1922 if (v == SEQ_START_TOKEN) {
1923 seq_printf(seq, "%-5s %-8s %-23s %-23s %-23s %-5s "
1924 "%-5s %-2s %-5s %-5s %-5s %-6s\n",
1925 "pid", "inode", "original", "goal", "result", "found",
1926 "grps", "cr", "flags", "merge", "tail", "broken");
1927 return 0;
1928 }
1929
1930 if (hs->op == EXT4_MB_HISTORY_ALLOC) {
1931 fmt = "%-5u %-8u %-23s %-23s %-23s %-5u %-5u %-2u "
1932 "%-5u %-5s %-5u %-6u\n";
1933 sprintf(buf2, "%lu/%d/%u@%u", hs->result.fe_group,
1934 hs->result.fe_start, hs->result.fe_len,
1935 hs->result.fe_logical);
1936 sprintf(buf, "%lu/%d/%u@%u", hs->orig.fe_group,
1937 hs->orig.fe_start, hs->orig.fe_len,
1938 hs->orig.fe_logical);
1939 sprintf(buf3, "%lu/%d/%u@%u", hs->goal.fe_group,
1940 hs->goal.fe_start, hs->goal.fe_len,
1941 hs->goal.fe_logical);
1942 seq_printf(seq, fmt, hs->pid, hs->ino, buf, buf3, buf2,
1943 hs->found, hs->groups, hs->cr, hs->flags,
1944 hs->merged ? "M" : "", hs->tail,
1945 hs->buddy ? 1 << hs->buddy : 0);
1946 } else if (hs->op == EXT4_MB_HISTORY_PREALLOC) {
1947 fmt = "%-5u %-8u %-23s %-23s %-23s\n";
1948 sprintf(buf2, "%lu/%d/%u@%u", hs->result.fe_group,
1949 hs->result.fe_start, hs->result.fe_len,
1950 hs->result.fe_logical);
1951 sprintf(buf, "%lu/%d/%u@%u", hs->orig.fe_group,
1952 hs->orig.fe_start, hs->orig.fe_len,
1953 hs->orig.fe_logical);
1954 seq_printf(seq, fmt, hs->pid, hs->ino, buf, "", buf2);
1955 } else if (hs->op == EXT4_MB_HISTORY_DISCARD) {
1956 sprintf(buf2, "%lu/%d/%u", hs->result.fe_group,
1957 hs->result.fe_start, hs->result.fe_len);
1958 seq_printf(seq, "%-5u %-8u %-23s discard\n",
1959 hs->pid, hs->ino, buf2);
1960 } else if (hs->op == EXT4_MB_HISTORY_FREE) {
1961 sprintf(buf2, "%lu/%d/%u", hs->result.fe_group,
1962 hs->result.fe_start, hs->result.fe_len);
1963 seq_printf(seq, "%-5u %-8u %-23s free\n",
1964 hs->pid, hs->ino, buf2);
1965 }
1966 return 0;
1967}
1968
1969static void ext4_mb_seq_history_stop(struct seq_file *seq, void *v)
1970{
1971}
1972
1973static struct seq_operations ext4_mb_seq_history_ops = {
1974 .start = ext4_mb_seq_history_start,
1975 .next = ext4_mb_seq_history_next,
1976 .stop = ext4_mb_seq_history_stop,
1977 .show = ext4_mb_seq_history_show,
1978};
1979
1980static int ext4_mb_seq_history_open(struct inode *inode, struct file *file)
1981{
1982 struct super_block *sb = PDE(inode)->data;
1983 struct ext4_sb_info *sbi = EXT4_SB(sb);
1984 struct ext4_mb_proc_session *s;
1985 int rc;
1986 int size;
1987
74767c5a
SF
1988 if (unlikely(sbi->s_mb_history == NULL))
1989 return -ENOMEM;
c9de560d
AT
1990 s = kmalloc(sizeof(*s), GFP_KERNEL);
1991 if (s == NULL)
1992 return -ENOMEM;
1993 s->sb = sb;
1994 size = sizeof(struct ext4_mb_history) * sbi->s_mb_history_max;
1995 s->history = kmalloc(size, GFP_KERNEL);
1996 if (s->history == NULL) {
1997 kfree(s);
1998 return -ENOMEM;
1999 }
2000
2001 spin_lock(&sbi->s_mb_history_lock);
2002 memcpy(s->history, sbi->s_mb_history, size);
2003 s->max = sbi->s_mb_history_max;
2004 s->start = sbi->s_mb_history_cur % s->max;
2005 spin_unlock(&sbi->s_mb_history_lock);
2006
2007 rc = seq_open(file, &ext4_mb_seq_history_ops);
2008 if (rc == 0) {
2009 struct seq_file *m = (struct seq_file *)file->private_data;
2010 m->private = s;
2011 } else {
2012 kfree(s->history);
2013 kfree(s);
2014 }
2015 return rc;
2016
2017}
2018
2019static int ext4_mb_seq_history_release(struct inode *inode, struct file *file)
2020{
2021 struct seq_file *seq = (struct seq_file *)file->private_data;
2022 struct ext4_mb_proc_session *s = seq->private;
2023 kfree(s->history);
2024 kfree(s);
2025 return seq_release(inode, file);
2026}
2027
2028static ssize_t ext4_mb_seq_history_write(struct file *file,
2029 const char __user *buffer,
2030 size_t count, loff_t *ppos)
2031{
2032 struct seq_file *seq = (struct seq_file *)file->private_data;
2033 struct ext4_mb_proc_session *s = seq->private;
2034 struct super_block *sb = s->sb;
2035 char str[32];
2036 int value;
2037
2038 if (count >= sizeof(str)) {
2039 printk(KERN_ERR "EXT4-fs: %s string too long, max %u bytes\n",
2040 "mb_history", (int)sizeof(str));
2041 return -EOVERFLOW;
2042 }
2043
2044 if (copy_from_user(str, buffer, count))
2045 return -EFAULT;
2046
2047 value = simple_strtol(str, NULL, 0);
2048 if (value < 0)
2049 return -ERANGE;
2050 EXT4_SB(sb)->s_mb_history_filter = value;
2051
2052 return count;
2053}
2054
2055static struct file_operations ext4_mb_seq_history_fops = {
2056 .owner = THIS_MODULE,
2057 .open = ext4_mb_seq_history_open,
2058 .read = seq_read,
2059 .write = ext4_mb_seq_history_write,
2060 .llseek = seq_lseek,
2061 .release = ext4_mb_seq_history_release,
2062};
2063
2064static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2065{
2066 struct super_block *sb = seq->private;
2067 struct ext4_sb_info *sbi = EXT4_SB(sb);
2068 ext4_group_t group;
2069
2070 if (*pos < 0 || *pos >= sbi->s_groups_count)
2071 return NULL;
2072
2073 group = *pos + 1;
2074 return (void *) group;
2075}
2076
2077static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2078{
2079 struct super_block *sb = seq->private;
2080 struct ext4_sb_info *sbi = EXT4_SB(sb);
2081 ext4_group_t group;
2082
2083 ++*pos;
2084 if (*pos < 0 || *pos >= sbi->s_groups_count)
2085 return NULL;
2086 group = *pos + 1;
2087	return (void *) group;
2088}
2089
2090static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2091{
2092 struct super_block *sb = seq->private;
2093 long group = (long) v;
2094 int i;
2095 int err;
2096 struct ext4_buddy e4b;
2097 struct sg {
2098 struct ext4_group_info info;
2099 unsigned short counters[16];
2100 } sg;
2101
2102 group--;
2103 if (group == 0)
2104 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
2105 "[ %-5s %-5s %-5s %-5s %-5s %-5s %-5s "
2106 "%-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
2107 "group", "free", "frags", "first",
2108 "2^0", "2^1", "2^2", "2^3", "2^4", "2^5", "2^6",
2109 "2^7", "2^8", "2^9", "2^10", "2^11", "2^12", "2^13");
2110
2111 i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2112 sizeof(struct ext4_group_info);
2113 err = ext4_mb_load_buddy(sb, group, &e4b);
2114 if (err) {
2115 seq_printf(seq, "#%-5lu: I/O error\n", group);
2116 return 0;
2117 }
2118 ext4_lock_group(sb, group);
2119 memcpy(&sg, ext4_get_group_info(sb, group), i);
2120 ext4_unlock_group(sb, group);
2121 ext4_mb_release_desc(&e4b);
2122
2123 seq_printf(seq, "#%-5lu: %-5u %-5u %-5u [", group, sg.info.bb_free,
2124 sg.info.bb_fragments, sg.info.bb_first_free);
2125 for (i = 0; i <= 13; i++)
2126 seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
2127 sg.info.bb_counters[i] : 0);
2128 seq_printf(seq, " ]\n");
2129
2130 return 0;
2131}
2132
2133static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
2134{
2135}
2136
2137static struct seq_operations ext4_mb_seq_groups_ops = {
2138 .start = ext4_mb_seq_groups_start,
2139 .next = ext4_mb_seq_groups_next,
2140 .stop = ext4_mb_seq_groups_stop,
2141 .show = ext4_mb_seq_groups_show,
2142};
2143
2144static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
2145{
2146 struct super_block *sb = PDE(inode)->data;
2147 int rc;
2148
2149 rc = seq_open(file, &ext4_mb_seq_groups_ops);
2150 if (rc == 0) {
2151 struct seq_file *m = (struct seq_file *)file->private_data;
2152 m->private = sb;
2153 }
2154 return rc;
2155
2156}
2157
2158static struct file_operations ext4_mb_seq_groups_fops = {
2159 .owner = THIS_MODULE,
2160 .open = ext4_mb_seq_groups_open,
2161 .read = seq_read,
2162 .llseek = seq_lseek,
2163 .release = seq_release,
2164};
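/*
 * Example output (editorial; numbers are illustrative only): reading
 * /proc/fs/ext4/<partition>/mb_groups yields one line per block group,
 * formatted by ext4_mb_seq_groups_show() above, e.g.
 *
 *	#group: free  frags first [ 2^0  2^1  2^2  ... ]
 *	#0    : 24543 13    510   [ 1    3    2    ... ]
 */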
2165
2166static void ext4_mb_history_release(struct super_block *sb)
2167{
2168 struct ext4_sb_info *sbi = EXT4_SB(sb);
2169
9f6200bb
TT
2170 if (sbi->s_proc != NULL) {
2171 remove_proc_entry("mb_groups", sbi->s_proc);
2172 remove_proc_entry("mb_history", sbi->s_proc);
2173 }
c9de560d
AT
2174 kfree(sbi->s_mb_history);
2175}
2176
2177static void ext4_mb_history_init(struct super_block *sb)
2178{
2179 struct ext4_sb_info *sbi = EXT4_SB(sb);
2180 int i;
2181
9f6200bb
TT
2182 if (sbi->s_proc != NULL) {
2183 proc_create_data("mb_history", S_IRUGO, sbi->s_proc,
46fe74f2 2184 &ext4_mb_seq_history_fops, sb);
9f6200bb 2185 proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
46fe74f2 2186 &ext4_mb_seq_groups_fops, sb);
c9de560d
AT
2187 }
2188
2189 sbi->s_mb_history_max = 1000;
2190 sbi->s_mb_history_cur = 0;
2191 spin_lock_init(&sbi->s_mb_history_lock);
2192 i = sbi->s_mb_history_max * sizeof(struct ext4_mb_history);
74767c5a 2193 sbi->s_mb_history = kzalloc(i, GFP_KERNEL);
c9de560d
AT
2194	/* if we can't allocate history, then we simply won't use it */
2195}
2196
4ddfef7b
ES
2197static noinline_for_stack void
2198ext4_mb_store_history(struct ext4_allocation_context *ac)
c9de560d
AT
2199{
2200 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2201 struct ext4_mb_history h;
2202
2203 if (unlikely(sbi->s_mb_history == NULL))
2204 return;
2205
2206 if (!(ac->ac_op & sbi->s_mb_history_filter))
2207 return;
2208
2209 h.op = ac->ac_op;
2210 h.pid = current->pid;
2211 h.ino = ac->ac_inode ? ac->ac_inode->i_ino : 0;
2212 h.orig = ac->ac_o_ex;
2213 h.result = ac->ac_b_ex;
2214 h.flags = ac->ac_flags;
2215 h.found = ac->ac_found;
2216 h.groups = ac->ac_groups_scanned;
2217 h.cr = ac->ac_criteria;
2218 h.tail = ac->ac_tail;
2219 h.buddy = ac->ac_buddy;
2220 h.merged = 0;
2221 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) {
2222 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
2223 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
2224 h.merged = 1;
2225 h.goal = ac->ac_g_ex;
2226 h.result = ac->ac_f_ex;
2227 }
2228
2229 spin_lock(&sbi->s_mb_history_lock);
2230 memcpy(sbi->s_mb_history + sbi->s_mb_history_cur, &h, sizeof(h));
2231 if (++sbi->s_mb_history_cur >= sbi->s_mb_history_max)
2232 sbi->s_mb_history_cur = 0;
2233 spin_unlock(&sbi->s_mb_history_lock);
2234}
2235
2236#else
2237#define ext4_mb_history_release(sb)
2238#define ext4_mb_history_init(sb)
2239#endif
2240
5f21b0e6
FB
2241
2242/* Create and initialize ext4_group_info data for the given group. */
2243int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
2244 struct ext4_group_desc *desc)
2245{
2246 int i, len;
2247 int metalen = 0;
2248 struct ext4_sb_info *sbi = EXT4_SB(sb);
2249 struct ext4_group_info **meta_group_info;
2250
2251 /*
2252	 * First check if this group is the first of a group-descriptor block.
2253	 * If so, we have to allocate a new table of pointers
2254	 * to ext4_group_info structures
2255 */
2256 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2257 metalen = sizeof(*meta_group_info) <<
2258 EXT4_DESC_PER_BLOCK_BITS(sb);
2259 meta_group_info = kmalloc(metalen, GFP_KERNEL);
2260 if (meta_group_info == NULL) {
2261 printk(KERN_ERR "EXT4-fs: can't allocate mem for a "
2262 "buddy group\n");
2263 goto exit_meta_group_info;
2264 }
2265 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
2266 meta_group_info;
2267 }
2268
2269 /*
2270	 * Calculate the needed size; if you change the bb_counters size,
2271	 * don't forget about ext4_mb_generate_buddy()
2272 */
2273 len = offsetof(typeof(**meta_group_info),
2274 bb_counters[sb->s_blocksize_bits + 2]);
2275
2276 meta_group_info =
2277 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
2278 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
2279
2280 meta_group_info[i] = kzalloc(len, GFP_KERNEL);
2281 if (meta_group_info[i] == NULL) {
2282 printk(KERN_ERR "EXT4-fs: can't allocate buddy mem\n");
2283 goto exit_group_info;
2284 }
2285 set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
2286 &(meta_group_info[i]->bb_state));
2287
2288 /*
2289 * initialize bb_free to be able to skip
2290 * empty groups without initialization
2291 */
2292 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2293 meta_group_info[i]->bb_free =
2294 ext4_free_blocks_after_init(sb, group, desc);
2295 } else {
2296 meta_group_info[i]->bb_free =
2297 le16_to_cpu(desc->bg_free_blocks_count);
2298 }
2299
2300 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
2301
2302#ifdef DOUBLE_CHECK
2303 {
2304 struct buffer_head *bh;
2305 meta_group_info[i]->bb_bitmap =
2306 kmalloc(sb->s_blocksize, GFP_KERNEL);
2307 BUG_ON(meta_group_info[i]->bb_bitmap == NULL);
2308 bh = ext4_read_block_bitmap(sb, group);
2309 BUG_ON(bh == NULL);
2310 memcpy(meta_group_info[i]->bb_bitmap, bh->b_data,
2311 sb->s_blocksize);
2312 put_bh(bh);
2313 }
2314#endif
2315
2316 return 0;
2317
2318exit_group_info:
2319 /* If a meta_group_info table has been allocated, release it now */
2320 if (group % EXT4_DESC_PER_BLOCK(sb) == 0)
2321 kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
2322exit_meta_group_info:
2323 return -ENOMEM;
2324} /* ext4_mb_add_groupinfo */
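/*
 * Editorial sketch (hypothetical helper, not part of this file): the
 * two-level lookup that ext4_get_group_info() performs over the table
 * built above.  With 4KB blocks, EXT4_DESC_PER_BLOCK(sb) is 128, so
 * group 300 resolves to s_group_info[2][44].
 */
static inline struct ext4_group_info *
sketch_get_group_info(struct super_block *sb, ext4_group_t group)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	long indexv = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
	long indexh = group & (EXT4_DESC_PER_BLOCK(sb) - 1);

	return sbi->s_group_info[indexv][indexh];
}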
2325
2326/*
2327 * Add a group to the existing groups.
2328 * This function is used for online resize
2329 */
2330int ext4_mb_add_more_groupinfo(struct super_block *sb, ext4_group_t group,
2331 struct ext4_group_desc *desc)
2332{
2333 struct ext4_sb_info *sbi = EXT4_SB(sb);
2334 struct inode *inode = sbi->s_buddy_cache;
2335 int blocks_per_page;
2336 int block;
2337 int pnum;
2338 struct page *page;
2339 int err;
2340
2341 /* Add group based on group descriptor*/
2342 err = ext4_mb_add_groupinfo(sb, group, desc);
2343 if (err)
2344 return err;
2345
2346 /*
2347	 * Cache pages containing dynamic mb_alloc data (buddy and bitmap)
2348	 * are marked not up to date so that they will be re-initialized
2349	 * during the next call to ext4_mb_load_buddy
2350 */
2351
2352 /* Set buddy page as not up to date */
2353 blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
2354 block = group * 2;
2355 pnum = block / blocks_per_page;
2356 page = find_get_page(inode->i_mapping, pnum);
2357 if (page != NULL) {
2358 ClearPageUptodate(page);
2359 page_cache_release(page);
2360 }
2361
2362 /* Set bitmap page as not up to date */
2363 block++;
2364 pnum = block / blocks_per_page;
2365 page = find_get_page(inode->i_mapping, pnum);
2366 if (page != NULL) {
2367 ClearPageUptodate(page);
2368 page_cache_release(page);
2369 }
2370
2371 return 0;
2372}
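/*
 * Worked example (editorial; assumes 4KB pages and 4KB blocks, so
 * blocks_per_page == 1): group 5 keeps its buddy data in block 10 and
 * its bitmap in block 11 of the buddy-cache inode, i.e. pages 10 and
 * 11 -- exactly the two pages the find_get_page() calls above
 * invalidate.
 */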
2373
2374/*
2375 * Update an existing group.
2376 * This function is used for online resize
2377 */
2378void ext4_mb_update_group_info(struct ext4_group_info *grp, ext4_grpblk_t add)
2379{
2380 grp->bb_free += add;
2381}
2382
c9de560d
AT
2383static int ext4_mb_init_backend(struct super_block *sb)
2384{
2385 ext4_group_t i;
5f21b0e6 2386 int metalen;
c9de560d 2387 struct ext4_sb_info *sbi = EXT4_SB(sb);
5f21b0e6
FB
2388 struct ext4_super_block *es = sbi->s_es;
2389 int num_meta_group_infos;
2390 int num_meta_group_infos_max;
2391 int array_size;
c9de560d 2392 struct ext4_group_info **meta_group_info;
5f21b0e6
FB
2393 struct ext4_group_desc *desc;
2394
2395 /* This is the number of blocks used by GDT */
2396 num_meta_group_infos = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) -
2397 1) >> EXT4_DESC_PER_BLOCK_BITS(sb);
2398
2399 /*
2400 * This is the total number of blocks used by GDT including
2401 * the number of reserved blocks for GDT.
2402 * The s_group_info array is allocated with this value
2403 * to allow a clean online resize without a complex
2404 * manipulation of pointer.
2405 * The drawback is the unused memory when no resize
2406 * occurs but it's very low in terms of pages
2407 * (see comments below)
2408 * Need to handle this properly when META_BG resizing is allowed
2409 */
2410 num_meta_group_infos_max = num_meta_group_infos +
2411 le16_to_cpu(es->s_reserved_gdt_blocks);
c9de560d 2412
5f21b0e6
FB
2413 /*
2414 * array_size is the size of s_group_info array. We round it
2415 * to the next power of two because this approximation is done
2416 * internally by kmalloc so we can have some more memory
2417 * for free here (e.g. may be used for META_BG resize).
2418 */
2419 array_size = 1;
2420 while (array_size < sizeof(*sbi->s_group_info) *
2421 num_meta_group_infos_max)
2422 array_size = array_size << 1;
c9de560d
AT
2423 /* An 8TB filesystem with 64-bit pointers requires a 4096 byte
2424 * kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
2425 * So a two level scheme suffices for now. */
5f21b0e6 2426 sbi->s_group_info = kmalloc(array_size, GFP_KERNEL);
c9de560d
AT
2427 if (sbi->s_group_info == NULL) {
2428 printk(KERN_ERR "EXT4-fs: can't allocate buddy meta group\n");
2429 return -ENOMEM;
2430 }
2431 sbi->s_buddy_cache = new_inode(sb);
2432 if (sbi->s_buddy_cache == NULL) {
2433 printk(KERN_ERR "EXT4-fs: can't get new inode\n");
2434 goto err_freesgi;
2435 }
2436 EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
2437
2438 metalen = sizeof(*meta_group_info) << EXT4_DESC_PER_BLOCK_BITS(sb);
2439 for (i = 0; i < num_meta_group_infos; i++) {
2440 if ((i + 1) == num_meta_group_infos)
2441 metalen = sizeof(*meta_group_info) *
2442 (sbi->s_groups_count -
2443 (i << EXT4_DESC_PER_BLOCK_BITS(sb)));
2444 meta_group_info = kmalloc(metalen, GFP_KERNEL);
2445 if (meta_group_info == NULL) {
2446 printk(KERN_ERR "EXT4-fs: can't allocate mem for a "
2447 "buddy group\n");
2448 goto err_freemeta;
2449 }
2450 sbi->s_group_info[i] = meta_group_info;
2451 }
2452
c9de560d 2453 for (i = 0; i < sbi->s_groups_count; i++) {
c9de560d
AT
2454 desc = ext4_get_group_desc(sb, i, NULL);
2455 if (desc == NULL) {
2456 printk(KERN_ERR
2457 "EXT4-fs: can't read descriptor %lu\n", i);
2458 goto err_freebuddy;
2459 }
5f21b0e6
FB
2460 if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
2461 goto err_freebuddy;
c9de560d
AT
2462 }
2463
2464 return 0;
2465
2466err_freebuddy:
f1fa3342 2467 while (i-- > 0)
c9de560d 2468 kfree(ext4_get_group_info(sb, i));
c9de560d
AT
2469 i = num_meta_group_infos;
2470err_freemeta:
f1fa3342 2471 while (i-- > 0)
c9de560d
AT
2472 kfree(sbi->s_group_info[i]);
2473 iput(sbi->s_buddy_cache);
2474err_freesgi:
2475 kfree(sbi->s_group_info);
2476 return -ENOMEM;
2477}
2478
2479int ext4_mb_init(struct super_block *sb, int needs_recovery)
2480{
2481 struct ext4_sb_info *sbi = EXT4_SB(sb);
6be2ded1 2482 unsigned i, j;
c9de560d
AT
2483 unsigned offset;
2484 unsigned max;
74767c5a 2485 int ret;
c9de560d 2486
c9de560d
AT
2487 i = (sb->s_blocksize_bits + 2) * sizeof(unsigned short);
2488
2489 sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
2490 if (sbi->s_mb_offsets == NULL) {
c9de560d
AT
2491 return -ENOMEM;
2492 }
2493 sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
2494 if (sbi->s_mb_maxs == NULL) {
c9de560d
AT
2495		kfree(sbi->s_mb_offsets);
2496 return -ENOMEM;
2497 }
2498
2499 /* order 0 is regular bitmap */
2500 sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
2501 sbi->s_mb_offsets[0] = 0;
2502
2503 i = 1;
2504 offset = 0;
2505 max = sb->s_blocksize << 2;
2506 do {
2507 sbi->s_mb_offsets[i] = offset;
2508 sbi->s_mb_maxs[i] = max;
2509 offset += 1 << (sb->s_blocksize_bits - i);
2510 max = max >> 1;
2511 i++;
2512 } while (i <= sb->s_blocksize_bits + 1);
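	/*
	 * Worked example (editorial; assumes s_blocksize_bits == 12,
	 * i.e. 4KB blocks): maxs[0] = 32768 bits (the bitmap itself);
	 * within the buddy block, order 1 sits at byte offset 0 with
	 * 16384 bits, order 2 at 2048 with 8192 bits, order 3 at 3072
	 * with 4096 bits, and so on -- each order halves the bit count
	 * and takes half of the remaining space.
	 */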
2513
2514 /* init file for buddy data */
74767c5a
SF
2515 ret = ext4_mb_init_backend(sb);
2516 if (ret != 0) {
c9de560d
AT
2517 kfree(sbi->s_mb_offsets);
2518 kfree(sbi->s_mb_maxs);
74767c5a 2519 return ret;
c9de560d
AT
2520 }
2521
2522 spin_lock_init(&sbi->s_md_lock);
2523 INIT_LIST_HEAD(&sbi->s_active_transaction);
2524 INIT_LIST_HEAD(&sbi->s_closed_transaction);
2525 INIT_LIST_HEAD(&sbi->s_committed_transaction);
2526 spin_lock_init(&sbi->s_bal_lock);
2527
2528 sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
2529 sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
2530 sbi->s_mb_stats = MB_DEFAULT_STATS;
2531 sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
2532 sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
2533 sbi->s_mb_history_filter = EXT4_MB_HISTORY_DEFAULT;
2534 sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC;
2535
730c213c 2536 sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
c9de560d 2537 if (sbi->s_locality_groups == NULL) {
c9de560d
AT
2538 kfree(sbi->s_mb_offsets);
2539 kfree(sbi->s_mb_maxs);
2540 return -ENOMEM;
2541 }
730c213c 2542 for_each_possible_cpu(i) {
c9de560d 2543 struct ext4_locality_group *lg;
730c213c 2544 lg = per_cpu_ptr(sbi->s_locality_groups, i);
c9de560d 2545 mutex_init(&lg->lg_mutex);
6be2ded1
AK
2546 for (j = 0; j < PREALLOC_TB_SIZE; j++)
2547 INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
c9de560d
AT
2548 spin_lock_init(&lg->lg_prealloc_lock);
2549 }
2550
2551 ext4_mb_init_per_dev_proc(sb);
2552 ext4_mb_history_init(sb);
2553
4776004f 2554 printk(KERN_INFO "EXT4-fs: mballoc enabled\n");
c9de560d
AT
2555 return 0;
2556}
2557
2558/* needs to be called with the ext4 group lock held (ext4_lock_group) */
2559static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
2560{
2561 struct ext4_prealloc_space *pa;
2562 struct list_head *cur, *tmp;
2563 int count = 0;
2564
2565 list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
2566 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
2567 list_del(&pa->pa_group_list);
2568 count++;
2569 kfree(pa);
2570 }
2571 if (count)
2572 mb_debug("mballoc: %u PAs left\n", count);
2573
2574}
2575
2576int ext4_mb_release(struct super_block *sb)
2577{
2578 ext4_group_t i;
2579 int num_meta_group_infos;
2580 struct ext4_group_info *grinfo;
2581 struct ext4_sb_info *sbi = EXT4_SB(sb);
2582
c9de560d
AT
2583 /* release freed, non-committed blocks */
2584 spin_lock(&sbi->s_md_lock);
2585 list_splice_init(&sbi->s_closed_transaction,
2586 &sbi->s_committed_transaction);
2587 list_splice_init(&sbi->s_active_transaction,
2588 &sbi->s_committed_transaction);
2589 spin_unlock(&sbi->s_md_lock);
2590 ext4_mb_free_committed_blocks(sb);
2591
2592 if (sbi->s_group_info) {
2593 for (i = 0; i < sbi->s_groups_count; i++) {
2594 grinfo = ext4_get_group_info(sb, i);
2595#ifdef DOUBLE_CHECK
2596 kfree(grinfo->bb_bitmap);
2597#endif
2598 ext4_lock_group(sb, i);
2599 ext4_mb_cleanup_pa(grinfo);
2600 ext4_unlock_group(sb, i);
2601 kfree(grinfo);
2602 }
2603 num_meta_group_infos = (sbi->s_groups_count +
2604 EXT4_DESC_PER_BLOCK(sb) - 1) >>
2605 EXT4_DESC_PER_BLOCK_BITS(sb);
2606 for (i = 0; i < num_meta_group_infos; i++)
2607 kfree(sbi->s_group_info[i]);
2608 kfree(sbi->s_group_info);
2609 }
2610 kfree(sbi->s_mb_offsets);
2611 kfree(sbi->s_mb_maxs);
2612 if (sbi->s_buddy_cache)
2613 iput(sbi->s_buddy_cache);
2614 if (sbi->s_mb_stats) {
2615 printk(KERN_INFO
2616 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
2617 atomic_read(&sbi->s_bal_allocated),
2618 atomic_read(&sbi->s_bal_reqs),
2619 atomic_read(&sbi->s_bal_success));
2620 printk(KERN_INFO
2621 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
2622 "%u 2^N hits, %u breaks, %u lost\n",
2623 atomic_read(&sbi->s_bal_ex_scanned),
2624 atomic_read(&sbi->s_bal_goals),
2625 atomic_read(&sbi->s_bal_2orders),
2626 atomic_read(&sbi->s_bal_breaks),
2627 atomic_read(&sbi->s_mb_lost_chunks));
2628 printk(KERN_INFO
2629 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
2630 sbi->s_mb_buddies_generated++,
2631 sbi->s_mb_generation_time);
2632 printk(KERN_INFO
2633 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
2634 atomic_read(&sbi->s_mb_preallocated),
2635 atomic_read(&sbi->s_mb_discarded));
2636 }
2637
730c213c 2638 free_percpu(sbi->s_locality_groups);
c9de560d
AT
2639 ext4_mb_history_release(sb);
2640 ext4_mb_destroy_per_dev_proc(sb);
2641
2642 return 0;
2643}
2644
4ddfef7b
ES
2645static noinline_for_stack void
2646ext4_mb_free_committed_blocks(struct super_block *sb)
c9de560d
AT
2647{
2648 struct ext4_sb_info *sbi = EXT4_SB(sb);
2649 int err;
2650 int i;
2651 int count = 0;
2652 int count2 = 0;
2653 struct ext4_free_metadata *md;
2654 struct ext4_buddy e4b;
2655
2656 if (list_empty(&sbi->s_committed_transaction))
2657 return;
2658
2659	/* there are committed blocks still to be freed */
2660 do {
2661 /* get next array of blocks */
2662 md = NULL;
2663 spin_lock(&sbi->s_md_lock);
2664 if (!list_empty(&sbi->s_committed_transaction)) {
2665 md = list_entry(sbi->s_committed_transaction.next,
2666 struct ext4_free_metadata, list);
2667 list_del(&md->list);
2668 }
2669 spin_unlock(&sbi->s_md_lock);
2670
2671 if (md == NULL)
2672 break;
2673
2674 mb_debug("gonna free %u blocks in group %lu (0x%p):",
2675 md->num, md->group, md);
2676
2677 err = ext4_mb_load_buddy(sb, md->group, &e4b);
2678 /* we expect to find existing buddy because it's pinned */
2679 BUG_ON(err != 0);
2680
2681 /* there are blocks to put in buddy to make them really free */
2682 count += md->num;
2683 count2++;
2684 ext4_lock_group(sb, md->group);
2685 for (i = 0; i < md->num; i++) {
2686 mb_debug(" %u", md->blocks[i]);
7e5a8cdd 2687 mb_free_blocks(NULL, &e4b, md->blocks[i], 1);
c9de560d
AT
2688 }
2689 mb_debug("\n");
2690 ext4_unlock_group(sb, md->group);
2691
2692 /* balance refcounts from ext4_mb_free_metadata() */
2693 page_cache_release(e4b.bd_buddy_page);
2694 page_cache_release(e4b.bd_bitmap_page);
2695
2696 kfree(md);
2697 ext4_mb_release_desc(&e4b);
2698
2699 } while (md);
2700
2701 mb_debug("freed %u blocks in %u structures\n", count, count2);
2702}
2703
c9de560d
AT
2704#define EXT4_MB_STATS_NAME "stats"
2705#define EXT4_MB_MAX_TO_SCAN_NAME "max_to_scan"
2706#define EXT4_MB_MIN_TO_SCAN_NAME "min_to_scan"
2707#define EXT4_MB_ORDER2_REQ "order2_req"
2708#define EXT4_MB_STREAM_REQ "stream_req"
2709#define EXT4_MB_GROUP_PREALLOC "group_prealloc"
2710
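/*
 * Example usage (editorial; the device name is hypothetical): each name
 * above becomes a tunable under /proc/fs/ext4/<partition>/, e.g.
 *
 *	cat /proc/fs/ext4/sda1/stream_req
 *	echo 32 > /proc/fs/ext4/sda1/stream_req
 *
 * writes feed the matching sbi->s_mb_* field registered below.
 */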
c9de560d
AT
2711static int ext4_mb_init_per_dev_proc(struct super_block *sb)
2712{
2713 mode_t mode = S_IFREG | S_IRUGO | S_IWUSR;
2714 struct ext4_sb_info *sbi = EXT4_SB(sb);
2715 struct proc_dir_entry *proc;
c9de560d 2716
9f6200bb 2717 if (sbi->s_proc == NULL)
cfbe7e4f 2718 return -EINVAL;
c9de560d 2719
5e8814f2
TT
2720 EXT4_PROC_HANDLER(EXT4_MB_STATS_NAME, mb_stats);
2721 EXT4_PROC_HANDLER(EXT4_MB_MAX_TO_SCAN_NAME, mb_max_to_scan);
2722 EXT4_PROC_HANDLER(EXT4_MB_MIN_TO_SCAN_NAME, mb_min_to_scan);
2723 EXT4_PROC_HANDLER(EXT4_MB_ORDER2_REQ, mb_order2_reqs);
2724 EXT4_PROC_HANDLER(EXT4_MB_STREAM_REQ, mb_stream_request);
2725 EXT4_PROC_HANDLER(EXT4_MB_GROUP_PREALLOC, mb_group_prealloc);
c9de560d
AT
2726 return 0;
2727
2728err_out:
9f6200bb
TT
2729 remove_proc_entry(EXT4_MB_GROUP_PREALLOC, sbi->s_proc);
2730 remove_proc_entry(EXT4_MB_STREAM_REQ, sbi->s_proc);
2731 remove_proc_entry(EXT4_MB_ORDER2_REQ, sbi->s_proc);
2732 remove_proc_entry(EXT4_MB_MIN_TO_SCAN_NAME, sbi->s_proc);
2733 remove_proc_entry(EXT4_MB_MAX_TO_SCAN_NAME, sbi->s_proc);
2734 remove_proc_entry(EXT4_MB_STATS_NAME, sbi->s_proc);
c9de560d
AT
2735 return -ENOMEM;
2736}
2737
2738static int ext4_mb_destroy_per_dev_proc(struct super_block *sb)
2739{
2740 struct ext4_sb_info *sbi = EXT4_SB(sb);
c9de560d 2741
9f6200bb 2742 if (sbi->s_proc == NULL)
c9de560d
AT
2743 return -EINVAL;
2744
9f6200bb
TT
2745 remove_proc_entry(EXT4_MB_GROUP_PREALLOC, sbi->s_proc);
2746 remove_proc_entry(EXT4_MB_STREAM_REQ, sbi->s_proc);
2747 remove_proc_entry(EXT4_MB_ORDER2_REQ, sbi->s_proc);
2748 remove_proc_entry(EXT4_MB_MIN_TO_SCAN_NAME, sbi->s_proc);
2749 remove_proc_entry(EXT4_MB_MAX_TO_SCAN_NAME, sbi->s_proc);
2750 remove_proc_entry(EXT4_MB_STATS_NAME, sbi->s_proc);
c9de560d
AT
2751
2752 return 0;
2753}
2754
2755int __init init_ext4_mballoc(void)
2756{
2757 ext4_pspace_cachep =
2758 kmem_cache_create("ext4_prealloc_space",
2759 sizeof(struct ext4_prealloc_space),
2760 0, SLAB_RECLAIM_ACCOUNT, NULL);
2761 if (ext4_pspace_cachep == NULL)
2762 return -ENOMEM;
2763
256bdb49
ES
2764 ext4_ac_cachep =
2765 kmem_cache_create("ext4_alloc_context",
2766 sizeof(struct ext4_allocation_context),
2767 0, SLAB_RECLAIM_ACCOUNT, NULL);
2768 if (ext4_ac_cachep == NULL) {
2769 kmem_cache_destroy(ext4_pspace_cachep);
2770 return -ENOMEM;
2771 }
c9de560d
AT
2772 return 0;
2773}
2774
2775void exit_ext4_mballoc(void)
2776{
2777 /* XXX: synchronize_rcu(); */
2778 kmem_cache_destroy(ext4_pspace_cachep);
256bdb49 2779 kmem_cache_destroy(ext4_ac_cachep);
c9de560d
AT
2780}
2781
2782
2783/*
2784 * Check quota and mark the chosen space (ac->ac_b_ex) non-free in bitmaps
2785 * Returns 0 on success or an error code
2786 */
4ddfef7b
ES
2787static noinline_for_stack int
2788ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
6bc6e63f 2789 handle_t *handle, unsigned long reserv_blks)
c9de560d
AT
2790{
2791 struct buffer_head *bitmap_bh = NULL;
2792 struct ext4_super_block *es;
2793 struct ext4_group_desc *gdp;
2794 struct buffer_head *gdp_bh;
2795 struct ext4_sb_info *sbi;
2796 struct super_block *sb;
2797 ext4_fsblk_t block;
519deca0 2798 int err, len;
c9de560d
AT
2799
2800 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
2801 BUG_ON(ac->ac_b_ex.fe_len <= 0);
2802
2803 sb = ac->ac_sb;
2804 sbi = EXT4_SB(sb);
2805 es = sbi->s_es;
2806
c9de560d
AT
2807
2808 err = -EIO;
574ca174 2809 bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
c9de560d
AT
2810 if (!bitmap_bh)
2811 goto out_err;
2812
2813 err = ext4_journal_get_write_access(handle, bitmap_bh);
2814 if (err)
2815 goto out_err;
2816
2817 err = -EIO;
2818 gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
2819 if (!gdp)
2820 goto out_err;
2821
03cddb80
AK
2822 ext4_debug("using block group %lu(%d)\n", ac->ac_b_ex.fe_group,
2823 gdp->bg_free_blocks_count);
2824
c9de560d
AT
2825 err = ext4_journal_get_write_access(handle, gdp_bh);
2826 if (err)
2827 goto out_err;
2828
2829 block = ac->ac_b_ex.fe_group * EXT4_BLOCKS_PER_GROUP(sb)
2830 + ac->ac_b_ex.fe_start
2831 + le32_to_cpu(es->s_first_data_block);
2832
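	/*
	 * Worked example (editorial; hypothetical geometry): with
	 * 32768 blocks per group and s_first_data_block == 0,
	 * fe_group == 3 and fe_start == 100 yield physical block
	 * 3 * 32768 + 100 = 98404.
	 */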
519deca0
AK
2833 len = ac->ac_b_ex.fe_len;
2834 if (in_range(ext4_block_bitmap(sb, gdp), block, len) ||
2835 in_range(ext4_inode_bitmap(sb, gdp), block, len) ||
2836 in_range(block, ext4_inode_table(sb, gdp),
2837 EXT4_SB(sb)->s_itb_per_group) ||
2838 in_range(block + len - 1, ext4_inode_table(sb, gdp),
2839 EXT4_SB(sb)->s_itb_per_group)) {
46e665e9 2840 ext4_error(sb, __func__,
c9de560d
AT
2841 "Allocating block in system zone - block = %llu",
2842 block);
519deca0
AK
2843		/* The filesystem was mounted not to panic on error,
2844		 * so fix the bitmap and repeat the block allocation.
2845		 * We leak some of the blocks here.
2846 */
2847 mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group),
2848 bitmap_bh->b_data, ac->ac_b_ex.fe_start,
2849 ac->ac_b_ex.fe_len);
2850 err = ext4_journal_dirty_metadata(handle, bitmap_bh);
2851 if (!err)
2852 err = -EAGAIN;
2853 goto out_err;
c9de560d
AT
2854 }
2855#ifdef AGGRESSIVE_CHECK
2856 {
2857 int i;
2858 for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
2859 BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
2860 bitmap_bh->b_data));
2861 }
2862 }
2863#endif
2864 mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group), bitmap_bh->b_data,
2865 ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
2866
2867 spin_lock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
2868 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2869 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
2870 gdp->bg_free_blocks_count =
2871 cpu_to_le16(ext4_free_blocks_after_init(sb,
2872 ac->ac_b_ex.fe_group,
2873 gdp));
2874 }
e8546d06 2875 le16_add_cpu(&gdp->bg_free_blocks_count, -ac->ac_b_ex.fe_len);
c9de560d
AT
2876 gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
2877 spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
6bc6e63f 2878 percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len);
d2a17637 2879 /*
6bc6e63f 2880 * Now reduce the dirty block count also. Should not go negative
d2a17637 2881 */
6bc6e63f
AK
2882 if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
2883 /* release all the reserved blocks if non delalloc */
2884 percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks);
2885 else
2886 percpu_counter_sub(&sbi->s_dirtyblocks_counter,
2887 ac->ac_b_ex.fe_len);
c9de560d 2888
772cb7c8
JS
2889 if (sbi->s_log_groups_per_flex) {
2890 ext4_group_t flex_group = ext4_flex_group(sbi,
2891 ac->ac_b_ex.fe_group);
2892 spin_lock(sb_bgl_lock(sbi, flex_group));
2893 sbi->s_flex_groups[flex_group].free_blocks -= ac->ac_b_ex.fe_len;
2894 spin_unlock(sb_bgl_lock(sbi, flex_group));
2895 }
2896
c9de560d
AT
2897 err = ext4_journal_dirty_metadata(handle, bitmap_bh);
2898 if (err)
2899 goto out_err;
2900 err = ext4_journal_dirty_metadata(handle, gdp_bh);
2901
2902out_err:
2903 sb->s_dirt = 1;
42a10add 2904 brelse(bitmap_bh);
c9de560d
AT
2905 return err;
2906}
2907
2908/*
2909 * here we normalize the request for a locality group
2910 * Group requests are normalized to the s_stripe size if it was set via the
2911 * mount option; if not, we set it to s_mb_group_prealloc, which can be
2912 * configured via /proc/fs/ext4/<partition>/group_prealloc
2913 *
2914 * XXX: should we try to preallocate more than the group has now?
2915 */
2916static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
2917{
2918 struct super_block *sb = ac->ac_sb;
2919 struct ext4_locality_group *lg = ac->ac_lg;
2920
2921 BUG_ON(lg == NULL);
2922 if (EXT4_SB(sb)->s_stripe)
2923 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_stripe;
2924 else
2925 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
60bd63d1 2926 mb_debug("#%u: goal %u blocks for locality group\n",
c9de560d
AT
2927 current->pid, ac->ac_g_ex.fe_len);
2928}
2929
2930/*
2931 * Normalization means making request better in terms of
2932 * size and alignment
2933 */
4ddfef7b
ES
2934static noinline_for_stack void
2935ext4_mb_normalize_request(struct ext4_allocation_context *ac,
c9de560d
AT
2936 struct ext4_allocation_request *ar)
2937{
2938 int bsbits, max;
2939 ext4_lblk_t end;
c9de560d
AT
2940 loff_t size, orig_size, start_off;
2941 ext4_lblk_t start, orig_start;
2942 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
9a0762c5 2943 struct ext4_prealloc_space *pa;
c9de560d
AT
2944
2945 /* do normalize only data requests, metadata requests
2946 do not need preallocation */
2947 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
2948 return;
2949
2950	/* sometimes the caller may want exact blocks */
2951 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2952 return;
2953
2954 /* caller may indicate that preallocation isn't
2955 * required (it's a tail, for example) */
2956 if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
2957 return;
2958
2959 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
2960 ext4_mb_normalize_group_request(ac);
2961 return ;
2962 }
2963
2964 bsbits = ac->ac_sb->s_blocksize_bits;
2965
2966 /* first, let's learn actual file size
2967 * given current request is allocated */
2968 size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
2969 size = size << bsbits;
2970 if (size < i_size_read(ac->ac_inode))
2971 size = i_size_read(ac->ac_inode);
2972
1930479c
VC
2973 /* max size of free chunks */
2974 max = 2 << bsbits;
c9de560d 2975
1930479c
VC
2976#define NRL_CHECK_SIZE(req, size, max, chunk_size) \
2977 (req <= (size) || max <= (chunk_size))
c9de560d
AT
2978
2979 /* first, try to predict filesize */
2980 /* XXX: should this table be tunable? */
2981 start_off = 0;
2982 if (size <= 16 * 1024) {
2983 size = 16 * 1024;
2984 } else if (size <= 32 * 1024) {
2985 size = 32 * 1024;
2986 } else if (size <= 64 * 1024) {
2987 size = 64 * 1024;
2988 } else if (size <= 128 * 1024) {
2989 size = 128 * 1024;
2990 } else if (size <= 256 * 1024) {
2991 size = 256 * 1024;
2992 } else if (size <= 512 * 1024) {
2993 size = 512 * 1024;
2994 } else if (size <= 1024 * 1024) {
2995 size = 1024 * 1024;
1930479c 2996 } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
c9de560d 2997 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
1930479c
VC
2998 (21 - bsbits)) << 21;
2999 size = 2 * 1024 * 1024;
3000 } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
c9de560d
AT
3001 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3002 (22 - bsbits)) << 22;
3003 size = 4 * 1024 * 1024;
3004 } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
1930479c 3005 (8<<20)>>bsbits, max, 8 * 1024)) {
c9de560d
AT
3006 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3007 (23 - bsbits)) << 23;
3008 size = 8 * 1024 * 1024;
3009 } else {
3010 start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits;
3011 size = ac->ac_o_ex.fe_len << bsbits;
3012 }
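	/*
	 * Worked examples (editorial): a 100KB file rounds up to a
	 * 128KB goal through the table above; a 3MB file takes the
	 * first NRL_CHECK_SIZE branch, so the goal becomes a 2MB chunk
	 * whose start_off is the logical offset rounded down to a 2MB
	 * boundary.
	 */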
3013 orig_size = size = size >> bsbits;
3014 orig_start = start = start_off >> bsbits;
3015
3016 /* don't cover already allocated blocks in selected range */
3017 if (ar->pleft && start <= ar->lleft) {
3018 size -= ar->lleft + 1 - start;
3019 start = ar->lleft + 1;
3020 }
3021 if (ar->pright && start + size - 1 >= ar->lright)
3022 size -= start + size - ar->lright;
3023
3024 end = start + size;
3025
3026 /* check we don't cross already preallocated blocks */
3027 rcu_read_lock();
9a0762c5 3028 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
c9de560d
AT
3029 unsigned long pa_end;
3030
c9de560d
AT
3031 if (pa->pa_deleted)
3032 continue;
3033 spin_lock(&pa->pa_lock);
3034 if (pa->pa_deleted) {
3035 spin_unlock(&pa->pa_lock);
3036 continue;
3037 }
3038
3039 pa_end = pa->pa_lstart + pa->pa_len;
3040
3041 /* PA must not overlap original request */
3042 BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
3043 ac->ac_o_ex.fe_logical < pa->pa_lstart));
3044
3045		/* skip PAs that this normalized request doesn't overlap with */
3046 if (pa->pa_lstart >= end) {
3047 spin_unlock(&pa->pa_lock);
3048 continue;
3049 }
3050 if (pa_end <= start) {
3051 spin_unlock(&pa->pa_lock);
3052 continue;
3053 }
3054 BUG_ON(pa->pa_lstart <= start && pa_end >= end);
3055
3056 if (pa_end <= ac->ac_o_ex.fe_logical) {
3057 BUG_ON(pa_end < start);
3058 start = pa_end;
3059 }
3060
3061 if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
3062 BUG_ON(pa->pa_lstart > end);
3063 end = pa->pa_lstart;
3064 }
3065 spin_unlock(&pa->pa_lock);
3066 }
3067 rcu_read_unlock();
3068 size = end - start;
3069
3070 /* XXX: extra loop to check we really don't overlap preallocations */
3071 rcu_read_lock();
9a0762c5 3072 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
c9de560d 3073 unsigned long pa_end;
c9de560d
AT
3074 spin_lock(&pa->pa_lock);
3075 if (pa->pa_deleted == 0) {
3076 pa_end = pa->pa_lstart + pa->pa_len;
3077 BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
3078 }
3079 spin_unlock(&pa->pa_lock);
3080 }
3081 rcu_read_unlock();
3082
3083 if (start + size <= ac->ac_o_ex.fe_logical &&
3084 start > ac->ac_o_ex.fe_logical) {
3085 printk(KERN_ERR "start %lu, size %lu, fe_logical %lu\n",
3086 (unsigned long) start, (unsigned long) size,
3087 (unsigned long) ac->ac_o_ex.fe_logical);
3088 }
3089 BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
3090 start > ac->ac_o_ex.fe_logical);
3091 BUG_ON(size <= 0 || size >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
3092
3093 /* now prepare goal request */
3094
3095 /* XXX: is it better to align blocks WRT to logical
3096 * placement or satisfy big request as is */
3097 ac->ac_g_ex.fe_logical = start;
3098 ac->ac_g_ex.fe_len = size;
3099
3100 /* define goal start in order to merge */
3101 if (ar->pright && (ar->lright == (start + size))) {
3102 /* merge to the right */
3103 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
3104 &ac->ac_f_ex.fe_group,
3105 &ac->ac_f_ex.fe_start);
3106 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3107 }
3108 if (ar->pleft && (ar->lleft + 1 == start)) {
3109 /* merge to the left */
3110 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
3111 &ac->ac_f_ex.fe_group,
3112 &ac->ac_f_ex.fe_start);
3113 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3114 }
3115
3116 mb_debug("goal: %u(was %u) blocks at %u\n", (unsigned) size,
3117 (unsigned) orig_size, (unsigned) start);
3118}
3119
3120static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
3121{
3122 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3123
3124 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
3125 atomic_inc(&sbi->s_bal_reqs);
3126 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
3127 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
3128 atomic_inc(&sbi->s_bal_success);
3129 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
3130 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
3131 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
3132 atomic_inc(&sbi->s_bal_goals);
3133 if (ac->ac_found > sbi->s_mb_max_to_scan)
3134 atomic_inc(&sbi->s_bal_breaks);
3135 }
3136
3137 ext4_mb_store_history(ac);
3138}
3139
3140/*
3141 * use blocks preallocated to inode
3142 */
3143static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
3144 struct ext4_prealloc_space *pa)
3145{
3146 ext4_fsblk_t start;
3147 ext4_fsblk_t end;
3148 int len;
3149
3150 /* found preallocated blocks, use them */
3151 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
3152 end = min(pa->pa_pstart + pa->pa_len, start + ac->ac_o_ex.fe_len);
3153 len = end - start;
3154 ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
3155 &ac->ac_b_ex.fe_start);
3156 ac->ac_b_ex.fe_len = len;
3157 ac->ac_status = AC_STATUS_FOUND;
3158 ac->ac_pa = pa;
3159
3160 BUG_ON(start < pa->pa_pstart);
3161 BUG_ON(start + len > pa->pa_pstart + pa->pa_len);
3162 BUG_ON(pa->pa_free < len);
3163 pa->pa_free -= len;
3164
60bd63d1 3165 mb_debug("use %llu/%u from inode pa %p\n", start, len, pa);
c9de560d
AT
3166}
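/*
 * Worked example (editorial; hypothetical numbers): a pa with
 * pa_pstart == 10000, pa_lstart == 200 and pa_len == 64, serving a
 * request for 8 blocks at logical 210, gives start == 10010 and
 * end == min(10064, 10018) == 10018, so all 8 blocks come out of the
 * preallocation and pa_free drops by 8.
 */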
3167
3168/*
3169 * use blocks preallocated to locality group
3170 */
3171static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
3172 struct ext4_prealloc_space *pa)
3173{
03cddb80 3174 unsigned int len = ac->ac_o_ex.fe_len;
6be2ded1 3175
c9de560d
AT
3176 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
3177 &ac->ac_b_ex.fe_group,
3178 &ac->ac_b_ex.fe_start);
3179 ac->ac_b_ex.fe_len = len;
3180 ac->ac_status = AC_STATUS_FOUND;
3181 ac->ac_pa = pa;
3182
3183	/* we don't correct pa_pstart or pa_len here to avoid
26346ff6 3184 * possible race when the group is being loaded concurrently
c9de560d 3185 * instead we correct pa later, after blocks are marked
26346ff6
AK
3186 * in on-disk bitmap -- see ext4_mb_release_context()
3187 * Other CPUs are prevented from allocating from this pa by lg_mutex
c9de560d
AT
3188 */
3189 mb_debug("use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa);
3190}
3191
5e745b04
AK
3192/*
3193 * Return the prealloc space that has the minimal distance
3194 * from the goal block. @cpa is the prealloc
3195 * space with the currently known minimal distance
3196 * from the goal block.
3197 */
3198static struct ext4_prealloc_space *
3199ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
3200 struct ext4_prealloc_space *pa,
3201 struct ext4_prealloc_space *cpa)
3202{
3203 ext4_fsblk_t cur_distance, new_distance;
3204
3205 if (cpa == NULL) {
3206 atomic_inc(&pa->pa_count);
3207 return pa;
3208 }
3209 cur_distance = abs(goal_block - cpa->pa_pstart);
3210 new_distance = abs(goal_block - pa->pa_pstart);
3211
3212 if (cur_distance < new_distance)
3213 return cpa;
3214
3215 /* drop the previous reference */
3216 atomic_dec(&cpa->pa_count);
3217 atomic_inc(&pa->pa_count);
3218 return pa;
3219}
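/*
 * Worked example (editorial): with goal_block == 5000, a current best
 * cpa at pa_pstart == 5600 (distance 600) loses to a pa at
 * pa_pstart == 4800 (distance 200): the reference on cpa is dropped
 * and one is taken on the closer pa, which becomes the new best.
 */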
3220
c9de560d
AT
3221/*
3222 * search goal blocks in preallocated space
3223 */
4ddfef7b
ES
3224static noinline_for_stack int
3225ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
c9de560d 3226{
6be2ded1 3227 int order, i;
c9de560d
AT
3228 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3229 struct ext4_locality_group *lg;
5e745b04
AK
3230 struct ext4_prealloc_space *pa, *cpa = NULL;
3231 ext4_fsblk_t goal_block;
c9de560d
AT
3232
3233 /* only data can be preallocated */
3234 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3235 return 0;
3236
3237 /* first, try per-file preallocation */
3238 rcu_read_lock();
9a0762c5 3239 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
c9de560d
AT
3240
3241 /* all fields in this condition don't change,
3242 * so we can skip locking for them */
3243 if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
3244 ac->ac_o_ex.fe_logical >= pa->pa_lstart + pa->pa_len)
3245 continue;
3246
3247 /* found preallocated blocks, use them */
3248 spin_lock(&pa->pa_lock);
3249 if (pa->pa_deleted == 0 && pa->pa_free) {
3250 atomic_inc(&pa->pa_count);
3251 ext4_mb_use_inode_pa(ac, pa);
3252 spin_unlock(&pa->pa_lock);
3253 ac->ac_criteria = 10;
3254 rcu_read_unlock();
3255 return 1;
3256 }
3257 spin_unlock(&pa->pa_lock);
3258 }
3259 rcu_read_unlock();
3260
3261 /* can we use group allocation? */
3262 if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
3263 return 0;
3264
3265 /* inode may have no locality group for some reason */
3266 lg = ac->ac_lg;
3267 if (lg == NULL)
3268 return 0;
6be2ded1
AK
3269 order = fls(ac->ac_o_ex.fe_len) - 1;
3270 if (order > PREALLOC_TB_SIZE - 1)
3271 /* The max size of hash table is PREALLOC_TB_SIZE */
3272 order = PREALLOC_TB_SIZE - 1;
3273
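	/*
	 * Worked example (editorial): a request for 24 blocks has
	 * fls(24) - 1 == 4, so only buckets lg_prealloc_list[4]
	 * through lg_prealloc_list[PREALLOC_TB_SIZE - 1] are scanned
	 * below -- smaller buckets hold PAs too small to satisfy it.
	 */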
5e745b04
AK
3274 goal_block = ac->ac_g_ex.fe_group * EXT4_BLOCKS_PER_GROUP(ac->ac_sb) +
3275 ac->ac_g_ex.fe_start +
3276 le32_to_cpu(EXT4_SB(ac->ac_sb)->s_es->s_first_data_block);
3277 /*
3278	 * search for the prealloc space with the
3279	 * minimal distance from the goal block.
3280 */
6be2ded1
AK
3281 for (i = order; i < PREALLOC_TB_SIZE; i++) {
3282 rcu_read_lock();
3283 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
3284 pa_inode_list) {
3285 spin_lock(&pa->pa_lock);
3286 if (pa->pa_deleted == 0 &&
3287 pa->pa_free >= ac->ac_o_ex.fe_len) {
5e745b04
AK
3288
3289 cpa = ext4_mb_check_group_pa(goal_block,
3290 pa, cpa);
6be2ded1 3291 }
c9de560d 3292 spin_unlock(&pa->pa_lock);
c9de560d 3293 }
6be2ded1 3294 rcu_read_unlock();
c9de560d 3295 }
5e745b04
AK
3296 if (cpa) {
3297 ext4_mb_use_group_pa(ac, cpa);
3298 ac->ac_criteria = 20;
3299 return 1;
3300 }
c9de560d
AT
3301 return 0;
3302}
3303
3304/*
3305 * the function goes through all preallocations in this group and marks them
3306 * used in the in-core bitmap. The buddy must be generated from this bitmap.
3307 * Needs to be called with the ext4 group lock (ext4_lock_group)
3308 */
3309static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
3310 ext4_group_t group)
3311{
3312 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3313 struct ext4_prealloc_space *pa;
3314 struct list_head *cur;
3315 ext4_group_t groupnr;
3316 ext4_grpblk_t start;
3317 int preallocated = 0;
3318 int count = 0;
3319 int len;
3320
3321	/* all forms of preallocation discard first load the group,
3322	 * so the only competing code is preallocation use.
3323	 * we don't need any locking here.
3324	 * note that we do NOT ignore preallocations with pa_deleted set;
3325	 * otherwise we could leave used blocks available for
3326	 * allocation in the buddy while a concurrent ext4_mb_put_pa()
3327	 * is dropping the preallocation
3328 */
3329 list_for_each(cur, &grp->bb_prealloc_list) {
3330 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3331 spin_lock(&pa->pa_lock);
3332 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3333 &groupnr, &start);
3334 len = pa->pa_len;
3335 spin_unlock(&pa->pa_lock);
3336 if (unlikely(len == 0))
3337 continue;
3338 BUG_ON(groupnr != group);
3339 mb_set_bits(sb_bgl_lock(EXT4_SB(sb), group),
3340 bitmap, start, len);
3341 preallocated += len;
3342 count++;
3343 }
3344 mb_debug("prellocated %u for group %lu\n", preallocated, group);
3345}
3346
3347static void ext4_mb_pa_callback(struct rcu_head *head)
3348{
3349 struct ext4_prealloc_space *pa;
3350 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
3351 kmem_cache_free(ext4_pspace_cachep, pa);
3352}
3353
3354/*
3355 * drops a reference to preallocated space descriptor
3356 * if this was the last reference and the space is consumed
3357 */
3358static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
3359 struct super_block *sb, struct ext4_prealloc_space *pa)
3360{
3361 unsigned long grp;
3362
3363 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
3364 return;
3365
3366 /* in this short window concurrent discard can set pa_deleted */
3367 spin_lock(&pa->pa_lock);
3368 if (pa->pa_deleted == 1) {
3369 spin_unlock(&pa->pa_lock);
3370 return;
3371 }
3372
3373 pa->pa_deleted = 1;
3374 spin_unlock(&pa->pa_lock);
3375
3376 /* -1 is to protect from crossing allocation group */
3377 ext4_get_group_no_and_offset(sb, pa->pa_pstart - 1, &grp, NULL);
3378
3379 /*
3380 * possible race:
3381 *
3382 * P1 (buddy init) P2 (regular allocation)
3383 * find block B in PA
3384 * copy on-disk bitmap to buddy
3385 * mark B in on-disk bitmap
3386 * drop PA from group
3387 * mark all PAs in buddy
3388 *
3389 * thus, P1 initializes buddy with B available. to prevent this
3390 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
3391 * against that pair
3392 */
3393 ext4_lock_group(sb, grp);
3394 list_del(&pa->pa_group_list);
3395 ext4_unlock_group(sb, grp);
3396
3397 spin_lock(pa->pa_obj_lock);
3398 list_del_rcu(&pa->pa_inode_list);
3399 spin_unlock(pa->pa_obj_lock);
3400
3401 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3402}
3403
3404/*
3405 * creates new preallocated space for given inode
3406 */
4ddfef7b
ES
3407static noinline_for_stack int
3408ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
c9de560d
AT
3409{
3410 struct super_block *sb = ac->ac_sb;
3411 struct ext4_prealloc_space *pa;
3412 struct ext4_group_info *grp;
3413 struct ext4_inode_info *ei;
3414
3415	/* preallocate only when the found space is larger than requested */
3416 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3417 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3418 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3419
3420 pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3421 if (pa == NULL)
3422 return -ENOMEM;
3423
3424 if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
3425 int winl;
3426 int wins;
3427 int win;
3428 int offs;
3429
3430		/* we can't allocate as much as the normalizer wants,
3431		 * so the found space must get a proper lstart
3432		 * to cover the original request */
3433 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
3434 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
3435
3436		/* we're limited by the original request: its
3437		 * logical block must be covered anyway.
3438		 * winl is the window we can move our chunk within */
3439 winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
3440
3441 /* also, we should cover whole original request */
3442 wins = ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len;
3443
3444 /* the smallest one defines real window */
3445 win = min(winl, wins);
3446
3447 offs = ac->ac_o_ex.fe_logical % ac->ac_b_ex.fe_len;
3448 if (offs && offs < win)
3449 win = offs;
3450
3451 ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical - win;
3452 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
3453 BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
3454 }
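	/*
	 * Worked example (editorial; hypothetical numbers): goal at
	 * logical 100 for 32 blocks, original request of 10 blocks at
	 * logical 110, but only 16 blocks found.  Then winl == 10,
	 * wins == 6, win == 6; offs == 110 % 16 == 14 is not below
	 * win, so fe_logical becomes 110 - 6 == 104 and the chunk
	 * [104, 120) still covers the original [110, 120).
	 */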
3455
3456 /* preallocation can change ac_b_ex, thus we store actually
3457 * allocated blocks for history */
3458 ac->ac_f_ex = ac->ac_b_ex;
3459
3460 pa->pa_lstart = ac->ac_b_ex.fe_logical;
3461 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3462 pa->pa_len = ac->ac_b_ex.fe_len;
3463 pa->pa_free = pa->pa_len;
3464 atomic_set(&pa->pa_count, 1);
3465 spin_lock_init(&pa->pa_lock);
3466 pa->pa_deleted = 0;
3467 pa->pa_linear = 0;
3468
3469 mb_debug("new inode pa %p: %llu/%u for %u\n", pa,
3470 pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3471
3472 ext4_mb_use_inode_pa(ac, pa);
3473 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
3474
3475 ei = EXT4_I(ac->ac_inode);
3476 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3477
3478 pa->pa_obj_lock = &ei->i_prealloc_lock;
3479 pa->pa_inode = ac->ac_inode;
3480
3481 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3482 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3483 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3484
3485 spin_lock(pa->pa_obj_lock);
3486 list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
3487 spin_unlock(pa->pa_obj_lock);
3488
3489 return 0;
3490}
3491
3492/*
3493 * creates new preallocated space for the locality group the inode belongs to
3494 */
4ddfef7b
ES
3495static noinline_for_stack int
3496ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
c9de560d
AT
3497{
3498 struct super_block *sb = ac->ac_sb;
3499 struct ext4_locality_group *lg;
3500 struct ext4_prealloc_space *pa;
3501 struct ext4_group_info *grp;
3502
3503	/* preallocate only when the found space is larger than requested */
3504 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3505 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3506 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3507
3508 BUG_ON(ext4_pspace_cachep == NULL);
3509 pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3510 if (pa == NULL)
3511 return -ENOMEM;
3512
3513 /* preallocation can change ac_b_ex, thus we store actually
3514 * allocated blocks for history */
3515 ac->ac_f_ex = ac->ac_b_ex;
3516
3517 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3518 pa->pa_lstart = pa->pa_pstart;
3519 pa->pa_len = ac->ac_b_ex.fe_len;
3520 pa->pa_free = pa->pa_len;
3521 atomic_set(&pa->pa_count, 1);
3522 spin_lock_init(&pa->pa_lock);
6be2ded1 3523 INIT_LIST_HEAD(&pa->pa_inode_list);
c9de560d
AT
3524 pa->pa_deleted = 0;
3525 pa->pa_linear = 1;
3526
3527 mb_debug("new group pa %p: %llu/%u for %u\n", pa,
3528 pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3529
3530 ext4_mb_use_group_pa(ac, pa);
3531 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
3532
3533 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3534 lg = ac->ac_lg;
3535 BUG_ON(lg == NULL);
3536
3537 pa->pa_obj_lock = &lg->lg_prealloc_lock;
3538 pa->pa_inode = NULL;
3539
3540 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3541 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3542 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3543
6be2ded1
AK
3544 /*
3545 * We will later add the new pa to the right bucket
3546 * after updating the pa_free in ext4_mb_release_context
3547 */
c9de560d
AT
3548 return 0;
3549}
3550
3551static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
3552{
3553 int err;
3554
3555 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
3556 err = ext4_mb_new_group_pa(ac);
3557 else
3558 err = ext4_mb_new_inode_pa(ac);
3559 return err;
3560}
3561
3562/*
3563 * finds all unused blocks in the on-disk bitmap and frees them in the
3564 * in-core bitmap and buddy.
3565 * @pa must be unlinked from inode and group lists, so that
3566 * nobody else can find/use it.
3567 * the caller MUST hold group/inode locks.
3568 * TODO: optimize the case when there are no in-core structures yet
3569 */
4ddfef7b
ES
3570static noinline_for_stack int
3571ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
c83617db
AK
3572 struct ext4_prealloc_space *pa,
3573 struct ext4_allocation_context *ac)
c9de560d 3574{
c9de560d
AT
3575 struct super_block *sb = e4b->bd_sb;
3576 struct ext4_sb_info *sbi = EXT4_SB(sb);
3577 unsigned long end;
3578 unsigned long next;
3579 ext4_group_t group;
3580 ext4_grpblk_t bit;
3581 sector_t start;
3582 int err = 0;
3583 int free = 0;
3584
3585 BUG_ON(pa->pa_deleted == 0);
3586 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3587 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3588 end = bit + pa->pa_len;
3589
256bdb49
ES
3590 if (ac) {
3591 ac->ac_sb = sb;
3592 ac->ac_inode = pa->pa_inode;
3593 ac->ac_op = EXT4_MB_HISTORY_DISCARD;
3594 }
c9de560d
AT
3595
3596 while (bit < end) {
ffad0a44 3597 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
c9de560d
AT
3598 if (bit >= end)
3599 break;
3600 next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
3601 start = group * EXT4_BLOCKS_PER_GROUP(sb) + bit +
3602 le32_to_cpu(sbi->s_es->s_first_data_block);
3603 mb_debug(" free preallocated %u/%u in group %u\n",
3604 (unsigned) start, (unsigned) next - bit,
3605 (unsigned) group);
3606 free += next - bit;
3607
3608 if (ac) {
3609 ac->ac_b_ex.fe_group = group;
3610 ac->ac_b_ex.fe_start = bit;
3611 ac->ac_b_ex.fe_len = next - bit;
3612 ac->ac_b_ex.fe_logical = 0;
3613 ext4_mb_store_history(ac);
3614 }
3615
3616 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
3617 bit = next + 1;
3618 }
3619 if (free != pa->pa_free) {
3620 printk(KERN_CRIT "pa %p: logic %lu, phys. %lu, len %lu\n",
3621 pa, (unsigned long) pa->pa_lstart,
3622 (unsigned long) pa->pa_pstart,
3623 (unsigned long) pa->pa_len);
3624 ext4_error(sb, __func__, "free %u, pa_free %u\n",
3625 free, pa->pa_free);
3626 /*
3627 * pa is already deleted so we use the value obtained
3628 * from the bitmap and continue.
3629 */
3630 }
3631 atomic_add(free, &sbi->s_mb_discarded);
3632
3633 return err;
3634}
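/*
 * Worked example for the scan above (illustrative, values assumed):
 * suppose the pa covers bits 100..115 of the group and only bits
 * 104..107 are still set in the on-disk bitmap.  The loop walks:
 *
 *   mb_find_next_zero_bit() -> 100, mb_find_next_bit() -> 104:
 *       mb_free_blocks() releases bits 100..103 (free += 4)
 *   mb_find_next_zero_bit() -> 108, mb_find_next_bit() -> end:
 *       mb_free_blocks() releases bits 108..115 (free += 8)
 *
 * so 12 blocks return to the buddy, and free (12) is cross-checked
 * against pa_free as a corruption test.
 */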
3635
3636static noinline_for_stack int
3637ext4_mb_release_group_pa(struct ext4_buddy *e4b,
3638 struct ext4_prealloc_space *pa,
3639 struct ext4_allocation_context *ac)
3640{
3641 struct super_block *sb = e4b->bd_sb;
3642 ext4_group_t group;
3643 ext4_grpblk_t bit;
3644
3645 if (ac)
3646 ac->ac_op = EXT4_MB_HISTORY_DISCARD;
3647
3648 BUG_ON(pa->pa_deleted == 0);
3649 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3650 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3651 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
3652 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
3653
3654 if (ac) {
3655 ac->ac_sb = sb;
3656 ac->ac_inode = NULL;
3657 ac->ac_b_ex.fe_group = group;
3658 ac->ac_b_ex.fe_start = bit;
3659 ac->ac_b_ex.fe_len = pa->pa_len;
3660 ac->ac_b_ex.fe_logical = 0;
3661 ext4_mb_store_history(ac);
3662 }
3663
3664 return 0;
3665}
3666
3667/*
3668 * releases all preallocations in given group
3669 *
3670 * first, we need to decide discard policy:
3671 * - when do we discard
3672 * 1) ENOSPC
3673 * - how many do we discard
3674 * 1) how many requested
3675 */
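/*
 * Concretely (based on the callers below): when ext4_mb_new_blocks()
 * fails to find space, it calls ext4_mb_discard_preallocations(sb, len),
 * which walks the groups invoking this function with needed = len until
 * that many preallocated blocks have been returned; needed == 0 is
 * treated as "discard every preallocation in the group".
 */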
3676static noinline_for_stack int
3677ext4_mb_discard_group_preallocations(struct super_block *sb,
3678 ext4_group_t group, int needed)
3679{
3680 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3681 struct buffer_head *bitmap_bh = NULL;
3682 struct ext4_prealloc_space *pa, *tmp;
3683 struct ext4_allocation_context *ac;
3684 struct list_head list;
3685 struct ext4_buddy e4b;
3686 int err;
3687 int busy = 0;
3688 int free = 0;
3689
3690 mb_debug("discard preallocation for group %lu\n", group);
3691
3692 if (list_empty(&grp->bb_prealloc_list))
3693 return 0;
3694
3695 bitmap_bh = ext4_read_block_bitmap(sb, group);
3696 if (bitmap_bh == NULL) {
3697 ext4_error(sb, __func__, "Error in reading block "
3698 "bitmap for %lu\n", group);
3699 return 0;
3700 }
3701
3702 err = ext4_mb_load_buddy(sb, group, &e4b);
3703 if (err) {
3704 ext4_error(sb, __func__, "Error in loading buddy "
3705 "information for %lu\n", group);
3706 put_bh(bitmap_bh);
3707 return 0;
3708 }
3709
3710 if (needed == 0)
3711 needed = EXT4_BLOCKS_PER_GROUP(sb) + 1;
3712
3713 INIT_LIST_HEAD(&list);
3714 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
3715repeat:
3716 ext4_lock_group(sb, group);
3717 list_for_each_entry_safe(pa, tmp,
3718 &grp->bb_prealloc_list, pa_group_list) {
3719 spin_lock(&pa->pa_lock);
3720 if (atomic_read(&pa->pa_count)) {
3721 spin_unlock(&pa->pa_lock);
3722 busy = 1;
3723 continue;
3724 }
3725 if (pa->pa_deleted) {
3726 spin_unlock(&pa->pa_lock);
3727 continue;
3728 }
3729
3730 /* seems this one can be freed ... */
3731 pa->pa_deleted = 1;
3732
3733 /* we can trust pa_free ... */
3734 free += pa->pa_free;
3735
3736 spin_unlock(&pa->pa_lock);
3737
3738 list_del(&pa->pa_group_list);
3739 list_add(&pa->u.pa_tmp_list, &list);
3740 }
3741
3742 /* if we still need more blocks and some PAs were used, try again */
3743 if (free < needed && busy) {
3744 busy = 0;
3745 ext4_unlock_group(sb, group);
3746 /*
3747 * Yield the CPU here so that we don't get soft lockup
3748 * in non preempt case.
3749 */
3750 yield();
3751 goto repeat;
3752 }
3753
3754 /* found anything to free? */
3755 if (list_empty(&list)) {
3756 BUG_ON(free != 0);
3757 goto out;
3758 }
3759
3760 /* now free all selected PAs */
3761 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3762
3763 /* remove from object (inode or locality group) */
3764 spin_lock(pa->pa_obj_lock);
3765 list_del_rcu(&pa->pa_inode_list);
3766 spin_unlock(pa->pa_obj_lock);
3767
3768 if (pa->pa_linear)
3769 ext4_mb_release_group_pa(&e4b, pa, ac);
3770 else
3771 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac);
3772
3773 list_del(&pa->u.pa_tmp_list);
3774 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3775 }
3776
3777out:
3778 ext4_unlock_group(sb, group);
3779 if (ac)
3780 kmem_cache_free(ext4_ac_cachep, ac);
3781 ext4_mb_release_desc(&e4b);
3782 put_bh(bitmap_bh);
3783 return free;
3784}
3785
3786/*
3787 * releases all unused preallocated blocks for a given inode
3788 *
3789 * It's important to discard preallocations under i_data_sem
3790 * We don't want another block to be served from the prealloc
3791 * space when we are discarding the inode prealloc space.
3792 *
3793 * FIXME!! Make sure it is valid at all the call sites
3794 */
3795 void ext4_discard_preallocations(struct inode *inode)
3796{
3797 struct ext4_inode_info *ei = EXT4_I(inode);
3798 struct super_block *sb = inode->i_sb;
3799 struct buffer_head *bitmap_bh = NULL;
3800 struct ext4_prealloc_space *pa, *tmp;
3801 struct ext4_allocation_context *ac;
3802 ext4_group_t group = 0;
3803 struct list_head list;
3804 struct ext4_buddy e4b;
3805 int err;
3806
3807 if (!S_ISREG(inode->i_mode)) {
3808 /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
3809 return;
3810 }
3811
3812 mb_debug("discard preallocation for inode %lu\n", inode->i_ino);
3813
3814 INIT_LIST_HEAD(&list);
3815
3816 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
3817repeat:
3818 /* first, collect all pa's in the inode */
3819 spin_lock(&ei->i_prealloc_lock);
3820 while (!list_empty(&ei->i_prealloc_list)) {
3821 pa = list_entry(ei->i_prealloc_list.next,
3822 struct ext4_prealloc_space, pa_inode_list);
3823 BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
3824 spin_lock(&pa->pa_lock);
3825 if (atomic_read(&pa->pa_count)) {
3826 /* this shouldn't happen often - nobody should
3827 * use preallocation while we're discarding it */
3828 spin_unlock(&pa->pa_lock);
3829 spin_unlock(&ei->i_prealloc_lock);
3830 printk(KERN_ERR "uh-oh! used pa while discarding\n");
3831 WARN_ON(1);
3832 schedule_timeout_uninterruptible(HZ);
3833 goto repeat;
3834
3835 }
3836 if (pa->pa_deleted == 0) {
3837 pa->pa_deleted = 1;
3838 spin_unlock(&pa->pa_lock);
3839 list_del_rcu(&pa->pa_inode_list);
3840 list_add(&pa->u.pa_tmp_list, &list);
3841 continue;
3842 }
3843
3844 /* someone is deleting pa right now */
3845 spin_unlock(&pa->pa_lock);
3846 spin_unlock(&ei->i_prealloc_lock);
3847
3848 /* we have to wait here because pa_deleted
3849 * doesn't mean pa is already unlinked from
3850 * the list. as we might be called from
3851 * ->clear_inode() the inode will get freed
3852 * and a concurrent thread unlinking the pa
3853 * from the inode's list may access already
3854 * freed memory, bad-bad-bad */
3855
3856 /* XXX: if this happens too often, we can
3857 * add a flag to force wait only in case
3858 * of ->clear_inode(), but not in case of
3859 * regular truncate */
3860 schedule_timeout_uninterruptible(HZ);
3861 goto repeat;
3862 }
3863 spin_unlock(&ei->i_prealloc_lock);
3864
3865 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3866 BUG_ON(pa->pa_linear != 0);
3867 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
3868
3869 err = ext4_mb_load_buddy(sb, group, &e4b);
3870 if (err) {
3871 ext4_error(sb, __func__, "Error in loading buddy "
3872 "information for %lu\n", group);
3873 continue;
3874 }
3875
3876 bitmap_bh = ext4_read_block_bitmap(sb, group);
3877 if (bitmap_bh == NULL) {
3878 ext4_error(sb, __func__, "Error in reading block "
3879 "bitmap for %lu\n", group);
3880 ext4_mb_release_desc(&e4b);
3881 continue;
3882 }
3883
3884 ext4_lock_group(sb, group);
3885 list_del(&pa->pa_group_list);
3886 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac);
3887 ext4_unlock_group(sb, group);
3888
3889 ext4_mb_release_desc(&e4b);
3890 put_bh(bitmap_bh);
3891
3892 list_del(&pa->u.pa_tmp_list);
3893 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3894 }
3895 if (ac)
3896 kmem_cache_free(ext4_ac_cachep, ac);
3897}
3898
3899/*
3900 * finds all preallocated spaces and returns blocks being freed to them
3901 * if a preallocated space becomes full (no block is used from the space)
3902 * then the function frees the space in the buddy
3903 * XXX: at the moment, truncate (which is the only way to free blocks)
3904 * discards all preallocations
3905 */
3906static void ext4_mb_return_to_preallocation(struct inode *inode,
3907 struct ext4_buddy *e4b,
3908 sector_t block, int count)
3909{
3910 BUG_ON(!list_empty(&EXT4_I(inode)->i_prealloc_list));
3911}
3912#ifdef MB_DEBUG
3913static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
3914{
3915 struct super_block *sb = ac->ac_sb;
3916 ext4_group_t i;
3917
3918 printk(KERN_ERR "EXT4-fs: Can't allocate:"
3919 " Allocation context details:\n");
3920 printk(KERN_ERR "EXT4-fs: status %d flags %d\n",
3921 ac->ac_status, ac->ac_flags);
3922 printk(KERN_ERR "EXT4-fs: orig %lu/%lu/%lu@%lu, goal %lu/%lu/%lu@%lu, "
3923 "best %lu/%lu/%lu@%lu cr %d\n",
3924 (unsigned long)ac->ac_o_ex.fe_group,
3925 (unsigned long)ac->ac_o_ex.fe_start,
3926 (unsigned long)ac->ac_o_ex.fe_len,
3927 (unsigned long)ac->ac_o_ex.fe_logical,
3928 (unsigned long)ac->ac_g_ex.fe_group,
3929 (unsigned long)ac->ac_g_ex.fe_start,
3930 (unsigned long)ac->ac_g_ex.fe_len,
3931 (unsigned long)ac->ac_g_ex.fe_logical,
3932 (unsigned long)ac->ac_b_ex.fe_group,
3933 (unsigned long)ac->ac_b_ex.fe_start,
3934 (unsigned long)ac->ac_b_ex.fe_len,
3935 (unsigned long)ac->ac_b_ex.fe_logical,
3936 (int)ac->ac_criteria);
3937 printk(KERN_ERR "EXT4-fs: %lu scanned, %d found\n", ac->ac_ex_scanned,
3938 ac->ac_found);
3939 printk(KERN_ERR "EXT4-fs: groups: \n");
3940 for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
3941 struct ext4_group_info *grp = ext4_get_group_info(sb, i);
3942 struct ext4_prealloc_space *pa;
3943 ext4_grpblk_t start;
3944 struct list_head *cur;
3945 ext4_lock_group(sb, i);
3946 list_for_each(cur, &grp->bb_prealloc_list) {
3947 pa = list_entry(cur, struct ext4_prealloc_space,
3948 pa_group_list);
3949 spin_lock(&pa->pa_lock);
3950 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3951 NULL, &start);
3952 spin_unlock(&pa->pa_lock);
3953 printk(KERN_ERR "PA:%lu:%d:%u \n", i,
3954 start, pa->pa_len);
3955 }
3956 ext4_unlock_group(sb, i);
3957
3958 if (grp->bb_free == 0)
3959 continue;
3960 printk(KERN_ERR "%lu: %d/%d \n",
3961 i, grp->bb_free, grp->bb_fragments);
3962 }
3963 printk(KERN_ERR "\n");
3964}
3965#else
3966static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
3967{
3968 return;
3969}
3970#endif
3971
3972/*
3973 * We use locality group preallocation for small files. The size of the
3974 * file is determined by the current size or the resulting size after
3975 * allocation, whichever is larger
3976 *
3977 * One can tune this size via /proc/fs/ext4/<partition>/stream_req
3978 */
3979static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
3980{
3981 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3982 int bsbits = ac->ac_sb->s_blocksize_bits;
3983 loff_t size, isize;
3984
3985 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3986 return;
3987
3988 size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
3989 isize = i_size_read(ac->ac_inode) >> bsbits;
3990 size = max(size, isize);
3991
3992 /* don't use group allocation for large files */
3993 if (size >= sbi->s_mb_stream_request)
3994 return;
3995
3996 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
3997 return;
3998
3999 BUG_ON(ac->ac_lg != NULL);
4000 /*
4001 * locality group prealloc space is per-CPU. The reason for having
4002 * a per-CPU locality group is to reduce contention between block
4003 * requests from multiple CPUs.
4004 */
4005 ac->ac_lg = per_cpu_ptr(sbi->s_locality_groups, raw_smp_processor_id());
4006
4007 /* we're going to use group allocation */
4008 ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
4009
4010 /* serialize all allocations in the group */
4011 mutex_lock(&ac->ac_lg->lg_mutex);
4012}
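/*
 * Worked example (illustrative, assuming 4KB blocks and the default
 * stream_req of 16 blocks): bsbits = 12, so a write of 4 blocks at
 * logical block 10 of a 40KB file gives size = max(10 + 4, 10) = 14
 * < 16, and the request is switched to per-CPU locality group
 * allocation; a 1MB file (256 blocks) would keep using inode
 * preallocation instead.
 */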
4013
4014static noinline_for_stack int
4015ext4_mb_initialize_context(struct ext4_allocation_context *ac,
4016 struct ext4_allocation_request *ar)
4017{
4018 struct super_block *sb = ar->inode->i_sb;
4019 struct ext4_sb_info *sbi = EXT4_SB(sb);
4020 struct ext4_super_block *es = sbi->s_es;
4021 ext4_group_t group;
4022 unsigned long len;
4023 unsigned long goal;
4024 ext4_grpblk_t block;
4025
4026 /* we can't allocate > group size */
4027 len = ar->len;
4028
4029 /* just a dirty hack to filter too big requests */
4030 if (len >= EXT4_BLOCKS_PER_GROUP(sb) - 10)
4031 len = EXT4_BLOCKS_PER_GROUP(sb) - 10;
4032
4033 /* start searching from the goal */
4034 goal = ar->goal;
4035 if (goal < le32_to_cpu(es->s_first_data_block) ||
4036 goal >= ext4_blocks_count(es))
4037 goal = le32_to_cpu(es->s_first_data_block);
4038 ext4_get_group_no_and_offset(sb, goal, &group, &block);
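/*
 * Example of the goal sanitization above (illustrative, assuming
 * s_first_data_block == 0 and 32768 blocks per group): goal = 100000
 * is in range and splits into group = 100000 / 32768 = 3,
 * block = 100000 % 32768 = 1696; an out-of-range goal falls back to
 * s_first_data_block, i.e. group 0, offset 0.
 */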
4039
4040 /* set up allocation goals */
4041 ac->ac_b_ex.fe_logical = ar->logical;
4042 ac->ac_b_ex.fe_group = 0;
4043 ac->ac_b_ex.fe_start = 0;
4044 ac->ac_b_ex.fe_len = 0;
4045 ac->ac_status = AC_STATUS_CONTINUE;
4046 ac->ac_groups_scanned = 0;
4047 ac->ac_ex_scanned = 0;
4048 ac->ac_found = 0;
4049 ac->ac_sb = sb;
4050 ac->ac_inode = ar->inode;
4051 ac->ac_o_ex.fe_logical = ar->logical;
4052 ac->ac_o_ex.fe_group = group;
4053 ac->ac_o_ex.fe_start = block;
4054 ac->ac_o_ex.fe_len = len;
4055 ac->ac_g_ex.fe_logical = ar->logical;
4056 ac->ac_g_ex.fe_group = group;
4057 ac->ac_g_ex.fe_start = block;
4058 ac->ac_g_ex.fe_len = len;
4059 ac->ac_f_ex.fe_len = 0;
4060 ac->ac_flags = ar->flags;
4061 ac->ac_2order = 0;
4062 ac->ac_criteria = 0;
4063 ac->ac_pa = NULL;
4064 ac->ac_bitmap_page = NULL;
4065 ac->ac_buddy_page = NULL;
4066 ac->ac_lg = NULL;
4067
4068 /* we have to define context: will we work with a file or
4069 * locality group. this is a policy, actually */
4070 ext4_mb_group_or_file(ac);
4071
4072 mb_debug("init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, "
4073 "left: %u/%u, right %u/%u to %swritable\n",
4074 (unsigned) ar->len, (unsigned) ar->logical,
4075 (unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
4076 (unsigned) ar->lleft, (unsigned) ar->pleft,
4077 (unsigned) ar->lright, (unsigned) ar->pright,
4078 atomic_read(&ar->inode->i_writecount) ? "" : "non-");
4079 return 0;
4080
4081}
4082
4083static noinline_for_stack void
4084ext4_mb_discard_lg_preallocations(struct super_block *sb,
4085 struct ext4_locality_group *lg,
4086 int order, int total_entries)
4087{
4088 ext4_group_t group = 0;
4089 struct ext4_buddy e4b;
4090 struct list_head discard_list;
4091 struct ext4_prealloc_space *pa, *tmp;
4092 struct ext4_allocation_context *ac;
4093
4094 mb_debug("discard locality group preallocation\n");
4095
4096 INIT_LIST_HEAD(&discard_list);
4097 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4098
4099 spin_lock(&lg->lg_prealloc_lock);
4100 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
4101 pa_inode_list) {
4102 spin_lock(&pa->pa_lock);
4103 if (atomic_read(&pa->pa_count)) {
4104 /*
4105 * This is the pa that we just used
4106 * for block allocation. So don't
4107 * free that
4108 */
4109 spin_unlock(&pa->pa_lock);
4110 continue;
4111 }
4112 if (pa->pa_deleted) {
4113 spin_unlock(&pa->pa_lock);
4114 continue;
4115 }
4116 /* only lg prealloc space */
4117 BUG_ON(!pa->pa_linear);
4118
4119 /* seems this one can be freed ... */
4120 pa->pa_deleted = 1;
4121 spin_unlock(&pa->pa_lock);
4122
4123 list_del_rcu(&pa->pa_inode_list);
4124 list_add(&pa->u.pa_tmp_list, &discard_list);
4125
4126 total_entries--;
4127 if (total_entries <= 5) {
4128 /*
4129 * we want to keep only 5 entries
4130 * allowing it to grow to 8. This
4131 * makes sure we don't call discard
4132 * again soon for this list.
4133 */
4134 break;
4135 }
4136 }
4137 spin_unlock(&lg->lg_prealloc_lock);
4138
4139 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
4140
4141 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
4142 if (ext4_mb_load_buddy(sb, group, &e4b)) {
4143 ext4_error(sb, __func__, "Error in loading buddy "
4144 "information for %lu\n", group);
4145 continue;
4146 }
4147 ext4_lock_group(sb, group);
4148 list_del(&pa->pa_group_list);
4149 ext4_mb_release_group_pa(&e4b, pa, ac);
4150 ext4_unlock_group(sb, group);
4151
4152 ext4_mb_release_desc(&e4b);
4153 list_del(&pa->u.pa_tmp_list);
4154 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4155 }
4156 if (ac)
4157 kmem_cache_free(ext4_ac_cachep, ac);
4158}
4159
4160/*
4161 * We have incremented pa_count, so it cannot be freed at this
4162 * point. We also hold lg_mutex, so no parallel allocation is
4163 * possible from this lg. That means pa_free cannot be updated.
4164 *
4165 * A parallel ext4_mb_discard_group_preallocations is possible,
4166 * which can cause the lg_prealloc_list to be updated.
4167 */
4168
4169static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
4170{
4171 int order, added = 0, lg_prealloc_count = 1;
4172 struct super_block *sb = ac->ac_sb;
4173 struct ext4_locality_group *lg = ac->ac_lg;
4174 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
4175
4176 order = fls(pa->pa_free) - 1;
4177 if (order > PREALLOC_TB_SIZE - 1)
4178 /* The max size of hash table is PREALLOC_TB_SIZE */
4179 order = PREALLOC_TB_SIZE - 1;
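/*
 * e.g. a pa with pa_free = 300 gives fls(300) = 9, so it is filed
 * in bucket order 8; anything with pa_free >= 2^PREALLOC_TB_SIZE is
 * clamped into the last bucket.
 */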
4180 /* Add the prealloc space to lg */
4181 rcu_read_lock();
4182 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
4183 pa_inode_list) {
4184 spin_lock(&tmp_pa->pa_lock);
4185 if (tmp_pa->pa_deleted) {
4186 spin_unlock(&tmp_pa->pa_lock);
4187 continue;
4188 }
4189 if (!added && pa->pa_free < tmp_pa->pa_free) {
4190 /* Add to the tail of the previous entry */
4191 list_add_tail_rcu(&pa->pa_inode_list,
4192 &tmp_pa->pa_inode_list);
4193 added = 1;
4194 /*
4195 * we want to count the total
4196 * number of entries in the list
4197 */
4198 }
4199 spin_unlock(&tmp_pa->pa_lock);
4200 lg_prealloc_count++;
4201 }
4202 if (!added)
4203 list_add_tail_rcu(&pa->pa_inode_list,
4204 &lg->lg_prealloc_list[order]);
4205 rcu_read_unlock();
4206
4207 /* Now trim the list to be not more than 8 elements */
4208 if (lg_prealloc_count > 8) {
4209 ext4_mb_discard_lg_preallocations(sb, lg,
4210 order, lg_prealloc_count);
4211 return;
4212 }
4213 return;
4214}
4215
4216/*
4217 * release all resource we used in allocation
4218 */
4219static int ext4_mb_release_context(struct ext4_allocation_context *ac)
4220{
4221 struct ext4_prealloc_space *pa = ac->ac_pa;
4222 if (pa) {
4223 if (pa->pa_linear) {
4224 /* see comment in ext4_mb_use_group_pa() */
4225 spin_lock(&pa->pa_lock);
4226 pa->pa_pstart += ac->ac_b_ex.fe_len;
4227 pa->pa_lstart += ac->ac_b_ex.fe_len;
4228 pa->pa_free -= ac->ac_b_ex.fe_len;
4229 pa->pa_len -= ac->ac_b_ex.fe_len;
4230 spin_unlock(&pa->pa_lock);
4231 /*
4232 * We want to add the pa to the right bucket.
4233 * Remove it from the list and while adding
4234 * make sure the list to which we are adding
4235 * doesn't grow big.
4236 */
4237 if (likely(pa->pa_free)) {
4238 spin_lock(pa->pa_obj_lock);
4239 list_del_rcu(&pa->pa_inode_list);
4240 spin_unlock(pa->pa_obj_lock);
4241 ext4_mb_add_n_trim(ac);
4242 }
4243 }
4244 ext4_mb_put_pa(ac, ac->ac_sb, pa);
4245 }
4246 if (ac->ac_bitmap_page)
4247 page_cache_release(ac->ac_bitmap_page);
4248 if (ac->ac_buddy_page)
4249 page_cache_release(ac->ac_buddy_page);
4250 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4251 mutex_unlock(&ac->ac_lg->lg_mutex);
4252 ext4_mb_collect_stats(ac);
4253 return 0;
4254}
4255
4256static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
4257{
4258 ext4_group_t i;
4259 int ret;
4260 int freed = 0;
4261
4262 for (i = 0; i < EXT4_SB(sb)->s_groups_count && needed > 0; i++) {
4263 ret = ext4_mb_discard_group_preallocations(sb, i, needed);
4264 freed += ret;
4265 needed -= ret;
4266 }
4267
4268 return freed;
4269}
4270
4271/*
4272 * Main entry point into mballoc to allocate blocks
4273 * it tries to use preallocation first, then falls back
4274 * to usual allocation
4275 */
4276ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4277 struct ext4_allocation_request *ar, int *errp)
4278{
4279 int freed;
4280 struct ext4_allocation_context *ac = NULL;
4281 struct ext4_sb_info *sbi;
4282 struct super_block *sb;
4283 ext4_fsblk_t block = 0;
4284 unsigned long inquota;
4285 unsigned long reserv_blks = 0;
4286
4287 sb = ar->inode->i_sb;
4288 sbi = EXT4_SB(sb);
4289
4290 if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag) {
4291 /*
4292 * With delalloc we already reserved the blocks
4293 */
4294 while (ar->len && ext4_claim_free_blocks(sbi, ar->len)) {
4295 /* let others free the space */
4296 yield();
4297 ar->len = ar->len >> 1;
4298 }
4299 if (!ar->len) {
4300 *errp = -ENOSPC;
4301 return 0;
4302 }
4303 reserv_blks = ar->len;
4304 }
4305 while (ar->len && DQUOT_ALLOC_BLOCK(ar->inode, ar->len)) {
4306 ar->flags |= EXT4_MB_HINT_NOPREALLOC;
4307 ar->len--;
4308 }
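/*
 * Back-off illustration for the two loops above (values assumed):
 * a 256-block request on a nearly full filesystem is halved
 * (256 -> 128 -> 64 ...) until ext4_claim_free_blocks() succeeds or
 * len reaches 0 (-ENOSPC); the quota loop then trims len one block
 * at a time while DQUOT_ALLOC_BLOCK() keeps failing, setting
 * EXT4_MB_HINT_NOPREALLOC so the shrunken request won't preallocate.
 */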
4309 if (ar->len == 0) {
4310 *errp = -EDQUOT;
4311 return 0;
4312 }
4313 inquota = ar->len;
4314
4315 if (EXT4_I(ar->inode)->i_delalloc_reserved_flag)
4316 ar->flags |= EXT4_MB_DELALLOC_RESERVED;
4317
4318 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4319 if (!ac) {
4320 ar->len = 0;
4321 *errp = -ENOMEM;
4322 goto out1;
4323 }
4324
4325 ext4_mb_poll_new_transaction(sb, handle);
4326
4327 *errp = ext4_mb_initialize_context(ac, ar);
4328 if (*errp) {
4329 ar->len = 0;
4330 goto out2;
4331 }
4332
4333 ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
4334 if (!ext4_mb_use_preallocated(ac)) {
4335 ac->ac_op = EXT4_MB_HISTORY_ALLOC;
4336 ext4_mb_normalize_request(ac, ar);
4337repeat:
4338 /* allocate space in core */
4339 ext4_mb_regular_allocator(ac);
4340
4341 /* as we've just preallocated more space than
4342 * user originally requested, we store allocated
4343 * space in a special descriptor */
4344 if (ac->ac_status == AC_STATUS_FOUND &&
4345 ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
4346 ext4_mb_new_preallocation(ac);
4347 }
4348
4349 if (likely(ac->ac_status == AC_STATUS_FOUND)) {
4350 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_blks);
4351 if (*errp == -EAGAIN) {
4352 ac->ac_b_ex.fe_group = 0;
4353 ac->ac_b_ex.fe_start = 0;
4354 ac->ac_b_ex.fe_len = 0;
4355 ac->ac_status = AC_STATUS_CONTINUE;
4356 goto repeat;
4357 } else if (*errp) {
4358 ac->ac_b_ex.fe_len = 0;
4359 ar->len = 0;
4360 ext4_mb_show_ac(ac);
4361 } else {
4362 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4363 ar->len = ac->ac_b_ex.fe_len;
4364 }
4365 } else {
4366 freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
4367 if (freed)
4368 goto repeat;
4369 *errp = -ENOSPC;
4370 ac->ac_b_ex.fe_len = 0;
4371 ar->len = 0;
4372 ext4_mb_show_ac(ac);
4373 }
4374
4375 ext4_mb_release_context(ac);
4376
4377out2:
4378 kmem_cache_free(ext4_ac_cachep, ac);
4379out1:
4380 if (ar->len < inquota)
4381 DQUOT_FREE_BLOCK(ar->inode, inquota - ar->len);
4382
4383 return block;
4384}
4385static void ext4_mb_poll_new_transaction(struct super_block *sb,
4386 handle_t *handle)
4387{
4388 struct ext4_sb_info *sbi = EXT4_SB(sb);
4389
4390 if (sbi->s_last_transaction == handle->h_transaction->t_tid)
4391 return;
4392
4393 /* new transaction! time to close the last one and free blocks for
4394 * the committed transaction. we know that only one transaction can be
4395 * active, so the previous transaction may still be being logged and
4396 * the transaction before the previous one is known to be already
4397 * logged. this means that now we may free blocks freed in all
4398 * transactions before the previous one. hope I'm clear enough ... */
4399
4400 spin_lock(&sbi->s_md_lock);
4401 if (sbi->s_last_transaction != handle->h_transaction->t_tid) {
4402 mb_debug("new transaction %lu, old %lu\n",
4403 (unsigned long) handle->h_transaction->t_tid,
4404 (unsigned long) sbi->s_last_transaction);
4405 list_splice_init(&sbi->s_closed_transaction,
4406 &sbi->s_committed_transaction);
4407 list_splice_init(&sbi->s_active_transaction,
4408 &sbi->s_closed_transaction);
4409 sbi->s_last_transaction = handle->h_transaction->t_tid;
4410 }
4411 spin_unlock(&sbi->s_md_lock);
4412
4413 ext4_mb_free_committed_blocks(sb);
4414}
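/*
 * The three lists spliced above form a pipeline that ages freed
 * blocks by one transaction per step:
 *
 *   s_active_transaction    - blocks freed in the running transaction
 *   s_closed_transaction    - freed in the previous (committing) one
 *   s_committed_transaction - safe to reuse; drained by
 *                             ext4_mb_free_committed_blocks()
 *
 * so a freed block becomes allocatable again only once the
 * transaction that freed it is known to be on disk.
 */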
4415
4416static noinline_for_stack int
4417ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
4418 ext4_group_t group, ext4_grpblk_t block, int count)
4419{
4420 struct ext4_group_info *db = e4b->bd_info;
4421 struct super_block *sb = e4b->bd_sb;
4422 struct ext4_sb_info *sbi = EXT4_SB(sb);
4423 struct ext4_free_metadata *md;
4424 int i;
4425
4426 BUG_ON(e4b->bd_bitmap_page == NULL);
4427 BUG_ON(e4b->bd_buddy_page == NULL);
4428
4429 ext4_lock_group(sb, group);
4430 for (i = 0; i < count; i++) {
4431 md = db->bb_md_cur;
4432 if (md && db->bb_tid != handle->h_transaction->t_tid) {
4433 db->bb_md_cur = NULL;
4434 md = NULL;
4435 }
4436
4437 if (md == NULL) {
4438 ext4_unlock_group(sb, group);
4439 md = kmalloc(sizeof(*md), GFP_NOFS);
4440 if (md == NULL)
4441 return -ENOMEM;
4442 md->num = 0;
4443 md->group = group;
4444
4445 ext4_lock_group(sb, group);
4446 if (db->bb_md_cur == NULL) {
4447 spin_lock(&sbi->s_md_lock);
4448 list_add(&md->list, &sbi->s_active_transaction);
4449 spin_unlock(&sbi->s_md_lock);
4450 /* protect buddy cache from being freed,
4451 * otherwise we'll refresh it from
4452 * on-disk bitmap and lose not-yet-available
4453 * blocks */
4454 page_cache_get(e4b->bd_buddy_page);
4455 page_cache_get(e4b->bd_bitmap_page);
4456 db->bb_md_cur = md;
4457 db->bb_tid = handle->h_transaction->t_tid;
4458 mb_debug("new md 0x%p for group %lu\n",
4459 md, md->group);
4460 } else {
4461 kfree(md);
4462 md = db->bb_md_cur;
4463 }
4464 }
4465
4466 BUG_ON(md->num >= EXT4_BB_MAX_BLOCKS);
4467 md->blocks[md->num] = block + i;
4468 md->num++;
4469 if (md->num == EXT4_BB_MAX_BLOCKS) {
4470 /* no more space, put full container on a sb's list */
4471 db->bb_md_cur = NULL;
4472 }
4473 }
4474 ext4_unlock_group(sb, group);
4475 return 0;
4476}
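/*
 * Batching example (illustrative): freeing blocks 5000..5002 of a
 * group within one transaction appends three entries to the current
 * db->bb_md_cur container; a new container is allocated (and the
 * bitmap/buddy pages pinned) only when bb_md_cur is NULL - on the
 * first free of a transaction or after the previous container filled
 * up to EXT4_BB_MAX_BLOCKS entries.
 */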
4477
4478/*
4479 * Main entry point into mballoc to free blocks
4480 */
4481void ext4_mb_free_blocks(handle_t *handle, struct inode *inode,
4482 unsigned long block, unsigned long count,
4483 int metadata, unsigned long *freed)
4484{
4485 struct buffer_head *bitmap_bh = NULL;
4486 struct super_block *sb = inode->i_sb;
4487 struct ext4_allocation_context *ac = NULL;
4488 struct ext4_group_desc *gdp;
4489 struct ext4_super_block *es;
4490 unsigned long overflow;
4491 ext4_grpblk_t bit;
4492 struct buffer_head *gd_bh;
4493 ext4_group_t block_group;
4494 struct ext4_sb_info *sbi;
4495 struct ext4_buddy e4b;
4496 int err = 0;
4497 int ret;
4498
4499 *freed = 0;
4500
4501 ext4_mb_poll_new_transaction(sb, handle);
4502
4503 sbi = EXT4_SB(sb);
4504 es = EXT4_SB(sb)->s_es;
4505 if (block < le32_to_cpu(es->s_first_data_block) ||
4506 block + count < block ||
4507 block + count > ext4_blocks_count(es)) {
4508 ext4_error(sb, __func__,
4509 "Freeing blocks not in datazone - "
4510 "block = %lu, count = %lu", block, count);
4511 goto error_return;
4512 }
4513
4514 ext4_debug("freeing block %lu\n", block);
4515
4516 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4517 if (ac) {
4518 ac->ac_op = EXT4_MB_HISTORY_FREE;
4519 ac->ac_inode = inode;
4520 ac->ac_sb = sb;
4521 }
4522
4523do_more:
4524 overflow = 0;
4525 ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
4526
4527 /*
4528 * Check to see if we are freeing blocks across a group
4529 * boundary.
4530 */
4531 if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
4532 overflow = bit + count - EXT4_BLOCKS_PER_GROUP(sb);
4533 count -= overflow;
4534 }
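/*
 * Boundary example (illustrative, assuming 32768 blocks per group):
 * freeing count = 16 blocks starting at bit = 32760 gives
 * overflow = 32760 + 16 - 32768 = 8, so this pass frees the 8 blocks
 * that fit in the current group and the do_more loop restarts with
 * block += 8, count = 8 in the next group.
 */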
4535 bitmap_bh = ext4_read_block_bitmap(sb, block_group);
4536 if (!bitmap_bh) {
4537 err = -EIO;
4538 goto error_return;
4539 }
4540 gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
4541 if (!gdp) {
4542 err = -EIO;
4543 goto error_return;
4544 }
4545
4546 if (in_range(ext4_block_bitmap(sb, gdp), block, count) ||
4547 in_range(ext4_inode_bitmap(sb, gdp), block, count) ||
4548 in_range(block, ext4_inode_table(sb, gdp),
4549 EXT4_SB(sb)->s_itb_per_group) ||
4550 in_range(block + count - 1, ext4_inode_table(sb, gdp),
4551 EXT4_SB(sb)->s_itb_per_group)) {
4552
4553 ext4_error(sb, __func__,
4554 "Freeing blocks in system zone - "
4555 "Block = %lu, count = %lu", block, count);
4556 /* err = 0. ext4_std_error should be a no op */
4557 goto error_return;
4558 }
4559
4560 BUFFER_TRACE(bitmap_bh, "getting write access");
4561 err = ext4_journal_get_write_access(handle, bitmap_bh);
4562 if (err)
4563 goto error_return;
4564
4565 /*
4566 * We are about to modify some metadata. Call the journal APIs
4567 * to unshare ->b_data if a currently-committing transaction is
4568 * using it
4569 */
4570 BUFFER_TRACE(gd_bh, "get_write_access");
4571 err = ext4_journal_get_write_access(handle, gd_bh);
4572 if (err)
4573 goto error_return;
4574
4575 err = ext4_mb_load_buddy(sb, block_group, &e4b);
4576 if (err)
4577 goto error_return;
4578
4579#ifdef AGGRESSIVE_CHECK
4580 {
4581 int i;
4582 for (i = 0; i < count; i++)
4583 BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
4584 }
4585#endif
4586 mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
4587 bit, count);
4588
4589 /* We dirtied the bitmap block */
4590 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4591 err = ext4_journal_dirty_metadata(handle, bitmap_bh);
4592
4593 if (ac) {
4594 ac->ac_b_ex.fe_group = block_group;
4595 ac->ac_b_ex.fe_start = bit;
4596 ac->ac_b_ex.fe_len = count;
4597 ext4_mb_store_history(ac);
4598 }
4599
4600 if (metadata) {
4601 /* blocks being freed are metadata. these blocks shouldn't
4602 * be used until this transaction is committed */
4603 ext4_mb_free_metadata(handle, &e4b, block_group, bit, count);
4604 } else {
4605 ext4_lock_group(sb, block_group);
4606 mb_free_blocks(inode, &e4b, bit, count);
4607 ext4_mb_return_to_preallocation(inode, &e4b, block, count);
4608 ext4_unlock_group(sb, block_group);
4609 }
4610
4611 spin_lock(sb_bgl_lock(sbi, block_group));
4612 le16_add_cpu(&gdp->bg_free_blocks_count, count);
4613 gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
4614 spin_unlock(sb_bgl_lock(sbi, block_group));
4615 percpu_counter_add(&sbi->s_freeblocks_counter, count);
4616
4617 if (sbi->s_log_groups_per_flex) {
4618 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
4619 spin_lock(sb_bgl_lock(sbi, flex_group));
4620 sbi->s_flex_groups[flex_group].free_blocks += count;
4621 spin_unlock(sb_bgl_lock(sbi, flex_group));
4622 }
4623
4624 ext4_mb_release_desc(&e4b);
4625
4626 *freed += count;
4627
4628 /* And the group descriptor block */
4629 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
4630 ret = ext4_journal_dirty_metadata(handle, gd_bh);
4631 if (!err)
4632 err = ret;
4633
4634 if (overflow && !err) {
4635 block += count;
4636 count = overflow;
4637 put_bh(bitmap_bh);
4638 goto do_more;
4639 }
4640 sb->s_dirt = 1;
4641error_return:
4642 brelse(bitmap_bh);
4643 ext4_std_error(sb, err);
4644 if (ac)
4645 kmem_cache_free(ext4_ac_cachep, ac);
4646 return;
4647}