1 | /* |
2 | * linux/fs/ext3/balloc.c | |
3 | * | |
4 | * Copyright (C) 1992, 1993, 1994, 1995 | |
5 | * Remy Card (card@masi.ibp.fr) | |
6 | * Laboratoire MASI - Institut Blaise Pascal | |
7 | * Universite Pierre et Marie Curie (Paris VI) | |
8 | * | |
9 | * Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993 | |
10 | * Big-endian to little-endian byte-swapping/bitmaps by | |
11 | * David S. Miller (davem@caip.rutgers.edu), 1995 | |
12 | */ | |
13 | ||
14 | #include <linux/config.h> | |
15 | #include <linux/time.h> | |
16 | #include <linux/fs.h> | |
17 | #include <linux/jbd.h> | |
18 | #include <linux/ext3_fs.h> | |
19 | #include <linux/ext3_jbd.h> | |
20 | #include <linux/quotaops.h> | |
21 | #include <linux/buffer_head.h> | |
22 | ||
23 | /* | |
24 | * balloc.c contains the blocks allocation and deallocation routines | |
25 | */ | |
26 | ||
27 | /* | |
28 | * The free blocks are managed by bitmaps. A file system contains several | |
29 | * blocks groups. Each group contains 1 bitmap block for blocks, 1 bitmap | |
30 | * block for inodes, N blocks for the inode table and data blocks. | |
31 | * | |
32 | * The file system contains group descriptors which are located after the | |
33 | * super block. Each descriptor contains the number of the bitmap block and | |
34 | * the free blocks count in the block. The descriptors are loaded in memory | |
35 | * when a file system is mounted (see ext3_read_super). | |
36 | */ | |
37 | ||
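/*
 * Illustrative sketch, not part of the original file: the block-to-(group, bit)
 * arithmetic that the allocation and free paths below rely on, shown in
 * isolation.  The helper name is hypothetical; it only restates the
 * calculation done in ext3_free_blocks_sb() and ext3_new_block().
 */
#if 0
static void example_block_to_group_and_bit(struct super_block *sb,
					   unsigned long block)
{
	struct ext3_super_block *es = EXT3_SB(sb)->s_es;
	unsigned long group, bit;

	/* which block group the block belongs to */
	group = (block - le32_to_cpu(es->s_first_data_block)) /
		EXT3_BLOCKS_PER_GROUP(sb);
	/* which bit in that group's block bitmap describes it */
	bit = (block - le32_to_cpu(es->s_first_data_block)) %
	      EXT3_BLOCKS_PER_GROUP(sb);
	printk("block %lu -> group %lu, bit %lu\n", block, group, bit);
}
#endif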
38 | ||
39 | #define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1) | |
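/*
 * Worked example (added for illustration): in_range(b, first, len) tests
 * whether b lies in the closed interval [first, first + len - 1]; e.g.
 * in_range(5, 3, 4) is true because 5 lies in [3, 6], while
 * in_range(7, 3, 4) is false.
 */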
40 | ||
41 | struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb, | |
42 | unsigned int block_group, | |
43 | struct buffer_head ** bh) | |
44 | { | |
45 | unsigned long group_desc; | |
46 | unsigned long offset; | |
47 | struct ext3_group_desc * desc; | |
48 | struct ext3_sb_info *sbi = EXT3_SB(sb); | |
49 | ||
50 | if (block_group >= sbi->s_groups_count) { | |
51 | ext3_error (sb, "ext3_get_group_desc", | |
52 | "block_group >= groups_count - " | |
53 | "block_group = %d, groups_count = %lu", | |
54 | block_group, sbi->s_groups_count); | |
55 | ||
56 | return NULL; | |
57 | } | |
58 | smp_rmb(); | |
59 | ||
60 | group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(sb); | |
61 | offset = block_group & (EXT3_DESC_PER_BLOCK(sb) - 1); | |
62 | if (!sbi->s_group_desc[group_desc]) { | |
63 | ext3_error (sb, "ext3_get_group_desc", | |
64 | "Group descriptor not loaded - " | |
65 | "block_group = %d, group_desc = %lu, desc = %lu", | |
66 | block_group, group_desc, offset); | |
67 | return NULL; | |
68 | } | |
69 | ||
70 | desc = (struct ext3_group_desc *) sbi->s_group_desc[group_desc]->b_data; | |
71 | if (bh) | |
72 | *bh = sbi->s_group_desc[group_desc]; | |
73 | return desc + offset; | |
74 | } | |
75 | ||
76 | /* | |
77 | * Read the bitmap for a given block_group, reading into the specified | |
78 | * slot in the superblock's bitmap cache. | |
79 | * | |
80 | * Return buffer_head on success or NULL in case of failure. | |
81 | */ | |
82 | static struct buffer_head * | |
83 | read_block_bitmap(struct super_block *sb, unsigned int block_group) | |
84 | { | |
85 | struct ext3_group_desc * desc; | |
86 | struct buffer_head * bh = NULL; | |
87 | ||
88 | desc = ext3_get_group_desc (sb, block_group, NULL); | |
89 | if (!desc) | |
90 | goto error_out; | |
91 | bh = sb_bread(sb, le32_to_cpu(desc->bg_block_bitmap)); | |
92 | if (!bh) | |
93 | ext3_error (sb, "read_block_bitmap", | |
94 | "Cannot read block bitmap - " | |
95 | "block_group = %d, block_bitmap = %u", | |
96 | block_group, le32_to_cpu(desc->bg_block_bitmap)); | |
97 | error_out: | |
98 | return bh; | |
99 | } | |
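/*
 * Illustrative sketch, not part of the original file: the typical caller
 * pattern for read_block_bitmap() - look up the bitmap, use it, then drop
 * the reference with brelse().  The helper name is hypothetical;
 * ext3_count_free() is the bit-counting helper also used further down in
 * this file.
 */
#if 0
static int example_count_free_in_group(struct super_block *sb,
				       unsigned int group)
{
	struct buffer_head *bitmap_bh = read_block_bitmap(sb, group);
	int free;

	if (!bitmap_bh)
		return -EIO;
	free = ext3_count_free(bitmap_bh, sb->s_blocksize);
	brelse(bitmap_bh);
	return free;
}
#endif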
100 | /* | |
101 | * The reservation window structure operations | |
102 | * -------------------------------------------- | |
103 | * Operations include: | |
104 | * dump, find, add, remove, is_empty, find_next_reservable_window, etc. | |
105 | * | |
106 | * We use sorted double linked list for the per-filesystem reservation | |
107 | * window list. (like in vm_region). | |
108 | * | |
109 | * Initially, we keep those small operations in the abstract functions, | |
110 | * so later if we need a better searching tree than double linked-list, | |
111 | * we could easily switch to that without changing too much | |
112 | * code. | |
113 | */ | |
114 | #if 0 | |
115 | static void __rsv_window_dump(struct rb_root *root, int verbose, | |
116 | const char *fn) | |
117 | { | |
118 | struct rb_node *n; | |
119 | struct ext3_reserve_window_node *rsv, *prev; | |
120 | int bad; | |
121 | ||
122 | restart: | |
123 | n = rb_first(root); | |
124 | bad = 0; | |
125 | prev = NULL; | |
126 | ||
127 | printk("Block Allocation Reservation Windows Map (%s):\n", fn); | |
128 | while (n) { | |
129 | rsv = list_entry(n, struct ext3_reserve_window_node, rsv_node); | |
130 | if (verbose) | |
131 | printk("reservation window 0x%p " | |
132 | "start: %d, end: %d\n", | |
133 | rsv, rsv->rsv_start, rsv->rsv_end); | |
134 | if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) { | |
135 | printk("Bad reservation %p (start >= end)\n", | |
136 | rsv); | |
137 | bad = 1; | |
138 | } | |
139 | if (prev && prev->rsv_end >= rsv->rsv_start) { | |
140 | printk("Bad reservation %p (prev->end >= start)\n", | |
141 | rsv); | |
142 | bad = 1; | |
143 | } | |
144 | if (bad) { | |
145 | if (!verbose) { | |
146 | printk("Restarting reservation walk in verbose mode\n"); | |
147 | verbose = 1; | |
148 | goto restart; | |
149 | } | |
150 | } | |
151 | n = rb_next(n); | |
152 | prev = rsv; | |
153 | } | |
154 | printk("Window map complete.\n"); | |
155 | if (bad) | |
156 | BUG(); | |
157 | } | |
158 | #define rsv_window_dump(root, verbose) \ | |
159 | __rsv_window_dump((root), (verbose), __FUNCTION__) | |
160 | #else | |
161 | #define rsv_window_dump(root, verbose) do {} while (0) | |
162 | #endif | |
163 | ||
164 | static int | |
165 | goal_in_my_reservation(struct ext3_reserve_window *rsv, int goal, | |
166 | unsigned int group, struct super_block * sb) | |
167 | { | |
168 | unsigned long group_first_block, group_last_block; | |
169 | ||
170 | group_first_block = le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) + | |
171 | group * EXT3_BLOCKS_PER_GROUP(sb); | |
172 | group_last_block = group_first_block + EXT3_BLOCKS_PER_GROUP(sb) - 1; | |
173 | ||
174 | if ((rsv->_rsv_start > group_last_block) || | |
175 | (rsv->_rsv_end < group_first_block)) | |
176 | return 0; | |
177 | if ((goal >= 0) && ((goal + group_first_block < rsv->_rsv_start) | |
178 | || (goal + group_first_block > rsv->_rsv_end))) | |
179 | return 0; | |
180 | return 1; | |
181 | } | |
182 | ||
183 | /* | |
184 | * Find the reserved window which includes the goal, or the previous one | |
185 | * if the goal is not in any window. | |
186 | * Returns NULL if there are no windows or if all windows start after the goal. | |
187 | */ | |
188 | static struct ext3_reserve_window_node * | |
189 | search_reserve_window(struct rb_root *root, unsigned long goal) | |
190 | { | |
191 | struct rb_node *n = root->rb_node; | |
192 | struct ext3_reserve_window_node *rsv; | |
193 | ||
194 | if (!n) | |
195 | return NULL; | |
196 | ||
197 | do { | |
198 | rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node); | |
199 | ||
200 | if (goal < rsv->rsv_start) | |
201 | n = n->rb_left; | |
202 | else if (goal > rsv->rsv_end) | |
203 | n = n->rb_right; | |
204 | else | |
205 | return rsv; | |
206 | } while (n); | |
207 | /* | |
208 | * We've fallen off the end of the tree: the goal wasn't inside | |
209 | * any particular node. OK, the previous node must be to one | |
210 | * side of the interval containing the goal. If it's the RHS, | |
211 | * we need to back up one. | |
212 | */ | |
213 | if (rsv->rsv_start > goal) { | |
214 | n = rb_prev(&rsv->rsv_node); | |
215 | rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node); | |
216 | } | |
217 | return rsv; | |
218 | } | |
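/*
 * Worked example (added for illustration) of the return contract above.
 * With two windows [10,20] and [40,50] in the tree, a goal of 15 returns
 * the [10,20] node directly; a goal of 30 falls between the windows, so the
 * walk ends at [40,50] and then backs up one node to return [10,20]; a goal
 * of 5 lies before every window, so NULL is returned.
 */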
219 | ||
220 | void ext3_rsv_window_add(struct super_block *sb, | |
221 | struct ext3_reserve_window_node *rsv) | |
222 | { | |
223 | struct rb_root *root = &EXT3_SB(sb)->s_rsv_window_root; | |
224 | struct rb_node *node = &rsv->rsv_node; | |
225 | unsigned int start = rsv->rsv_start; | |
226 | ||
227 | struct rb_node ** p = &root->rb_node; | |
228 | struct rb_node * parent = NULL; | |
229 | struct ext3_reserve_window_node *this; | |
230 | ||
231 | while (*p) | |
232 | { | |
233 | parent = *p; | |
234 | this = rb_entry(parent, struct ext3_reserve_window_node, rsv_node); | |
235 | ||
236 | if (start < this->rsv_start) | |
237 | p = &(*p)->rb_left; | |
238 | else if (start > this->rsv_end) | |
239 | p = &(*p)->rb_right; | |
240 | else | |
241 | BUG(); | |
242 | } | |
243 | ||
244 | rb_link_node(node, parent, p); | |
245 | rb_insert_color(node, root); | |
246 | } | |
247 | ||
248 | static void rsv_window_remove(struct super_block *sb, | |
249 | struct ext3_reserve_window_node *rsv) | |
250 | { | |
251 | rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED; | |
252 | rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED; | |
253 | rsv->rsv_alloc_hit = 0; | |
254 | rb_erase(&rsv->rsv_node, &EXT3_SB(sb)->s_rsv_window_root); | |
255 | } | |
256 | ||
257 | static inline int rsv_is_empty(struct ext3_reserve_window *rsv) | |
258 | { | |
259 | /* a valid reservation end block could not be 0 */ | |
260 | return (rsv->_rsv_end == EXT3_RESERVE_WINDOW_NOT_ALLOCATED); | |
261 | } | |
262 | void ext3_init_block_alloc_info(struct inode *inode) | |
263 | { | |
264 | struct ext3_inode_info *ei = EXT3_I(inode); | |
265 | struct ext3_block_alloc_info *block_i = ei->i_block_alloc_info; | |
266 | struct super_block *sb = inode->i_sb; | |
267 | ||
268 | block_i = kmalloc(sizeof(*block_i), GFP_NOFS); | |
269 | if (block_i) { | |
270 | struct ext3_reserve_window_node *rsv = &block_i->rsv_window_node; | |
271 | ||
272 | rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED; | |
273 | rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED; | |
274 | ||
275 | /* | |
276 | * if filesystem is mounted with NORESERVATION, the goal | |
277 | * reservation window size is set to zero to indicate | |
278 | * block reservation is off | |
279 | */ | |
280 | if (!test_opt(sb, RESERVATION)) | |
281 | rsv->rsv_goal_size = 0; | |
282 | else | |
283 | rsv->rsv_goal_size = EXT3_DEFAULT_RESERVE_BLOCKS; | |
284 | rsv->rsv_alloc_hit = 0; | |
285 | block_i->last_alloc_logical_block = 0; | |
286 | block_i->last_alloc_physical_block = 0; | |
287 | } | |
288 | ei->i_block_alloc_info = block_i; | |
289 | } | |
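/*
 * Added note (hedged): callers outside this file typically initialize the
 * per-inode allocation info lazily, along the lines of
 *
 *	if (!EXT3_I(inode)->i_block_alloc_info)
 *		ext3_init_block_alloc_info(inode);
 *
 * so an inode only pays for the reservation structure once it actually
 * allocates blocks.
 */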
290 | ||
291 | void ext3_discard_reservation(struct inode *inode) | |
292 | { | |
293 | struct ext3_inode_info *ei = EXT3_I(inode); | |
294 | struct ext3_block_alloc_info *block_i = ei->i_block_alloc_info; | |
295 | struct ext3_reserve_window_node *rsv; | |
296 | spinlock_t *rsv_lock = &EXT3_SB(inode->i_sb)->s_rsv_window_lock; | |
297 | ||
298 | if (!block_i) | |
299 | return; | |
300 | ||
301 | rsv = &block_i->rsv_window_node; | |
302 | if (!rsv_is_empty(&rsv->rsv_window)) { | |
303 | spin_lock(rsv_lock); | |
304 | if (!rsv_is_empty(&rsv->rsv_window)) | |
305 | rsv_window_remove(inode->i_sb, rsv); | |
306 | spin_unlock(rsv_lock); | |
307 | } | |
308 | } | |
309 | ||
310 | /* Free given blocks, update quota and i_blocks field */ | |
311 | void ext3_free_blocks_sb(handle_t *handle, struct super_block *sb, | |
312 | unsigned long block, unsigned long count, | |
313 | int *pdquot_freed_blocks) | |
314 | { | |
315 | struct buffer_head *bitmap_bh = NULL; | |
316 | struct buffer_head *gd_bh; | |
317 | unsigned long block_group; | |
318 | unsigned long bit; | |
319 | unsigned long i; | |
320 | unsigned long overflow; | |
321 | struct ext3_group_desc * desc; | |
322 | struct ext3_super_block * es; | |
323 | struct ext3_sb_info *sbi; | |
324 | int err = 0, ret; | |
325 | unsigned group_freed; | |
326 | ||
327 | *pdquot_freed_blocks = 0; | |
328 | sbi = EXT3_SB(sb); | |
329 | es = sbi->s_es; | |
330 | if (block < le32_to_cpu(es->s_first_data_block) || | |
331 | block + count < block || | |
332 | block + count > le32_to_cpu(es->s_blocks_count)) { | |
333 | ext3_error (sb, "ext3_free_blocks", | |
334 | "Freeing blocks not in datazone - " | |
335 | "block = %lu, count = %lu", block, count); | |
336 | goto error_return; | |
337 | } | |
338 | ||
339 | ext3_debug ("freeing block(s) %lu-%lu\n", block, block + count - 1); | |
340 | ||
341 | do_more: | |
342 | overflow = 0; | |
343 | block_group = (block - le32_to_cpu(es->s_first_data_block)) / | |
344 | EXT3_BLOCKS_PER_GROUP(sb); | |
345 | bit = (block - le32_to_cpu(es->s_first_data_block)) % | |
346 | EXT3_BLOCKS_PER_GROUP(sb); | |
347 | /* | |
348 | * Check to see if we are freeing blocks across a group | |
349 | * boundary. | |
350 | */ | |
351 | if (bit + count > EXT3_BLOCKS_PER_GROUP(sb)) { | |
352 | overflow = bit + count - EXT3_BLOCKS_PER_GROUP(sb); | |
353 | count -= overflow; | |
354 | } | |
355 | brelse(bitmap_bh); | |
356 | bitmap_bh = read_block_bitmap(sb, block_group); | |
357 | if (!bitmap_bh) | |
358 | goto error_return; | |
359 | desc = ext3_get_group_desc (sb, block_group, &gd_bh); | |
360 | if (!desc) | |
361 | goto error_return; | |
362 | ||
363 | if (in_range (le32_to_cpu(desc->bg_block_bitmap), block, count) || | |
364 | in_range (le32_to_cpu(desc->bg_inode_bitmap), block, count) || | |
365 | in_range (block, le32_to_cpu(desc->bg_inode_table), | |
366 | sbi->s_itb_per_group) || | |
367 | in_range (block + count - 1, le32_to_cpu(desc->bg_inode_table), | |
368 | sbi->s_itb_per_group)) | |
369 | ext3_error (sb, "ext3_free_blocks", | |
370 | "Freeing blocks in system zones - " | |
371 | "Block = %lu, count = %lu", | |
372 | block, count); | |
373 | ||
374 | /* | |
375 | * We are about to start releasing blocks in the bitmap, | |
376 | * so we need undo access. | |
377 | */ | |
378 | /* @@@ check errors */ | |
379 | BUFFER_TRACE(bitmap_bh, "getting undo access"); | |
380 | err = ext3_journal_get_undo_access(handle, bitmap_bh); | |
381 | if (err) | |
382 | goto error_return; | |
383 | ||
384 | /* | |
385 | * We are about to modify some metadata. Call the journal APIs | |
386 | * to unshare ->b_data if a currently-committing transaction is | |
387 | * using it | |
388 | */ | |
389 | BUFFER_TRACE(gd_bh, "get_write_access"); | |
390 | err = ext3_journal_get_write_access(handle, gd_bh); | |
391 | if (err) | |
392 | goto error_return; | |
393 | ||
394 | jbd_lock_bh_state(bitmap_bh); | |
395 | ||
396 | for (i = 0, group_freed = 0; i < count; i++) { | |
397 | /* | |
398 | * An HJ special. This is expensive... | |
399 | */ | |
400 | #ifdef CONFIG_JBD_DEBUG | |
401 | jbd_unlock_bh_state(bitmap_bh); | |
402 | { | |
403 | struct buffer_head *debug_bh; | |
404 | debug_bh = sb_find_get_block(sb, block + i); | |
405 | if (debug_bh) { | |
406 | BUFFER_TRACE(debug_bh, "Deleted!"); | |
407 | if (!bh2jh(bitmap_bh)->b_committed_data) | |
408 | BUFFER_TRACE(debug_bh, | |
409 | "No commited data in bitmap"); | |
410 | BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap"); | |
411 | __brelse(debug_bh); | |
412 | } | |
413 | } | |
414 | jbd_lock_bh_state(bitmap_bh); | |
415 | #endif | |
416 | if (need_resched()) { | |
417 | jbd_unlock_bh_state(bitmap_bh); | |
418 | cond_resched(); | |
419 | jbd_lock_bh_state(bitmap_bh); | |
420 | } | |
421 | /* @@@ This prevents newly-allocated data from being | |
422 | * freed and then reallocated within the same | |
423 | * transaction. | |
424 | * | |
425 | * Ideally we would want to allow that to happen, but to | |
426 | * do so requires making journal_forget() capable of | |
427 | * revoking the queued write of a data block, which | |
428 | * implies blocking on the journal lock. *forget() | |
429 | * cannot block due to truncate races. | |
430 | * | |
431 | * Eventually we can fix this by making journal_forget() | |
432 | * return a status indicating whether or not it was able | |
433 | * to revoke the buffer. On successful revoke, it is | |
434 | * safe not to set the allocation bit in the committed | |
435 | * bitmap, because we know that there is no outstanding | |
436 | * activity on the buffer any more and so it is safe to | |
437 | * reallocate it. | |
438 | */ | |
439 | BUFFER_TRACE(bitmap_bh, "set in b_committed_data"); | |
440 | J_ASSERT_BH(bitmap_bh, | |
441 | bh2jh(bitmap_bh)->b_committed_data != NULL); | |
442 | ext3_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i, | |
443 | bh2jh(bitmap_bh)->b_committed_data); | |
444 | ||
445 | /* | |
446 | * We clear the bit in the bitmap after setting the committed | |
447 | * data bit, because this is the reverse order to that which | |
448 | * the allocator uses. | |
449 | */ | |
450 | BUFFER_TRACE(bitmap_bh, "clear bit"); | |
451 | if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, block_group), | |
452 | bit + i, bitmap_bh->b_data)) { | |
453 | jbd_unlock_bh_state(bitmap_bh); | |
454 | ext3_error(sb, __FUNCTION__, | |
455 | "bit already cleared for block %lu", block + i); | |
456 | jbd_lock_bh_state(bitmap_bh); | |
457 | BUFFER_TRACE(bitmap_bh, "bit already cleared"); | |
458 | } else { | |
459 | group_freed++; | |
460 | } | |
461 | } | |
462 | jbd_unlock_bh_state(bitmap_bh); | |
463 | ||
464 | spin_lock(sb_bgl_lock(sbi, block_group)); | |
465 | desc->bg_free_blocks_count = | |
466 | cpu_to_le16(le16_to_cpu(desc->bg_free_blocks_count) + | |
467 | group_freed); | |
468 | spin_unlock(sb_bgl_lock(sbi, block_group)); | |
469 | percpu_counter_mod(&sbi->s_freeblocks_counter, count); | |
470 | ||
471 | /* We dirtied the bitmap block */ | |
472 | BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); | |
473 | err = ext3_journal_dirty_metadata(handle, bitmap_bh); | |
474 | ||
475 | /* And the group descriptor block */ | |
476 | BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); | |
477 | ret = ext3_journal_dirty_metadata(handle, gd_bh); | |
478 | if (!err) err = ret; | |
479 | *pdquot_freed_blocks += group_freed; | |
480 | ||
481 | if (overflow && !err) { | |
482 | block += count; | |
483 | count = overflow; | |
484 | goto do_more; | |
485 | } | |
486 | sb->s_dirt = 1; | |
487 | error_return: | |
488 | brelse(bitmap_bh); | |
489 | ext3_std_error(sb, err); | |
490 | return; | |
491 | } | |
492 | ||
493 | /* Free given blocks, update quota and i_blocks field */ | |
494 | void ext3_free_blocks(handle_t *handle, struct inode *inode, | |
495 | unsigned long block, unsigned long count) | |
496 | { | |
497 | struct super_block * sb; | |
498 | int dquot_freed_blocks; | |
499 | ||
500 | sb = inode->i_sb; | |
501 | if (!sb) { | |
502 | printk ("ext3_free_blocks: nonexistent device"); | |
503 | return; | |
504 | } | |
505 | ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks); | |
506 | if (dquot_freed_blocks) | |
507 | DQUOT_FREE_BLOCK(inode, dquot_freed_blocks); | |
508 | return; | |
509 | } | |
510 | ||
511 | /* | |
512 | * For ext3 allocations, we must not reuse any blocks which are | |
513 | * allocated in the bitmap buffer's "last committed data" copy. This | |
514 | * prevents deletes from freeing up the page for reuse until we have | |
515 | * committed the delete transaction. | |
516 | * | |
517 | * If we didn't do this, then deleting something and reallocating it as | |
518 | * data would allow the old block to be overwritten before the | |
519 | * transaction committed (because we force data to disk before commit). | |
520 | * This would lead to corruption if we crashed between overwriting the | |
521 | * data and committing the delete. | |
522 | * | |
523 | * @@@ We may want to make this allocation behaviour conditional on | |
524 | * data-writes at some point, and disable it for metadata allocations or | |
525 | * sync-data inodes. | |
526 | */ | |
527 | static int ext3_test_allocatable(int nr, struct buffer_head *bh) | |
528 | { | |
529 | int ret; | |
530 | struct journal_head *jh = bh2jh(bh); | |
531 | ||
532 | if (ext3_test_bit(nr, bh->b_data)) | |
533 | return 0; | |
534 | ||
535 | jbd_lock_bh_state(bh); | |
536 | if (!jh->b_committed_data) | |
537 | ret = 1; | |
538 | else | |
539 | ret = !ext3_test_bit(nr, jh->b_committed_data); | |
540 | jbd_unlock_bh_state(bh); | |
541 | return ret; | |
542 | } | |
543 | ||
544 | static int | |
545 | bitmap_search_next_usable_block(int start, struct buffer_head *bh, | |
546 | int maxblocks) | |
547 | { | |
548 | int next; | |
549 | struct journal_head *jh = bh2jh(bh); | |
550 | ||
551 | /* | |
552 | * The bitmap search --- search forward alternately through the actual | |
553 | * bitmap and the last-committed copy until we find a bit free in | |
554 | * both | |
555 | */ | |
556 | while (start < maxblocks) { | |
557 | next = ext3_find_next_zero_bit(bh->b_data, maxblocks, start); | |
558 | if (next >= maxblocks) | |
559 | return -1; | |
560 | if (ext3_test_allocatable(next, bh)) | |
561 | return next; | |
562 | jbd_lock_bh_state(bh); | |
563 | if (jh->b_committed_data) | |
564 | start = ext3_find_next_zero_bit(jh->b_committed_data, | |
565 | maxblocks, next); | |
566 | jbd_unlock_bh_state(bh); | |
567 | } | |
568 | return -1; | |
569 | } | |
570 | ||
571 | /* | |
572 | * Find an allocatable block in a bitmap. We honour both the bitmap and | |
573 | * its last-committed copy (if that exists), and perform the "most | |
574 | * appropriate allocation" algorithm of looking for a free block near | |
575 | * the initial goal; then for a free byte somewhere in the bitmap; then | |
576 | * for any free bit in the bitmap. | |
577 | */ | |
578 | static int | |
579 | find_next_usable_block(int start, struct buffer_head *bh, int maxblocks) | |
580 | { | |
581 | int here, next; | |
582 | char *p, *r; | |
583 | ||
584 | if (start > 0) { | |
585 | /* | |
586 | * The goal was occupied; search forward for a free | |
587 | * block within the next XX blocks. | |
588 | * | |
589 | * end_goal is more or less random, but it has to be | |
590 | * less than EXT3_BLOCKS_PER_GROUP. Aligning up to the | |
591 | * next 64-bit boundary is simple.. | |
592 | */ | |
593 | int end_goal = (start + 63) & ~63; | |
594 | if (end_goal > maxblocks) | |
595 | end_goal = maxblocks; | |
596 | here = ext3_find_next_zero_bit(bh->b_data, end_goal, start); | |
597 | if (here < end_goal && ext3_test_allocatable(here, bh)) | |
598 | return here; | |
599 | ext3_debug("Bit not found near goal\n"); | |
600 | } | |
601 | ||
602 | here = start; | |
603 | if (here < 0) | |
604 | here = 0; | |
605 | ||
606 | p = ((char *)bh->b_data) + (here >> 3); | |
607 | r = memscan(p, 0, (maxblocks - here + 7) >> 3); | |
608 | next = (r - ((char *)bh->b_data)) << 3; | |
609 | ||
610 | if (next < maxblocks && next >= start && ext3_test_allocatable(next, bh)) | |
611 | return next; | |
612 | ||
613 | /* | |
614 | * The bitmap search --- search forward alternately through the actual | |
615 | * bitmap and the last-committed copy until we find a bit free in | |
616 | * both | |
617 | */ | |
618 | here = bitmap_search_next_usable_block(here, bh, maxblocks); | |
619 | return here; | |
620 | } | |
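/*
 * Worked examples (added for illustration) for the search above.  The
 * aligned goal window: for start = 100, end_goal = (100 + 63) & ~63 = 128,
 * so the near-goal search only looks at bits 100..127.  The free-byte scan:
 * if the first all-zero byte of the bitmap is byte 12, memscan() returns
 * b_data + 12 and next = 12 << 3 = 96, i.e. the first bit of that byte.
 */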
621 | ||
622 | /* | |
623 | * We think we can allocate this block in this bitmap. Try to set the bit. | |
624 | * If that succeeds then check that nobody has allocated and then freed the | |
625 | * block since we saw that it was not marked in b_committed_data. If it _was_ | |
626 | * allocated and freed then clear the bit in the bitmap again and return | |
627 | * zero (failure). | |
628 | */ | |
629 | static inline int | |
630 | claim_block(spinlock_t *lock, int block, struct buffer_head *bh) | |
631 | { | |
632 | struct journal_head *jh = bh2jh(bh); | |
633 | int ret; | |
634 | ||
635 | if (ext3_set_bit_atomic(lock, block, bh->b_data)) | |
636 | return 0; | |
637 | jbd_lock_bh_state(bh); | |
638 | if (jh->b_committed_data && ext3_test_bit(block,jh->b_committed_data)) { | |
639 | ext3_clear_bit_atomic(lock, block, bh->b_data); | |
640 | ret = 0; | |
641 | } else { | |
642 | ret = 1; | |
643 | } | |
644 | jbd_unlock_bh_state(bh); | |
645 | return ret; | |
646 | } | |
647 | ||
648 | /* | |
649 | * If we failed to allocate the desired block then we may end up crossing to a | |
650 | * new bitmap. In that case we must release write access to the old one via | |
651 | * ext3_journal_release_buffer(), else we'll run out of credits. | |
652 | */ | |
653 | static int | |
654 | ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group, | |
655 | struct buffer_head *bitmap_bh, int goal, struct ext3_reserve_window *my_rsv) | |
656 | { | |
657 | int group_first_block, start, end; | |
658 | ||
659 | /* we do allocation within the reservation window if we have a window */ | |
660 | if (my_rsv) { | |
661 | group_first_block = | |
662 | le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) + | |
663 | group * EXT3_BLOCKS_PER_GROUP(sb); | |
664 | if (my_rsv->_rsv_start >= group_first_block) | |
665 | start = my_rsv->_rsv_start - group_first_block; | |
666 | else | |
667 | /* reservation window crosses group boundary */ | |
668 | start = 0; | |
669 | end = my_rsv->_rsv_end - group_first_block + 1; | |
670 | if (end > EXT3_BLOCKS_PER_GROUP(sb)) | |
671 | /* reservation window crosses group boundary */ | |
672 | end = EXT3_BLOCKS_PER_GROUP(sb); | |
673 | if ((start <= goal) && (goal < end)) | |
674 | start = goal; | |
675 | else | |
676 | goal = -1; | |
677 | } else { | |
678 | if (goal > 0) | |
679 | start = goal; | |
680 | else | |
681 | start = 0; | |
682 | end = EXT3_BLOCKS_PER_GROUP(sb); | |
683 | } | |
684 | ||
685 | BUG_ON(start > EXT3_BLOCKS_PER_GROUP(sb)); | |
686 | ||
687 | repeat: | |
688 | if (goal < 0 || !ext3_test_allocatable(goal, bitmap_bh)) { | |
689 | goal = find_next_usable_block(start, bitmap_bh, end); | |
690 | if (goal < 0) | |
691 | goto fail_access; | |
692 | if (!my_rsv) { | |
693 | int i; | |
694 | ||
695 | for (i = 0; i < 7 && goal > start && | |
696 | ext3_test_allocatable(goal - 1, | |
697 | bitmap_bh); | |
698 | i++, goal--) | |
699 | ; | |
700 | } | |
701 | } | |
702 | start = goal; | |
703 | ||
704 | if (!claim_block(sb_bgl_lock(EXT3_SB(sb), group), goal, bitmap_bh)) { | |
705 | /* | |
706 | * The block was allocated by another thread, or it was | |
707 | * allocated and then freed by another thread | |
708 | */ | |
709 | start++; | |
710 | goal++; | |
711 | if (start >= end) | |
712 | goto fail_access; | |
713 | goto repeat; | |
714 | } | |
715 | return goal; | |
716 | fail_access: | |
717 | return -1; | |
718 | } | |
719 | ||
720 | /** | |
721 | * find_next_reservable_window(): | |
722 | * find a reservable space within the given range. | |
723 | * It does not allocate the reservation window for now: | |
724 | * alloc_new_reservation() will do the work later. | |
725 | * | |
726 | * @search_head: the head of the searching list; | |
727 | * This is not necessarily the list head of the whole filesystem | |
728 | * | |
729 | * We have both head and start_block to assist the search | |
730 | * for the reservable space. The list starts from head, | |
731 | * but we will shift to the place where start_block is, | |
732 | * then start from there, when looking for a reservable space. | |
733 | * | |
734 | * @size: the target new reservation window size | |
735 | * | |
736 | * @group_first_block: the first block we consider to start | |
737 | * the real search from | |
738 | * | |
739 | * @last_block: | |
740 | * the maximum block number that our goal reservable space | |
741 | * could start from. This is normally the last block in this | |
742 | * group. The search ends when we find that the start of the next | |
743 | * possible reservable space is beyond this boundary. | |
744 | * This handles a reservation window request that crosses the | |
745 | * group boundary. | |
746 | * | |
747 | * Basically we search the given range (start_block, last_block), | |
748 | * rather than the whole reservation list, to find a free region | |
749 | * that is of the requested size and has not | |
750 | * been reserved. | |
751 | * | |
752 | */ | |
753 | static int find_next_reservable_window( | |
754 | struct ext3_reserve_window_node *search_head, | |
755 | struct ext3_reserve_window_node *my_rsv, | |
756 | struct super_block * sb, int start_block, | |
757 | int last_block) |
758 | { | |
759 | struct rb_node *next; | |
760 | struct ext3_reserve_window_node *rsv, *prev; | |
761 | int cur; | |
762 | int size = my_rsv->rsv_goal_size; | |
763 | |
764 | /* TODO: make the start of the reservation window byte-aligned */ | |
765 | /* cur = *start_block & ~7;*/ | |
766 | cur = start_block; | |
767 | rsv = search_head; |
768 | if (!rsv) | |
769 | return -1; | |
770 | |
771 | while (1) { | |
772 | if (cur <= rsv->rsv_end) | |
773 | cur = rsv->rsv_end + 1; | |
774 | ||
775 | /* TODO? | |
776 | * in the case we could not find a reservable space | |
777 | * that is what is expected, during the re-search, we could | |
778 | * remember what's the largest reservable space we could have | |
779 | * and return that one. | |
780 | * | |
781 | * For now it will fail if we could not find the reservable | |
782 | * space with expected-size (or more)... | |
783 | */ | |
784 | if (cur > last_block) | |
785 | return -1; /* fail */ | |
786 | |
787 | prev = rsv; | |
788 | next = rb_next(&rsv->rsv_node); | |
789 | rsv = list_entry(next,struct ext3_reserve_window_node,rsv_node); | |
790 | |
791 | /* | |
792 | * Reached the last reservation, we can just append to the | |
793 | * previous one. | |
794 | */ | |
795 | if (!next) | |
796 | break; | |
797 | ||
798 | if (cur + size <= rsv->rsv_start) { | |
799 | /* | |
800 | * Found a reservable space big enough. We could | |
801 | * have a reservation across the group boundary here | |
802 | */ | |
803 | break; | |
804 | } | |
805 | } | |
806 | /* | |
807 | * We come here either: | |
808 | * when we reach the end of the whole list and there is empty | |
809 | * reservable space after the last entry in the list, in which | |
810 | * case we append to the end of the list; | |
811 | * | |
812 | * or when we found a reservable space in the middle of the list, | |
813 | * in which case we return the reservation window that we could | |
814 | * append to, and succeed. | |
815 | */ | |
816 | |
817 | if ((prev != my_rsv) && (!rsv_is_empty(&my_rsv->rsv_window))) | |
818 | rsv_window_remove(sb, my_rsv); | |
819 | ||
820 | /* | |
821 | * Let's book the whole available window for now. We will check the | |
822 | * disk bitmap later and then, if there are free blocks, we adjust | |
823 | * the window size if it's larger than requested. | |
824 | * Otherwise, we will remove this node from the tree the next time | |
825 | * find_next_reservable_window() is called. | |
826 | */ | |
827 | my_rsv->rsv_start = cur; | |
828 | my_rsv->rsv_end = cur + size - 1; | |
829 | my_rsv->rsv_alloc_hit = 0; | |
830 | ||
831 | if (prev != my_rsv) | |
832 | ext3_rsv_window_add(sb, my_rsv); | |
833 | ||
834 | return 0; | |
835 | } |
836 | ||
837 | /** | |
838 | * alloc_new_reservation()--allocate a new reservation window | |
839 | * | |
840 | * To make a new reservation, we search part of the filesystem | |
841 | * reservation list (the list that inside the group). We try to | |
842 | * allocate a new reservation window near the allocation goal, | |
843 | * or the beginning of the group, if there is no goal. | |
844 | * | |
845 | * We first find a reservable space after the goal, then from | |
846 | * there we check the bitmap for the first free block after | |
847 | * it. If there is no free block until the end of the group, the | |
848 | * whole group is full and we fail. Otherwise, check whether the | |
849 | * free block is inside the expected reservable space; if so, we | |
850 | * succeed. | |
851 | * If the first free block is outside the reservable space, we | |
852 | * start from that free block, search for the next available | |
853 | * space, and go on. | |
854 | * | |
855 | * On success, a new reservation is found and inserted into the list. | |
856 | * It contains at least one free block, and it does not overlap with other | |
857 | * reservation windows. | |
858 | * | |
859 | * failed: we failed to find a reservation window in this group | |
860 | * | |
861 | * @rsv: the reservation | |
862 | * | |
863 | * @goal: The goal (group-relative). It is where the search for a | |
864 | * free reservable space should start from. | |
865 | * if we have a goal(goal >0 ), then start from there, | |
866 | * no goal(goal = -1), we start from the first block | |
867 | * of the group. | |
868 | * | |
869 | * @sb: the super block | |
870 | * @group: the group we are trying to allocate in | |
871 | * @bitmap_bh: the block group block bitmap | |
872 | * | |
873 | */ |
874 | static int alloc_new_reservation(struct ext3_reserve_window_node *my_rsv, | |
875 | int goal, struct super_block *sb, | |
876 | unsigned int group, struct buffer_head *bitmap_bh) | |
877 | { | |
878 | struct ext3_reserve_window_node *search_head; | |
879 | int group_first_block, group_end_block, start_block; | |
880 | int first_free_block; | |
881 | struct rb_root *fs_rsv_root = &EXT3_SB(sb)->s_rsv_window_root; |
882 | unsigned long size; | |
883 | int ret; | |
884 | spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock; | |
885 | |
886 | group_first_block = le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) + | |
887 | group * EXT3_BLOCKS_PER_GROUP(sb); | |
888 | group_end_block = group_first_block + EXT3_BLOCKS_PER_GROUP(sb) - 1; | |
889 | ||
890 | if (goal < 0) | |
891 | start_block = group_first_block; | |
892 | else | |
893 | start_block = goal + group_first_block; | |
894 | ||
895 | size = my_rsv->rsv_goal_size; | |
896 | ||
897 | if (!rsv_is_empty(&my_rsv->rsv_window)) { |
898 | /* | |
899 | * if the old reservation crosses the group boundary | |
900 | * and the goal is inside the old reservation window, | |
901 | * we will come here when we have just failed to allocate from | |
902 | * the first part of the window. We still have another part | |
903 | * that belongs to the next group. In this case, there is no | |
904 | * point in discarding our window and trying to allocate a new | |
905 | * one in this group (which would fail); we should | |
906 | * keep the reservation window and simply move on. | |
907 | * | |
908 | * Maybe we could shift the start block of the reservation | |
909 | * window to the first block of next group. | |
910 | */ | |
911 | ||
912 | if ((my_rsv->rsv_start <= group_end_block) && | |
913 | (my_rsv->rsv_end > group_end_block) && | |
914 | (start_block >= my_rsv->rsv_start)) | |
915 | return -1; | |
916 | ||
917 | if ((my_rsv->rsv_alloc_hit > | |
918 | (my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) { | |
919 | /* | |
920 | * if the previous allocation hit ratio is greater than half, | |
921 | * we double the size of the reservation window next time; | |
922 | * otherwise keep it the same | |
923 | */ | |
924 | size = size * 2; | |
925 | if (size > EXT3_MAX_RESERVE_BLOCKS) | |
926 | size = EXT3_MAX_RESERVE_BLOCKS; | |
927 | my_rsv->rsv_goal_size= size; | |
928 | } | |
929 | } | |
930 | ||
931 | spin_lock(rsv_lock); | |
932 | /* |
933 | * shift the search start to the window near the goal block | |
934 | */ | |
935 | search_head = search_reserve_window(fs_rsv_root, start_block); | |
936 | ||
937 | /* | |
938 | * find_next_reservable_window() simply finds a reservable window | |
939 | * inside the given range(start_block, group_end_block). | |
940 | * | |
941 | * To make sure the reservation window has a free bit inside it, we | |
942 | * need to check the bitmap after we found a reservable window. | |
943 | */ | |
944 | retry: | |
945 | ret = find_next_reservable_window(search_head, my_rsv, sb, |
946 | start_block, group_end_block); | |
947 | ||
948 | if (ret == -1) { | |
949 | if (!rsv_is_empty(&my_rsv->rsv_window)) | |
950 | rsv_window_remove(sb, my_rsv); | |
951 | spin_unlock(rsv_lock); | |
952 | return -1; | |
953 | } | |
954 | ||
955 | /* |
956 | * On success, find_next_reservable_window() returns the | |
957 | * reservation window where there is a reservable space after it. | |
958 | * Before we reserve this reservable space, we need | |
959 | * to make sure there is at least a free block inside this region. | |
960 | * | |
961 | * Search the first free bit on the block bitmap and the copy of the | |
962 | * last committed bitmap alternately, until we find an allocatable | |
963 | * block. The search starts from the start block of the reservable | |
964 | * space we just found. | |
965 | */ | |
966 | spin_unlock(rsv_lock); | |
967 | first_free_block = bitmap_search_next_usable_block( | |
968 | my_rsv->rsv_start - group_first_block, | |
969 | bitmap_bh, group_end_block - group_first_block + 1); |
970 | ||
971 | if (first_free_block < 0) { | |
972 | /* | |
973 | * no free block left on the bitmap, no point | |
974 | * to reserve the space. return failed. | |
975 | */ | |
976 | spin_lock(rsv_lock); |
977 | if (!rsv_is_empty(&my_rsv->rsv_window)) | |
978 | rsv_window_remove(sb, my_rsv); | |
979 | spin_unlock(rsv_lock); | |
980 | return -1; /* failed */ | |
981 | } | |
982 | ||
983 | start_block = first_free_block + group_first_block; |
984 | /* | |
985 | * check if the first free block is within the | |
986 | * free space we just reserved | |
987 | */ | |
988 | if (start_block >= my_rsv->rsv_start && start_block < my_rsv->rsv_end) |
989 | return 0; /* success */ | |
990 | /* |
991 | * if the first free bit we found is out of the reservable space | |
992 | * continue search for next reservable space, | |
1da177e4 LT |
993 | * start from where the free block is, |
994 | * we also shift the list head to where we stopped last time | |
995 | */ | |
996 | search_head = my_rsv; | |
997 | spin_lock(rsv_lock); | |
998 | goto retry; | |
999 | } |
1000 | ||
1001 | /* | |
1002 | * This is the main function used to allocate a new block and its reservation | |
1003 | * window. | |
1004 | * | |
1005 | * Each time a new block allocation is needed, first try to allocate from the | |
1006 | * inode's own reservation. If it does not have a reservation window, then, | |
1007 | * rather than looking for a free bit in the bitmap first and then checking the | |
1008 | * reservation list to see whether it falls inside somebody else's reservation | |
1009 | * window, we try to allocate a reservation window for it starting from the | |
1010 | * goal, and then do the block allocation within that reservation window. | |
1011 | * | |
1012 | * This avoids searching the reservation list again and | |
1013 | * again when somebody is looking for a free block (without | |
1014 | * reservation), and there are lots of free blocks, but they are all | |
1015 | * being reserved. | |
1016 | * | |
1017 | * We use a sorted double linked list for the per-filesystem reservation list. | |
1018 | * The insert, remove and find a free space(non-reserved) operations for the | |
1019 | * sorted double linked list should be fast. | |
1020 | * | |
1021 | */ | |
1022 | static int | |
1023 | ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, | |
1024 | unsigned int group, struct buffer_head *bitmap_bh, | |
1025 | int goal, struct ext3_reserve_window_node * my_rsv, | |
1026 | int *errp) | |
1027 | { | |
1028 | unsigned long group_first_block; |
1029 | int ret = 0; | |
1030 | int fatal; | |
1031 | ||
1032 | *errp = 0; | |
1033 | ||
1034 | /* | |
1035 | * Make sure we use undo access for the bitmap, because it is critical | |
1036 | * that we do the frozen_data COW on bitmap buffers in all cases even | |
1037 | * if the buffer is in BJ_Forget state in the committing transaction. | |
1038 | */ | |
1039 | BUFFER_TRACE(bitmap_bh, "get undo access for new block"); | |
1040 | fatal = ext3_journal_get_undo_access(handle, bitmap_bh); | |
1041 | if (fatal) { | |
1042 | *errp = fatal; | |
1043 | return -1; | |
1044 | } | |
1045 | ||
1046 | /* | |
1047 | * we don't deal with reservation when | |
1048 | * filesystem is mounted without reservation | |
1049 | * or the file is not a regular file | |
1050 | * or last attempt to allocate a block with reservation turned on failed | |
1051 | */ | |
1052 | if (my_rsv == NULL ) { | |
1053 | ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal, NULL); | |
1054 | goto out; | |
1055 | } | |
1056 | /* |
1057 | * goal is a group relative block number (if there is a goal) | |
1058 | * 0 < goal < EXT3_BLOCKS_PER_GROUP(sb) | |
1059 | * first block is a filesystem wide block number | |
1060 | * first block is the block number of the first block in this group | |
1061 | */ | |
1062 | group_first_block = le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) + | |
1063 | group * EXT3_BLOCKS_PER_GROUP(sb); | |
1064 | ||
1065 | /* | |
1066 | * Basically we will allocate a new block from inode's reservation | |
1067 | * window. | |
1068 | * | |
1069 | * We need to allocate a new reservation window, if: | |
1070 | * a) inode does not have a reservation window; or | |
1071 | * b) last attempt to allocate a block from existing reservation | |
1072 | * failed; or | |
1073 | * c) we come here with a goal and with a reservation window | |
1074 | * | |
1075 | * We do not need to allocate a new reservation window if we come here | |
1076 | * at the beginning with a goal and the goal is inside the window, or | |
1077 | * we don't have a goal but already have a reservation window. | |
1078 | * then we could go to allocate from the reservation window directly. | |
1079 | */ | |
1080 | while (1) { | |
1081 | if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) || | |
1082 | !goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb)) { | |
1083 | ret = alloc_new_reservation(my_rsv, goal, sb, |
1084 | group, bitmap_bh); | |
1085 | if (ret < 0) |
1086 | break; /* failed */ | |
1087 | ||
1088 | if (!goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb)) | |
1089 | goal = -1; |
1090 | } | |
1091 | if ((my_rsv->rsv_start >= group_first_block + EXT3_BLOCKS_PER_GROUP(sb)) | |
1092 | || (my_rsv->rsv_end < group_first_block)) | |
1093 | BUG(); |
1094 | ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal, | |
1095 | &my_rsv->rsv_window); | |
1096 | if (ret >= 0) { |
1097 | my_rsv->rsv_alloc_hit++; | |
1098 | break; /* succeed */ | |
1099 | } | |
1100 | } | |
1101 | out: | |
1102 | if (ret >= 0) { | |
1103 | BUFFER_TRACE(bitmap_bh, "journal_dirty_metadata for " | |
1104 | "bitmap block"); | |
1105 | fatal = ext3_journal_dirty_metadata(handle, bitmap_bh); | |
1106 | if (fatal) { | |
1107 | *errp = fatal; | |
1108 | return -1; | |
1109 | } | |
1110 | return ret; | |
1111 | } | |
1112 | ||
1113 | BUFFER_TRACE(bitmap_bh, "journal_release_buffer"); | |
1114 | ext3_journal_release_buffer(handle, bitmap_bh); | |
1115 | return ret; | |
1116 | } | |
1117 | ||
1118 | static int ext3_has_free_blocks(struct ext3_sb_info *sbi) | |
1119 | { | |
1120 | int free_blocks, root_blocks; | |
1121 | ||
1122 | free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter); | |
1123 | root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count); | |
1124 | if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) && | |
1125 | sbi->s_resuid != current->fsuid && | |
1126 | (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) { | |
1127 | return 0; | |
1128 | } | |
1129 | return 1; | |
1130 | } | |
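/*
 * Added note (a concrete reading of the policy above): with
 * s_r_blocks_count = 1000, an ordinary task (no CAP_SYS_RESOURCE, fsuid not
 * the reserved uid, not in the reserved gid) is refused new blocks once
 * fewer than 1001 free blocks remain; the reserved blocks stay usable only
 * by privileged or designated users.
 */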
1131 | ||
1132 | /* | |
1133 | * ext3_should_retry_alloc() is called when ENOSPC is returned, and if | |
1134 | * it is profitable to retry the operation, this function will wait | |
1135 | * for the current or committing transaction to complete, and then | |
1136 | * return TRUE. | |
1137 | */ | |
1138 | int ext3_should_retry_alloc(struct super_block *sb, int *retries) | |
1139 | { | |
1140 | if (!ext3_has_free_blocks(EXT3_SB(sb)) || (*retries)++ > 3) | |
1141 | return 0; | |
1142 | ||
1143 | jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id); | |
1144 | ||
1145 | return journal_force_commit_nested(EXT3_SB(sb)->s_journal); | |
1146 | } | |
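/*
 * Illustrative sketch, not part of the original file: the retry pattern this
 * helper supports.  "allocate_something" is a placeholder for any path that
 * ends up in ext3_new_block() and can return -ENOSPC.
 */
#if 0
	int retries = 0;
	int err;
retry:
	err = allocate_something(handle, inode);
	if (err == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
#endif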
1147 | ||
1148 | /* | |
1149 | * ext3_new_block uses a goal block to assist allocation. If the goal is | |
1150 | * free, or there is a free block within 32 blocks of the goal, that block | |
1151 | * is allocated. Otherwise a forward search is made for a free block; within | |
1152 | * each block group the search first looks for an entire free byte in the block | |
1153 | * bitmap, and then for any free bit if that fails. | |
1154 | * This function also updates quota and i_blocks field. | |
1155 | */ | |
1156 | int ext3_new_block(handle_t *handle, struct inode *inode, | |
1157 | unsigned long goal, int *errp) | |
1158 | { | |
1159 | struct buffer_head *bitmap_bh = NULL; | |
1160 | struct buffer_head *gdp_bh; | |
1161 | int group_no; | |
1162 | int goal_group; | |
1163 | int ret_block; | |
1164 | int bgi; /* blockgroup iteration index */ | |
1165 | int target_block; | |
1166 | int fatal = 0, err; | |
1167 | int performed_allocation = 0; | |
1168 | int free_blocks; | |
1169 | struct super_block *sb; | |
1170 | struct ext3_group_desc *gdp; | |
1171 | struct ext3_super_block *es; | |
1172 | struct ext3_sb_info *sbi; | |
1173 | struct ext3_reserve_window_node *my_rsv = NULL; | |
1174 | struct ext3_block_alloc_info *block_i; | |
1175 | unsigned short windowsz = 0; | |
1176 | #ifdef EXT3FS_DEBUG | |
1177 | static int goal_hits, goal_attempts; | |
1178 | #endif | |
1179 | unsigned long ngroups; | |
1180 | ||
1181 | *errp = -ENOSPC; | |
1182 | sb = inode->i_sb; | |
1183 | if (!sb) { | |
1184 | printk("ext3_new_block: nonexistent device"); | |
1185 | return 0; | |
1186 | } | |
1187 | ||
1188 | /* | |
1189 | * Check quota for allocation of this block. | |
1190 | */ | |
1191 | if (DQUOT_ALLOC_BLOCK(inode, 1)) { | |
1192 | *errp = -EDQUOT; | |
1193 | return 0; | |
1194 | } | |
1195 | ||
1196 | sbi = EXT3_SB(sb); | |
1197 | es = EXT3_SB(sb)->s_es; | |
1198 | ext3_debug("goal=%lu.\n", goal); | |
1199 | /* | |
1200 | * Allocate a block from reservation only when | |
1201 | * filesystem is mounted with reservation(default,-o reservation), and | |
1202 | * it's a regular file, and | |
1203 | * the desired window size is greater than 0 (One could use ioctl | |
1204 | * command EXT3_IOC_SETRSVSZ to set the window size to 0 to turn off | |
1205 | * reservation on that particular file) | |
1206 | */ | |
1207 | block_i = EXT3_I(inode)->i_block_alloc_info; | |
1208 | if (block_i && ((windowsz = block_i->rsv_window_node.rsv_goal_size) > 0)) | |
1209 | my_rsv = &block_i->rsv_window_node; | |
1210 | ||
1211 | if (!ext3_has_free_blocks(sbi)) { | |
1212 | *errp = -ENOSPC; | |
1213 | goto out; | |
1214 | } | |
1215 | ||
1216 | /* | |
1217 | * First, test whether the goal block is free. | |
1218 | */ | |
1219 | if (goal < le32_to_cpu(es->s_first_data_block) || | |
1220 | goal >= le32_to_cpu(es->s_blocks_count)) | |
1221 | goal = le32_to_cpu(es->s_first_data_block); | |
1222 | group_no = (goal - le32_to_cpu(es->s_first_data_block)) / | |
1223 | EXT3_BLOCKS_PER_GROUP(sb); | |
1224 | gdp = ext3_get_group_desc(sb, group_no, &gdp_bh); | |
1225 | if (!gdp) | |
1226 | goto io_error; | |
1227 | ||
1228 | goal_group = group_no; | |
1229 | retry: | |
1230 | free_blocks = le16_to_cpu(gdp->bg_free_blocks_count); | |
1231 | /* | |
1232 | * if there are not enough free blocks to make a new reservation, | |
1233 | * turn off reservation for this allocation | |
1234 | */ | |
1235 | if (my_rsv && (free_blocks < windowsz) | |
1236 | && (rsv_is_empty(&my_rsv->rsv_window))) | |
1237 | my_rsv = NULL; | |
1238 | ||
1239 | if (free_blocks > 0) { | |
1240 | ret_block = ((goal - le32_to_cpu(es->s_first_data_block)) % | |
1241 | EXT3_BLOCKS_PER_GROUP(sb)); | |
1242 | bitmap_bh = read_block_bitmap(sb, group_no); | |
1243 | if (!bitmap_bh) | |
1244 | goto io_error; | |
1245 | ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no, | |
1246 | bitmap_bh, ret_block, my_rsv, &fatal); | |
1247 | if (fatal) | |
1248 | goto out; | |
1249 | if (ret_block >= 0) | |
1250 | goto allocated; | |
1251 | } | |
1252 | ||
1253 | ngroups = EXT3_SB(sb)->s_groups_count; | |
1254 | smp_rmb(); | |
1255 | ||
1256 | /* | |
1257 | * Now search the rest of the groups. We assume that | |
1258 | * i and gdp correctly point to the last group visited. | |
1259 | */ | |
1260 | for (bgi = 0; bgi < ngroups; bgi++) { | |
1261 | group_no++; | |
1262 | if (group_no >= ngroups) | |
1263 | group_no = 0; | |
1264 | gdp = ext3_get_group_desc(sb, group_no, &gdp_bh); | |
1265 | if (!gdp) { | |
1266 | *errp = -EIO; | |
1267 | goto out; | |
1268 | } | |
1269 | free_blocks = le16_to_cpu(gdp->bg_free_blocks_count); | |
1270 | /* | |
1271 | * skip this group if the number of | |
1272 | * free blocks is less than half of the reservation | |
1273 | * window size. | |
1274 | */ | |
1275 | if (free_blocks <= (windowsz/2)) | |
1276 | continue; | |
1277 | ||
1278 | brelse(bitmap_bh); | |
1279 | bitmap_bh = read_block_bitmap(sb, group_no); | |
1280 | if (!bitmap_bh) | |
1281 | goto io_error; | |
1282 | ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no, | |
1283 | bitmap_bh, -1, my_rsv, &fatal); | |
1284 | if (fatal) | |
1285 | goto out; | |
1286 | if (ret_block >= 0) | |
1287 | goto allocated; | |
1288 | } | |
1289 | /* | |
1290 | * We may end up with a bogus earlier ENOSPC error because the | |
1291 | * filesystem is "full" of reservations, while there may | |
1292 | * indeed be free blocks available on disk. | |
1293 | * In this case, we just forget about the reservations and | |
1294 | * do block allocation as if there were no reservations. | |
1295 | */ | |
1296 | if (my_rsv) { | |
1297 | my_rsv = NULL; | |
1298 | group_no = goal_group; | |
1299 | goto retry; | |
1300 | } | |
1301 | /* No space left on the device */ | |
1302 | *errp = -ENOSPC; | |
1303 | goto out; | |
1304 | ||
1305 | allocated: | |
1306 | ||
1307 | ext3_debug("using block group %d(%d)\n", | |
1308 | group_no, gdp->bg_free_blocks_count); | |
1309 | ||
1310 | BUFFER_TRACE(gdp_bh, "get_write_access"); | |
1311 | fatal = ext3_journal_get_write_access(handle, gdp_bh); | |
1312 | if (fatal) | |
1313 | goto out; | |
1314 | ||
1315 | target_block = ret_block + group_no * EXT3_BLOCKS_PER_GROUP(sb) | |
1316 | + le32_to_cpu(es->s_first_data_block); | |
1317 | ||
1318 | if (target_block == le32_to_cpu(gdp->bg_block_bitmap) || | |
1319 | target_block == le32_to_cpu(gdp->bg_inode_bitmap) || | |
1320 | in_range(target_block, le32_to_cpu(gdp->bg_inode_table), | |
1321 | EXT3_SB(sb)->s_itb_per_group)) | |
1322 | ext3_error(sb, "ext3_new_block", | |
1323 | "Allocating block in system zone - " | |
1324 | "block = %u", target_block); | |
1325 | ||
1326 | performed_allocation = 1; | |
1327 | ||
1328 | #ifdef CONFIG_JBD_DEBUG | |
1329 | { | |
1330 | struct buffer_head *debug_bh; | |
1331 | ||
1332 | /* Record bitmap buffer state in the newly allocated block */ | |
1333 | debug_bh = sb_find_get_block(sb, target_block); | |
1334 | if (debug_bh) { | |
1335 | BUFFER_TRACE(debug_bh, "state when allocated"); | |
1336 | BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap state"); | |
1337 | brelse(debug_bh); | |
1338 | } | |
1339 | } | |
1340 | jbd_lock_bh_state(bitmap_bh); | |
1341 | spin_lock(sb_bgl_lock(sbi, group_no)); | |
1342 | if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) { | |
1343 | if (ext3_test_bit(ret_block, | |
1344 | bh2jh(bitmap_bh)->b_committed_data)) { | |
1345 | printk("%s: block was unexpectedly set in " | |
1346 | "b_committed_data\n", __FUNCTION__); | |
1347 | } | |
1348 | } | |
1349 | ext3_debug("found bit %d\n", ret_block); | |
1350 | spin_unlock(sb_bgl_lock(sbi, group_no)); | |
1351 | jbd_unlock_bh_state(bitmap_bh); | |
1352 | #endif | |
1353 | ||
1354 | /* ret_block was blockgroup-relative. Now it becomes fs-relative */ | |
1355 | ret_block = target_block; | |
1356 | ||
1357 | if (ret_block >= le32_to_cpu(es->s_blocks_count)) { | |
1358 | ext3_error(sb, "ext3_new_block", | |
1359 | "block(%d) >= blocks count(%d) - " | |
1360 | "block_group = %d, es == %p ", ret_block, | |
1361 | le32_to_cpu(es->s_blocks_count), group_no, es); | |
1362 | goto out; | |
1363 | } | |
1364 | ||
1365 | /* | |
1366 | * It is up to the caller to add the new buffer to a journal | |
1367 | * list of some description. We don't know in advance whether | |
1368 | * the caller wants to use it as metadata or data. | |
1369 | */ | |
1370 | ext3_debug("allocating block %d. Goal hits %d of %d.\n", | |
1371 | ret_block, goal_hits, goal_attempts); | |
1372 | ||
1373 | spin_lock(sb_bgl_lock(sbi, group_no)); | |
1374 | gdp->bg_free_blocks_count = | |
1375 | cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - 1); | |
1376 | spin_unlock(sb_bgl_lock(sbi, group_no)); | |
1377 | percpu_counter_mod(&sbi->s_freeblocks_counter, -1); | |
1378 | ||
1379 | BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor"); | |
1380 | err = ext3_journal_dirty_metadata(handle, gdp_bh); | |
1381 | if (!fatal) | |
1382 | fatal = err; | |
1383 | ||
1384 | sb->s_dirt = 1; | |
1385 | if (fatal) | |
1386 | goto out; | |
1387 | ||
1388 | *errp = 0; | |
1389 | brelse(bitmap_bh); | |
1390 | return ret_block; | |
1391 | ||
1392 | io_error: | |
1393 | *errp = -EIO; | |
1394 | out: | |
1395 | if (fatal) { | |
1396 | *errp = fatal; | |
1397 | ext3_std_error(sb, fatal); | |
1398 | } | |
1399 | /* | |
1400 | * Undo the block allocation | |
1401 | */ | |
1402 | if (!performed_allocation) | |
1403 | DQUOT_FREE_BLOCK(inode, 1); | |
1404 | brelse(bitmap_bh); | |
1405 | return 0; | |
1406 | } | |
1407 | ||
1408 | unsigned long ext3_count_free_blocks(struct super_block *sb) | |
1409 | { | |
1410 | unsigned long desc_count; | |
1411 | struct ext3_group_desc *gdp; | |
1412 | int i; | |
1413 | unsigned long ngroups; | |
1414 | #ifdef EXT3FS_DEBUG | |
1415 | struct ext3_super_block *es; | |
1416 | unsigned long bitmap_count, x; | |
1417 | struct buffer_head *bitmap_bh = NULL; | |
1418 | ||
1419 | lock_super(sb); | |
1420 | es = EXT3_SB(sb)->s_es; | |
1421 | desc_count = 0; | |
1422 | bitmap_count = 0; | |
1423 | gdp = NULL; | |
1424 | for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) { | |
1425 | gdp = ext3_get_group_desc(sb, i, NULL); | |
1426 | if (!gdp) | |
1427 | continue; | |
1428 | desc_count += le16_to_cpu(gdp->bg_free_blocks_count); | |
1429 | brelse(bitmap_bh); | |
1430 | bitmap_bh = read_block_bitmap(sb, i); | |
1431 | if (bitmap_bh == NULL) | |
1432 | continue; | |
1433 | ||
1434 | x = ext3_count_free(bitmap_bh, sb->s_blocksize); | |
1435 | printk("group %d: stored = %d, counted = %lu\n", | |
1436 | i, le16_to_cpu(gdp->bg_free_blocks_count), x); | |
1437 | bitmap_count += x; | |
1438 | } | |
1439 | brelse(bitmap_bh); | |
1440 | printk("ext3_count_free_blocks: stored = %u, computed = %lu, %lu\n", | |
1441 | le32_to_cpu(es->s_free_blocks_count), desc_count, bitmap_count); | |
1442 | unlock_super(sb); | |
1443 | return bitmap_count; | |
1444 | #else | |
1445 | desc_count = 0; | |
1446 | ngroups = EXT3_SB(sb)->s_groups_count; | |
1447 | smp_rmb(); | |
1448 | for (i = 0; i < ngroups; i++) { | |
1449 | gdp = ext3_get_group_desc(sb, i, NULL); | |
1450 | if (!gdp) | |
1451 | continue; | |
1452 | desc_count += le16_to_cpu(gdp->bg_free_blocks_count); | |
1453 | } | |
1454 | ||
1455 | return desc_count; | |
1456 | #endif | |
1457 | } | |
1458 | ||
1459 | static inline int | |
1460 | block_in_use(unsigned long block, struct super_block *sb, unsigned char *map) | |
1461 | { | |
1462 | return ext3_test_bit ((block - | |
1463 | le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block)) % | |
1464 | EXT3_BLOCKS_PER_GROUP(sb), map); | |
1465 | } | |
1466 | ||
1467 | static inline int test_root(int a, int b) | |
1468 | { | |
1469 | int num = b; | |
1470 | ||
1471 | while (a > num) | |
1472 | num *= b; | |
1473 | return num == a; | |
1474 | } | |
1475 | ||
1476 | static int ext3_group_sparse(int group) | |
1477 | { | |
1478 | if (group <= 1) | |
1479 | return 1; | |
1480 | if (!(group & 1)) | |
1481 | return 0; | |
1482 | return (test_root(group, 7) || test_root(group, 5) || | |
1483 | test_root(group, 3)); | |
1484 | } | |
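/*
 * Worked example (added for illustration): with the sparse_super feature,
 * ext3_group_sparse() accepts groups 0 and 1 plus the powers of 3, 5 and 7,
 * so superblock and group-descriptor backups land in groups
 * 0, 1, 3, 5, 7, 9, 25, 27, 49, 81, 125, ... and every other group keeps
 * those blocks free for data.
 */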
1485 | ||
1486 | /** | |
1487 | * ext3_bg_has_super - number of blocks used by the superblock in group | |
1488 | * @sb: superblock for filesystem | |
1489 | * @group: group number to check | |
1490 | * | |
1491 | * Return the number of blocks used by the superblock (primary or backup) | |
1492 | * in this group. Currently this will be only 0 or 1. | |
1493 | */ | |
1494 | int ext3_bg_has_super(struct super_block *sb, int group) | |
1495 | { | |
1496 | if (EXT3_HAS_RO_COMPAT_FEATURE(sb,EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER)&& | |
1497 | !ext3_group_sparse(group)) | |
1498 | return 0; | |
1499 | return 1; | |
1500 | } | |
1501 | ||
1502 | /** | |
1503 | * ext3_bg_num_gdb - number of blocks used by the group table in group | |
1504 | * @sb: superblock for filesystem | |
1505 | * @group: group number to check | |
1506 | * | |
1507 | * Return the number of blocks used by the group descriptor table | |
1508 | * (primary or backup) in this group. In the future there may be a | |
1509 | * different number of descriptor blocks in each group. | |
1510 | */ | |
1511 | unsigned long ext3_bg_num_gdb(struct super_block *sb, int group) | |
1512 | { | |
1513 | if (EXT3_HAS_RO_COMPAT_FEATURE(sb,EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER)&& | |
1514 | !ext3_group_sparse(group)) | |
1515 | return 0; | |
1516 | return EXT3_SB(sb)->s_gdb_count; | |
1517 | } | |
1518 | ||
1519 | #ifdef CONFIG_EXT3_CHECK | |
1520 | /* Called at mount-time, super-block is locked */ | |
1521 | void ext3_check_blocks_bitmap (struct super_block * sb) | |
1522 | { | |
1523 | struct ext3_super_block *es; | |
1524 | unsigned long desc_count, bitmap_count, x, j; | |
1525 | unsigned long desc_blocks; | |
1526 | struct buffer_head *bitmap_bh = NULL; | |
1527 | struct ext3_group_desc *gdp; | |
1528 | int i; | |
1529 | ||
1530 | es = EXT3_SB(sb)->s_es; | |
1531 | desc_count = 0; | |
1532 | bitmap_count = 0; | |
1533 | gdp = NULL; | |
1534 | for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) { | |
1535 | gdp = ext3_get_group_desc (sb, i, NULL); | |
1536 | if (!gdp) | |
1537 | continue; | |
1538 | desc_count += le16_to_cpu(gdp->bg_free_blocks_count); | |
1539 | brelse(bitmap_bh); | |
1540 | bitmap_bh = read_block_bitmap(sb, i); | |
1541 | if (bitmap_bh == NULL) | |
1542 | continue; | |
1543 | ||
1544 | if (ext3_bg_has_super(sb, i) && | |
1545 | !ext3_test_bit(0, bitmap_bh->b_data)) | |
1546 | ext3_error(sb, __FUNCTION__, | |
1547 | "Superblock in group %d is marked free", i); | |
1548 | ||
1549 | desc_blocks = ext3_bg_num_gdb(sb, i); | |
1550 | for (j = 0; j < desc_blocks; j++) | |
1551 | if (!ext3_test_bit(j + 1, bitmap_bh->b_data)) | |
1552 | ext3_error(sb, __FUNCTION__, | |
1553 | "Descriptor block #%ld in group " | |
1554 | "%d is marked free", j, i); | |
1555 | ||
1556 | if (!block_in_use (le32_to_cpu(gdp->bg_block_bitmap), | |
1557 | sb, bitmap_bh->b_data)) | |
1558 | ext3_error (sb, "ext3_check_blocks_bitmap", | |
1559 | "Block bitmap for group %d is marked free", | |
1560 | i); | |
1561 | ||
1562 | if (!block_in_use (le32_to_cpu(gdp->bg_inode_bitmap), | |
1563 | sb, bitmap_bh->b_data)) | |
1564 | ext3_error (sb, "ext3_check_blocks_bitmap", | |
1565 | "Inode bitmap for group %d is marked free", | |
1566 | i); | |
1567 | ||
1568 | for (j = 0; j < EXT3_SB(sb)->s_itb_per_group; j++) | |
1569 | if (!block_in_use (le32_to_cpu(gdp->bg_inode_table) + j, | |
1570 | sb, bitmap_bh->b_data)) | |
1571 | ext3_error (sb, "ext3_check_blocks_bitmap", | |
1572 | "Block #%d of the inode table in " | |
1573 | "group %d is marked free", j, i); | |
1574 | ||
1575 | x = ext3_count_free(bitmap_bh, sb->s_blocksize); | |
1576 | if (le16_to_cpu(gdp->bg_free_blocks_count) != x) | |
1577 | ext3_error (sb, "ext3_check_blocks_bitmap", | |
1578 | "Wrong free blocks count for group %d, " | |
1579 | "stored = %d, counted = %lu", i, | |
1580 | le16_to_cpu(gdp->bg_free_blocks_count), x); | |
1581 | bitmap_count += x; | |
1582 | } | |
1583 | brelse(bitmap_bh); | |
1584 | if (le32_to_cpu(es->s_free_blocks_count) != bitmap_count) | |
1585 | ext3_error (sb, "ext3_check_blocks_bitmap", | |
1586 | "Wrong free blocks count in super block, " | |
1587 | "stored = %lu, counted = %lu", | |
1588 | (unsigned long)le32_to_cpu(es->s_free_blocks_count), | |
1589 | bitmap_count); | |
1590 | } | |
1591 | #endif |