/*
 *  linux/fs/ext4/ialloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * BSD ufs-inspired inode and directory allocation by
 * Stephen Tweedie (sct@redhat.com), 1993
 * Big-endian to little-endian byte-swapping/bitmaps by
 * David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <asm/byteorder.h>

#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#include <trace/events/ext4.h>

/*
 * ialloc.c contains the inode allocation and deallocation routines
 */

/*
 * The free inodes are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the group.
 */
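
/*
 * Editorial note, not part of the original file: an inode number maps
 * onto a (group, bit) pair exactly as ext4_free_inode() computes it
 * below.  For example, with EXT4_INODES_PER_GROUP(sb) == 8192, inode
 * 10000 lives in block group (10000 - 1) / 8192 == 1, at bit
 * (10000 - 1) % 8192 == 1807 of that group's inode bitmap.
 */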

/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
        int i;

        if (start_bit >= end_bit)
                return;

        ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
        for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
                ext4_set_bit(i, bitmap);
        if (i < end_bit)
                memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}
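
/*
 * Usage sketch (illustrative, not part of the original file): with a
 * 4KiB block and 8192 inodes per group, the bitmap block holds
 * 4096 * 8 == 32768 bits but only the first 8192 track real inodes,
 * so the unused tail is padded with ones:
 *
 *	ext4_mark_bitmap_end(8192, 32768, bh->b_data);
 *
 * The loop above sets the (at most 7) bits up to the next byte
 * boundary one at a time, and memset() fills the remaining whole
 * bytes; end_bit is a multiple of 8 here (bits per block), so no
 * partial trailing byte is left behind.
 */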

/* Initializes an uninitialized inode bitmap */
static unsigned ext4_init_inode_bitmap(struct super_block *sb,
                                       struct buffer_head *bh,
                                       ext4_group_t block_group,
                                       struct ext4_group_desc *gdp)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);

        J_ASSERT_BH(bh, buffer_locked(bh));

        /* If checksum is bad mark all blocks and inodes in use to prevent
         * allocation, essentially implementing a per-group read-only flag. */
        if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
                ext4_error(sb, "Checksum bad for group %u", block_group);
                ext4_free_blks_set(sb, gdp, 0);
                ext4_free_inodes_set(sb, gdp, 0);
                ext4_itable_unused_set(sb, gdp, 0);
                memset(bh->b_data, 0xff, sb->s_blocksize);
                return 0;
        }

        memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
        ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
                        bh->b_data);

        return EXT4_INODES_PER_GROUP(sb);
}

/*
 * Read the inode allocation bitmap for a given block_group, reading
 * into the specified slot in the superblock's bitmap cache.
 *
 * Return buffer_head of bitmap on success or NULL.
 */
static struct buffer_head *
ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
{
        struct ext4_group_desc *desc;
        struct buffer_head *bh = NULL;
        ext4_fsblk_t bitmap_blk;

        desc = ext4_get_group_desc(sb, block_group, NULL);
        if (!desc)
                return NULL;

        bitmap_blk = ext4_inode_bitmap(sb, desc);
        bh = sb_getblk(sb, bitmap_blk);
        if (unlikely(!bh)) {
                ext4_error(sb, "Cannot read inode bitmap - "
                            "block_group = %u, inode_bitmap = %llu",
                            block_group, bitmap_blk);
                return NULL;
        }
        if (bitmap_uptodate(bh))
                return bh;

        lock_buffer(bh);
        if (bitmap_uptodate(bh)) {
                unlock_buffer(bh);
                return bh;
        }

        ext4_lock_group(sb, block_group);
        if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
                ext4_init_inode_bitmap(sb, bh, block_group, desc);
                set_bitmap_uptodate(bh);
                set_buffer_uptodate(bh);
                ext4_unlock_group(sb, block_group);
                unlock_buffer(bh);
                return bh;
        }
        ext4_unlock_group(sb, block_group);

        if (buffer_uptodate(bh)) {
                /*
                 * If the group is not uninit and bh is uptodate,
                 * the bitmap is also uptodate.
                 */
                set_bitmap_uptodate(bh);
                unlock_buffer(bh);
                return bh;
        }
        /*
         * Submit the buffer_head for reading.  We can safely mark the
         * bitmap as uptodate now.  We do it here so that the bitmap
         * uptodate bit gets set while the buffer lock is held.
         */
        set_bitmap_uptodate(bh);
        if (bh_submit_read(bh) < 0) {
                put_bh(bh);
                ext4_error(sb, "Cannot read inode bitmap - "
                            "block_group = %u, inode_bitmap = %llu",
                            block_group, bitmap_blk);
                return NULL;
        }
        return bh;
}
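
/*
 * Editorial note, not part of the original file: a bitmap buffer
 * carries two independent "uptodate" bits.  buffer_uptodate() means
 * the block's contents were read from disk; bitmap_uptodate() means
 * the in-memory bitmap is authoritative, which can be true without
 * any I/O when ext4_init_inode_bitmap() constructed it for an
 * EXT4_BG_INODE_UNINIT group.  The function above resolves the
 * possible combinations under the buffer lock.
 */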

/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the hard disk.
 */
void ext4_free_inode(handle_t *handle, struct inode *inode)
{
        struct super_block *sb = inode->i_sb;
        int is_directory;
        unsigned long ino;
        struct buffer_head *bitmap_bh = NULL;
        struct buffer_head *bh2;
        ext4_group_t block_group;
        unsigned long bit;
        struct ext4_group_desc *gdp;
        struct ext4_super_block *es;
        struct ext4_sb_info *sbi;
        int fatal = 0, err, count, cleared;

        if (atomic_read(&inode->i_count) > 1) {
                printk(KERN_ERR "ext4_free_inode: inode has count=%d\n",
                       atomic_read(&inode->i_count));
                return;
        }
        if (inode->i_nlink) {
                printk(KERN_ERR "ext4_free_inode: inode has nlink=%d\n",
                       inode->i_nlink);
                return;
        }
        if (!sb) {
                printk(KERN_ERR "ext4_free_inode: inode on "
                       "nonexistent device\n");
                return;
        }
        sbi = EXT4_SB(sb);

        ino = inode->i_ino;
        ext4_debug("freeing inode %lu\n", ino);
        trace_ext4_free_inode(inode);

        /*
         * Note: we must free any quota before locking the superblock,
         * as writing the quota to disk may need the lock as well.
         */
        dquot_initialize(inode);
        ext4_xattr_delete_inode(handle, inode);
        dquot_free_inode(inode);
        dquot_drop(inode);

        is_directory = S_ISDIR(inode->i_mode);

        /* Do this BEFORE marking the inode not in use or returning an error */
        ext4_clear_inode(inode);

        es = EXT4_SB(sb)->s_es;
        if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
                ext4_error(sb, "reserved or nonexistent inode %lu", ino);
                goto error_return;
        }
        block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
        bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
        bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
        if (!bitmap_bh)
                goto error_return;

        BUFFER_TRACE(bitmap_bh, "get_write_access");
        fatal = ext4_journal_get_write_access(handle, bitmap_bh);
        if (fatal)
                goto error_return;

        fatal = -ESRCH;
        gdp = ext4_get_group_desc(sb, block_group, &bh2);
        if (gdp) {
                BUFFER_TRACE(bh2, "get_write_access");
                fatal = ext4_journal_get_write_access(handle, bh2);
        }
        ext4_lock_group(sb, block_group);
        cleared = ext4_clear_bit(bit, bitmap_bh->b_data);
        if (fatal || !cleared) {
                ext4_unlock_group(sb, block_group);
                goto out;
        }

        count = ext4_free_inodes_count(sb, gdp) + 1;
        ext4_free_inodes_set(sb, gdp, count);
        if (is_directory) {
                count = ext4_used_dirs_count(sb, gdp) - 1;
                ext4_used_dirs_set(sb, gdp, count);
                percpu_counter_dec(&sbi->s_dirs_counter);
        }
        gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
        ext4_unlock_group(sb, block_group);

        percpu_counter_inc(&sbi->s_freeinodes_counter);
        if (sbi->s_log_groups_per_flex) {
                ext4_group_t f = ext4_flex_group(sbi, block_group);

                atomic_inc(&sbi->s_flex_groups[f].free_inodes);
                if (is_directory)
                        atomic_dec(&sbi->s_flex_groups[f].used_dirs);
        }
        BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
        fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
out:
        if (cleared) {
                BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
                err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
                if (!fatal)
                        fatal = err;
                ext4_mark_super_dirty(sb);
        } else
                ext4_error(sb, "bit already cleared for inode %lu", ino);

error_return:
        brelse(bitmap_bh);
        ext4_std_error(sb, fatal);
}

/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
static int find_group_dir(struct super_block *sb, struct inode *parent,
                                ext4_group_t *best_group)
{
        ext4_group_t ngroups = ext4_get_groups_count(sb);
        unsigned int freei, avefreei;
        struct ext4_group_desc *desc, *best_desc = NULL;
        ext4_group_t group;
        int ret = -1;

        freei = percpu_counter_read_positive(&EXT4_SB(sb)->s_freeinodes_counter);
        avefreei = freei / ngroups;

        for (group = 0; group < ngroups; group++) {
                desc = ext4_get_group_desc(sb, group, NULL);
                if (!desc || !ext4_free_inodes_count(sb, desc))
                        continue;
                if (ext4_free_inodes_count(sb, desc) < avefreei)
                        continue;
                if (!best_desc ||
                    (ext4_free_blks_count(sb, desc) >
                     ext4_free_blks_count(sb, best_desc))) {
                        *best_group = group;
                        best_desc = desc;
                        ret = 0;
                }
        }
        return ret;
}

#define free_block_ratio 10

static int find_group_flex(struct super_block *sb, struct inode *parent,
                           ext4_group_t *best_group)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_group_desc *desc;
        struct flex_groups *flex_group = sbi->s_flex_groups;
        ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
        ext4_group_t parent_fbg_group = ext4_flex_group(sbi, parent_group);
        ext4_group_t ngroups = ext4_get_groups_count(sb);
        int flex_size = ext4_flex_bg_size(sbi);
        ext4_group_t best_flex = parent_fbg_group;
        int blocks_per_flex = sbi->s_blocks_per_group * flex_size;
        int flexbg_free_blocks;
        int flex_freeb_ratio;
        ext4_group_t n_fbg_groups;
        ext4_group_t i;

        n_fbg_groups = (ngroups + flex_size - 1) >>
                sbi->s_log_groups_per_flex;

find_close_to_parent:
        flexbg_free_blocks = atomic_read(&flex_group[best_flex].free_blocks);
        flex_freeb_ratio = flexbg_free_blocks * 100 / blocks_per_flex;
        if (atomic_read(&flex_group[best_flex].free_inodes) &&
            flex_freeb_ratio > free_block_ratio)
                goto found_flexbg;

        if (best_flex && best_flex == parent_fbg_group) {
                best_flex--;
                goto find_close_to_parent;
        }

        for (i = 0; i < n_fbg_groups; i++) {
                if (i == parent_fbg_group || i == parent_fbg_group - 1)
                        continue;

                flexbg_free_blocks = atomic_read(&flex_group[i].free_blocks);
                flex_freeb_ratio = flexbg_free_blocks * 100 / blocks_per_flex;

                if (flex_freeb_ratio > free_block_ratio &&
                    (atomic_read(&flex_group[i].free_inodes))) {
                        best_flex = i;
                        goto found_flexbg;
                }

                if ((atomic_read(&flex_group[best_flex].free_inodes) == 0) ||
                    ((atomic_read(&flex_group[i].free_blocks) >
                      atomic_read(&flex_group[best_flex].free_blocks)) &&
                     atomic_read(&flex_group[i].free_inodes)))
                        best_flex = i;
        }

        if (!atomic_read(&flex_group[best_flex].free_inodes) ||
            !atomic_read(&flex_group[best_flex].free_blocks))
                return -1;

found_flexbg:
        for (i = best_flex * flex_size; i < ngroups &&
                     i < (best_flex + 1) * flex_size; i++) {
                desc = ext4_get_group_desc(sb, i, NULL);
                if (ext4_free_inodes_count(sb, desc)) {
                        *best_group = i;
                        goto out;
                }
        }

        return -1;
out:
        return 0;
}

struct orlov_stats {
        __u32 free_inodes;
        __u32 free_blocks;
        __u32 used_dirs;
};

/*
 * Helper function for Orlov's allocator; returns critical information
 * for a particular block group or flex_bg.  If flex_size is 1, then g
 * is a block group number; otherwise it is flex_bg number.
 */
static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
                            int flex_size, struct orlov_stats *stats)
{
        struct ext4_group_desc *desc;
        struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;

        if (flex_size > 1) {
                stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
                stats->free_blocks = atomic_read(&flex_group[g].free_blocks);
                stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
                return;
        }

        desc = ext4_get_group_desc(sb, g, NULL);
        if (desc) {
                stats->free_inodes = ext4_free_inodes_count(sb, desc);
                stats->free_blocks = ext4_free_blks_count(sb, desc);
                stats->used_dirs = ext4_used_dirs_count(sb, desc);
        } else {
                stats->free_inodes = 0;
                stats->free_blocks = 0;
                stats->used_dirs = 0;
        }
}
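
/*
 * Usage sketch (illustrative, not part of the original file): the
 * Orlov search below calls this once per candidate and compares the
 * returned counters against its thresholds, e.g.:
 *
 *	struct orlov_stats stats;
 *
 *	get_orlov_stats(sb, g, flex_size, &stats);
 *	if (stats.free_inodes < avefreei)
 *		continue;	(group is poorer than average, skip it)
 *
 * With flex_size == 1, g is a plain block group and the counters come
 * straight from its descriptor; otherwise g is a flex group and the
 * counters are the aggregated per-flex-group atomics.
 */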

/*
 * Orlov's allocator for directories.
 *
 * We always try to spread first-level directories.
 *
 * If there are block groups with both free inodes and free blocks counts
 * not worse than average we return one with the smallest directory count.
 * Otherwise we simply return a random group.
 *
 * For the rest, the rules are as follows:
 *
 * It's OK to put a directory into a group unless
 * it has too many directories already (max_dirs) or
 * it has too few free inodes left (min_inodes) or
 * it has too few free blocks left (min_blocks).
 * The parent's group is preferred; if it doesn't satisfy these
 * conditions we search cyclically through the rest.  If none
 * of the groups look good we just look for a group with more
 * free inodes than average (starting at the parent's group).
 */
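
/*
 * Worked example with illustrative numbers (not from the original
 * file): take 8192 inodes and 32768 blocks per group, flex_size == 1,
 * 100 groups, 40000 free inodes and 500 directories.  The thresholds
 * computed in find_group_orlov() then become
 *
 *	avefreei   = 40000 / 100            = 400
 *	max_dirs   = 500 / 100 + 8192 / 16  = 517
 *	min_inodes = 400 - 8192 / 4         < 1, clamped to 1
 *	min_blocks = avefreeb - 32768 / 4
 *
 * so a group is rejected only when it is clearly worse than average.
 */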

static int find_group_orlov(struct super_block *sb, struct inode *parent,
                            ext4_group_t *group, int mode,
                            const struct qstr *qstr)
{
        ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_group_t real_ngroups = ext4_get_groups_count(sb);
        int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
        unsigned int freei, avefreei;
        ext4_fsblk_t freeb, avefreeb;
        unsigned int ndirs;
        int max_dirs, min_inodes;
        ext4_grpblk_t min_blocks;
        ext4_group_t i, grp, g, ngroups;
        struct ext4_group_desc *desc;
        struct orlov_stats stats;
        int flex_size = ext4_flex_bg_size(sbi);
        struct dx_hash_info hinfo;

        ngroups = real_ngroups;
        if (flex_size > 1) {
                ngroups = (real_ngroups + flex_size - 1) >>
                        sbi->s_log_groups_per_flex;
                parent_group >>= sbi->s_log_groups_per_flex;
        }

        freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
        avefreei = freei / ngroups;
        freeb = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
        avefreeb = freeb;
        do_div(avefreeb, ngroups);
        ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);

        if (S_ISDIR(mode) &&
            ((parent == sb->s_root->d_inode) ||
             (ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) {
                int best_ndir = inodes_per_group;
                int ret = -1;

                if (qstr) {
                        hinfo.hash_version = DX_HASH_HALF_MD4;
                        hinfo.seed = sbi->s_hash_seed;
                        ext4fs_dirhash(qstr->name, qstr->len, &hinfo);
                        grp = hinfo.hash;
                } else
                        get_random_bytes(&grp, sizeof(grp));
                parent_group = (unsigned)grp % ngroups;
                for (i = 0; i < ngroups; i++) {
                        g = (parent_group + i) % ngroups;
                        get_orlov_stats(sb, g, flex_size, &stats);
                        if (!stats.free_inodes)
                                continue;
                        if (stats.used_dirs >= best_ndir)
                                continue;
                        if (stats.free_inodes < avefreei)
                                continue;
                        if (stats.free_blocks < avefreeb)
                                continue;
                        grp = g;
                        ret = 0;
                        best_ndir = stats.used_dirs;
                }
                if (ret)
                        goto fallback;
        found_flex_bg:
                if (flex_size == 1) {
                        *group = grp;
                        return 0;
                }

                /*
                 * We pack inodes at the beginning of the flexgroup's
                 * inode tables.  Block allocation decisions will do
                 * something similar, although regular files will
                 * start at 2nd block group of the flexgroup.  See
                 * ext4_ext_find_goal() and ext4_find_near().
                 */
                grp *= flex_size;
                for (i = 0; i < flex_size; i++) {
                        if (grp+i >= real_ngroups)
                                break;
                        desc = ext4_get_group_desc(sb, grp+i, NULL);
                        if (desc && ext4_free_inodes_count(sb, desc)) {
                                *group = grp+i;
                                return 0;
                        }
                }
                goto fallback;
        }

        max_dirs = ndirs / ngroups + inodes_per_group / 16;
        min_inodes = avefreei - inodes_per_group*flex_size / 4;
        if (min_inodes < 1)
                min_inodes = 1;
        min_blocks = avefreeb - EXT4_BLOCKS_PER_GROUP(sb)*flex_size / 4;

        /*
         * Start looking in the flex group where we last allocated an
         * inode for this parent directory
         */
        if (EXT4_I(parent)->i_last_alloc_group != ~0) {
                parent_group = EXT4_I(parent)->i_last_alloc_group;
                if (flex_size > 1)
                        parent_group >>= sbi->s_log_groups_per_flex;
        }

        for (i = 0; i < ngroups; i++) {
                grp = (parent_group + i) % ngroups;
                get_orlov_stats(sb, grp, flex_size, &stats);
                if (stats.used_dirs >= max_dirs)
                        continue;
                if (stats.free_inodes < min_inodes)
                        continue;
                if (stats.free_blocks < min_blocks)
                        continue;
                goto found_flex_bg;
        }

fallback:
        ngroups = real_ngroups;
        avefreei = freei / ngroups;
fallback_retry:
        parent_group = EXT4_I(parent)->i_block_group;
        for (i = 0; i < ngroups; i++) {
                grp = (parent_group + i) % ngroups;
                desc = ext4_get_group_desc(sb, grp, NULL);
                if (desc && ext4_free_inodes_count(sb, desc) &&
                    ext4_free_inodes_count(sb, desc) >= avefreei) {
                        *group = grp;
                        return 0;
                }
        }

        if (avefreei) {
                /*
                 * The free-inodes counter is approximate, and for really small
                 * filesystems the above test can fail to find any blockgroups
                 */
                avefreei = 0;
                goto fallback_retry;
        }

        return -1;
}

static int find_group_other(struct super_block *sb, struct inode *parent,
                            ext4_group_t *group, int mode)
{
        ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
        ext4_group_t i, last, ngroups = ext4_get_groups_count(sb);
        struct ext4_group_desc *desc;
        int flex_size = ext4_flex_bg_size(EXT4_SB(sb));

        /*
         * Try to place the inode in the same flex group as its
         * parent.  If we can't find space, use the Orlov algorithm to
         * find another flex group, and store that information in the
         * parent directory's inode information so that future
         * allocations use that flex group.
         */
        if (flex_size > 1) {
                int retry = 0;

        try_again:
                parent_group &= ~(flex_size-1);
                last = parent_group + flex_size;
                if (last > ngroups)
                        last = ngroups;
                for (i = parent_group; i < last; i++) {
                        desc = ext4_get_group_desc(sb, i, NULL);
                        if (desc && ext4_free_inodes_count(sb, desc)) {
                                *group = i;
                                return 0;
                        }
                }
                if (!retry && EXT4_I(parent)->i_last_alloc_group != ~0) {
                        retry = 1;
                        parent_group = EXT4_I(parent)->i_last_alloc_group;
                        goto try_again;
                }
                /*
                 * If this didn't work, use the Orlov search algorithm
                 * to find a new flex group; we pass in the mode to
                 * avoid the topdir algorithms.
                 */
                *group = parent_group + flex_size;
                if (*group > ngroups)
                        *group = 0;
                return find_group_orlov(sb, parent, group, mode, NULL);
        }

        /*
         * Try to place the inode in its parent directory
         */
        *group = parent_group;
        desc = ext4_get_group_desc(sb, *group, NULL);
        if (desc && ext4_free_inodes_count(sb, desc) &&
            ext4_free_blks_count(sb, desc))
                return 0;

        /*
         * We're going to place this inode in a different blockgroup from its
         * parent.  We want to cause files in a common directory to all land in
         * the same blockgroup.  But we want files which are in a different
         * directory which shares a blockgroup with our parent to land in a
         * different blockgroup.
         *
         * So add our directory's i_ino into the starting point for the hash.
         */
        *group = (*group + parent->i_ino) % ngroups;

        /*
         * Use a quadratic hash to find a group with a free inode and some free
         * blocks.
         */
        for (i = 1; i < ngroups; i <<= 1) {
                *group += i;
                if (*group >= ngroups)
                        *group -= ngroups;
                desc = ext4_get_group_desc(sb, *group, NULL);
                if (desc && ext4_free_inodes_count(sb, desc) &&
                    ext4_free_blks_count(sb, desc))
                        return 0;
        }

        /*
         * That failed: try linear search for a free inode, even if that group
         * has no free blocks.
         */
        *group = parent_group;
        for (i = 0; i < ngroups; i++) {
                if (++*group >= ngroups)
                        *group = 0;
                desc = ext4_get_group_desc(sb, *group, NULL);
                if (desc && ext4_free_inodes_count(sb, desc))
                        return 0;
        }

        return -1;
}
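
/*
 * Editorial note, not part of the original file: the quadratic hash
 * above visits cumulative offsets 1, 1+2, 1+2+4, ... from the starting
 * group, i.e. groups g+1, g+3, g+7, g+15, ... (mod ngroups), so it
 * makes at most about log2(ngroups) probes spread across the whole
 * filesystem before find_group_other() falls back to a linear scan.
 */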

/*
 * Claim the inode from the inode bitmap.  If the group
 * is uninit we need to take the group's ext4_group_lock
 * and clear the uninit flag.  The inode bitmap update
 * and group desc uninit flag clear should be done
 * after holding ext4_group_lock so that ext4_read_inode_bitmap
 * doesn't race with ext4_claim_inode.
 */
static int ext4_claim_inode(struct super_block *sb,
                        struct buffer_head *inode_bitmap_bh,
                        unsigned long ino, ext4_group_t group, int mode)
{
        int free = 0, retval = 0, count;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_group_info *grp = ext4_get_group_info(sb, group);
        struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);

        /*
         * We have to be sure that new inode allocation does not race with
         * inode table initialization, because otherwise we may end up
         * allocating and writing new inode right before sb_issue_zeroout
         * takes place and overwriting our new inode with zeroes. So we
         * take alloc_sem to prevent it.
         */
        down_read(&grp->alloc_sem);
        ext4_lock_group(sb, group);
        if (ext4_set_bit(ino, inode_bitmap_bh->b_data)) {
                /* not a free inode */
                retval = 1;
                goto err_ret;
        }
        ino++;
        if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
                        ino > EXT4_INODES_PER_GROUP(sb)) {
                ext4_unlock_group(sb, group);
                up_read(&grp->alloc_sem);
                ext4_error(sb, "reserved inode or inode > inodes count - "
                           "block_group = %u, inode=%lu", group,
                           ino + group * EXT4_INODES_PER_GROUP(sb));
                return 1;
        }
        /* If we didn't allocate from within the initialized part of the inode
         * table then we need to initialize up to this inode. */
        if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {

                if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
                        gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
                        /* When marking the block group with
                         * ~EXT4_BG_INODE_UNINIT we don't want to depend
                         * on the value of bg_itable_unused even though
                         * mke2fs could have initialized the same for us.
                         * Instead we calculate the value below.
                         */
                        free = 0;
                } else {
                        free = EXT4_INODES_PER_GROUP(sb) -
                                ext4_itable_unused_count(sb, gdp);
                }

                /*
                 * Check the relative inode number against the last used
                 * relative inode number in this group.  If it is greater
                 * we need to update the bg_itable_unused count.
                 */
                if (ino > free)
                        ext4_itable_unused_set(sb, gdp,
                                        (EXT4_INODES_PER_GROUP(sb) - ino));
        }
        count = ext4_free_inodes_count(sb, gdp) - 1;
        ext4_free_inodes_set(sb, gdp, count);
        if (S_ISDIR(mode)) {
                count = ext4_used_dirs_count(sb, gdp) + 1;
                ext4_used_dirs_set(sb, gdp, count);
                if (sbi->s_log_groups_per_flex) {
                        ext4_group_t f = ext4_flex_group(sbi, group);

                        atomic_inc(&sbi->s_flex_groups[f].used_dirs);
                }
        }
        gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
err_ret:
        ext4_unlock_group(sb, group);
        up_read(&grp->alloc_sem);
        return retval;
}
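
/*
 * Worked example of the bg_itable_unused update above (illustrative
 * numbers, not from the original file): with 8192 inodes per group
 * and the inode table initialized up to relative inode 100
 * (free == 100), claiming relative inode 150 (ino == 150 after the
 * increment) extends the initialized region, so bg_itable_unused is
 * set to 8192 - 150 == 8042.
 */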

/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode,
                             const struct qstr *qstr, __u32 goal)
{
        struct super_block *sb;
        struct buffer_head *inode_bitmap_bh = NULL;
        struct buffer_head *group_desc_bh;
        ext4_group_t ngroups, group = 0;
        unsigned long ino = 0;
        struct inode *inode;
        struct ext4_group_desc *gdp = NULL;
        struct ext4_inode_info *ei;
        struct ext4_sb_info *sbi;
        int ret2, err = 0;
        struct inode *ret;
        ext4_group_t i;
        int free = 0;
        static int once = 1;
        ext4_group_t flex_group;

        /* Cannot create files in a deleted directory */
        if (!dir || !dir->i_nlink)
                return ERR_PTR(-EPERM);

        sb = dir->i_sb;
        ngroups = ext4_get_groups_count(sb);
        trace_ext4_request_inode(dir, mode);
        inode = new_inode(sb);
        if (!inode)
                return ERR_PTR(-ENOMEM);
        ei = EXT4_I(inode);
        sbi = EXT4_SB(sb);

        if (!goal)
                goal = sbi->s_inode_goal;

        if (goal && goal <= le32_to_cpu(sbi->s_es->s_inodes_count)) {
                group = (goal - 1) / EXT4_INODES_PER_GROUP(sb);
                ino = (goal - 1) % EXT4_INODES_PER_GROUP(sb);
                ret2 = 0;
                goto got_group;
        }

        if (sbi->s_log_groups_per_flex && test_opt(sb, OLDALLOC)) {
                ret2 = find_group_flex(sb, dir, &group);
                if (ret2 == -1) {
                        ret2 = find_group_other(sb, dir, &group, mode);
                        if (ret2 == 0 && once) {
                                once = 0;
                                printk(KERN_NOTICE "ext4: find_group_flex "
                                       "failed, fallback succeeded dir %lu\n",
                                       dir->i_ino);
                        }
                }
                goto got_group;
        }

        if (S_ISDIR(mode)) {
                if (test_opt(sb, OLDALLOC))
                        ret2 = find_group_dir(sb, dir, &group);
                else
                        ret2 = find_group_orlov(sb, dir, &group, mode, qstr);
        } else
                ret2 = find_group_other(sb, dir, &group, mode);

got_group:
        EXT4_I(dir)->i_last_alloc_group = group;
        err = -ENOSPC;
        if (ret2 == -1)
                goto out;

        for (i = 0; i < ngroups; i++, ino = 0) {
                err = -EIO;

                gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
                if (!gdp)
                        goto fail;

                brelse(inode_bitmap_bh);
                inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
                if (!inode_bitmap_bh)
                        goto fail;

repeat_in_this_group:
                ino = ext4_find_next_zero_bit((unsigned long *)
                                              inode_bitmap_bh->b_data,
                                              EXT4_INODES_PER_GROUP(sb), ino);

                if (ino < EXT4_INODES_PER_GROUP(sb)) {

                        BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
                        err = ext4_journal_get_write_access(handle,
                                                            inode_bitmap_bh);
                        if (err)
                                goto fail;

                        BUFFER_TRACE(group_desc_bh, "get_write_access");
                        err = ext4_journal_get_write_access(handle,
                                                            group_desc_bh);
                        if (err)
                                goto fail;
                        if (!ext4_claim_inode(sb, inode_bitmap_bh,
                                                ino, group, mode)) {
                                /* we won it */
                                BUFFER_TRACE(inode_bitmap_bh,
                                        "call ext4_handle_dirty_metadata");
                                err = ext4_handle_dirty_metadata(handle,
                                                                 NULL,
                                                        inode_bitmap_bh);
                                if (err)
                                        goto fail;
                                /* zero bit is inode number 1 */
                                ino++;
                                goto got;
                        }
                        /* we lost it */
                        ext4_handle_release_buffer(handle, inode_bitmap_bh);
                        ext4_handle_release_buffer(handle, group_desc_bh);

                        if (++ino < EXT4_INODES_PER_GROUP(sb))
                                goto repeat_in_this_group;
                }

                /*
                 * This case is possible in concurrent environment.  It is very
                 * rare.  We cannot repeat the find_group_xxx() call because
                 * that will simply return the same blockgroup, because the
                 * group descriptor metadata has not yet been updated.
                 * So we just go onto the next blockgroup.
                 */
                if (++group == ngroups)
                        group = 0;
        }
        err = -ENOSPC;
        goto out;

got:
        /* We may have to initialize the block bitmap if it isn't already */
        if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
            gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
                struct buffer_head *block_bitmap_bh;

                block_bitmap_bh = ext4_read_block_bitmap(sb, group);
                BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
                err = ext4_journal_get_write_access(handle, block_bitmap_bh);
                if (err) {
                        brelse(block_bitmap_bh);
                        goto fail;
                }

                free = 0;
                ext4_lock_group(sb, group);
                /* recheck and clear flag under lock if we still need to */
                if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
                        free = ext4_free_blocks_after_init(sb, group, gdp);
                        gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
                        ext4_free_blks_set(sb, gdp, free);
                        gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
                                                                gdp);
                }
                ext4_unlock_group(sb, group);

                /* Don't need to dirty bitmap block if we didn't change it */
                if (free) {
                        BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
                        err = ext4_handle_dirty_metadata(handle,
                                                         NULL, block_bitmap_bh);
                }

                brelse(block_bitmap_bh);
                if (err)
                        goto fail;
        }
        BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
        err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);
        if (err)
                goto fail;

        percpu_counter_dec(&sbi->s_freeinodes_counter);
        if (S_ISDIR(mode))
                percpu_counter_inc(&sbi->s_dirs_counter);
        ext4_mark_super_dirty(sb);

        if (sbi->s_log_groups_per_flex) {
                flex_group = ext4_flex_group(sbi, group);
                atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
        }

        if (test_opt(sb, GRPID)) {
                inode->i_mode = mode;
                inode->i_uid = current_fsuid();
                inode->i_gid = dir->i_gid;
        } else
                inode_init_owner(inode, dir, mode);

        inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
        /* This is the optimal IO size (for stat), not the fs block size */
        inode->i_blocks = 0;
        inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
                                                ext4_current_time(inode);

        memset(ei->i_data, 0, sizeof(ei->i_data));
        ei->i_dir_start_lookup = 0;
        ei->i_disksize = 0;

        /*
         * Don't inherit extent flag from directory, amongst others.  We set
         * extent flag on newly created directory and file only if -o extent
         * mount option is specified.
         */
        ei->i_flags =
                ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED);
        ei->i_file_acl = 0;
        ei->i_dtime = 0;
        ei->i_block_group = group;
        ei->i_last_alloc_group = ~0;

        ext4_set_inode_flags(inode);
        if (IS_DIRSYNC(inode))
                ext4_handle_sync(handle);
        if (insert_inode_locked(inode) < 0) {
                err = -EINVAL;
                goto fail_drop;
        }
        spin_lock(&sbi->s_next_gen_lock);
        inode->i_generation = sbi->s_next_generation++;
        spin_unlock(&sbi->s_next_gen_lock);

        ei->i_state_flags = 0;
        ext4_set_inode_state(inode, EXT4_STATE_NEW);

        ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;

        ret = inode;
        dquot_initialize(inode);
        err = dquot_alloc_inode(inode);
        if (err)
                goto fail_drop;

        err = ext4_init_acl(handle, inode, dir);
        if (err)
                goto fail_free_drop;

        err = ext4_init_security(handle, inode, dir);
        if (err)
                goto fail_free_drop;

        if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
                /* set extent flag only for directory, file and normal symlink */
                if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
                        ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
                        ext4_ext_tree_init(handle, inode);
                }
        }

        err = ext4_mark_inode_dirty(handle, inode);
        if (err) {
                ext4_std_error(sb, err);
                goto fail_free_drop;
        }

        ext4_debug("allocating inode %lu\n", inode->i_ino);
        trace_ext4_allocate_inode(inode, dir, mode);
        goto really_out;
fail:
        ext4_std_error(sb, err);
out:
        iput(inode);
        ret = ERR_PTR(err);
really_out:
        brelse(inode_bitmap_bh);
        return ret;

fail_free_drop:
        dquot_free_inode(inode);

fail_drop:
        dquot_drop(inode);
        inode->i_flags |= S_NOQUOTA;
        inode->i_nlink = 0;
        unlock_new_inode(inode);
        iput(inode);
        brelse(inode_bitmap_bh);
        return ERR_PTR(err);
}
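
/*
 * Caller sketch (illustrative, not part of the original file); the
 * real callers live in fs/ext4/namei.c, e.g. ext4_create():
 *
 *	handle = ext4_journal_start(dir, credits);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	inode = ext4_new_inode(handle, dir, mode, &dentry->d_name, 0);
 *	err = PTR_ERR(inode);
 *	if (!IS_ERR(inode)) {
 *		... set up inode operations, add the directory entry ...
 *	}
 *	ext4_journal_stop(handle);
 *
 * where "credits" is the journal credit estimate chosen by the caller.
 */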

/* Verify that we are loading a valid orphan from disk */
struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
{
        unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
        ext4_group_t block_group;
        int bit;
        struct buffer_head *bitmap_bh;
        struct inode *inode = NULL;
        long err = -EIO;

        /* Error cases - e2fsck has already cleaned up for us */
        if (ino > max_ino) {
                ext4_warning(sb, "bad orphan ino %lu! e2fsck was run?", ino);
                goto error;
        }

        block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
        bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
        bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
        if (!bitmap_bh) {
                ext4_warning(sb, "inode bitmap error for orphan %lu", ino);
                goto error;
        }

        /* Having the inode bit set should be a 100% indicator that this
         * is a valid orphan (no e2fsck run on fs).  Orphans also include
         * inodes that were being truncated, so we can't check i_nlink==0.
         */
        if (!ext4_test_bit(bit, bitmap_bh->b_data))
                goto bad_orphan;

        inode = ext4_iget(sb, ino);
        if (IS_ERR(inode))
                goto iget_failed;

        /*
         * If the orphan has i_nlink > 0 then it should be able to be
         * truncated, otherwise it won't be removed from the orphan list
         * during processing and an infinite loop will result.
         */
        if (inode->i_nlink && !ext4_can_truncate(inode))
                goto bad_orphan;

        if (NEXT_ORPHAN(inode) > max_ino)
                goto bad_orphan;
        brelse(bitmap_bh);
        return inode;

iget_failed:
        err = PTR_ERR(inode);
        inode = NULL;
bad_orphan:
        ext4_warning(sb, "bad orphan inode %lu! e2fsck was run?", ino);
        printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
               bit, (unsigned long long)bitmap_bh->b_blocknr,
               ext4_test_bit(bit, bitmap_bh->b_data));
        printk(KERN_NOTICE "inode=%p\n", inode);
        if (inode) {
                printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
                       is_bad_inode(inode));
                printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
                       NEXT_ORPHAN(inode));
                printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
                printk(KERN_NOTICE "i_nlink=%u\n", inode->i_nlink);
                /* Avoid freeing blocks if we got a bad deleted inode */
                if (inode->i_nlink == 0)
                        inode->i_blocks = 0;
                iput(inode);
        }
        brelse(bitmap_bh);
error:
        return ERR_PTR(err);
}

unsigned long ext4_count_free_inodes(struct super_block *sb)
{
        unsigned long desc_count;
        struct ext4_group_desc *gdp;
        ext4_group_t i, ngroups = ext4_get_groups_count(sb);
#ifdef EXT4FS_DEBUG
        struct ext4_super_block *es;
        unsigned long bitmap_count, x;
        struct buffer_head *bitmap_bh = NULL;

        es = EXT4_SB(sb)->s_es;
        desc_count = 0;
        bitmap_count = 0;
        gdp = NULL;
        for (i = 0; i < ngroups; i++) {
                gdp = ext4_get_group_desc(sb, i, NULL);
                if (!gdp)
                        continue;
                desc_count += ext4_free_inodes_count(sb, gdp);
                brelse(bitmap_bh);
                bitmap_bh = ext4_read_inode_bitmap(sb, i);
                if (!bitmap_bh)
                        continue;

                x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
                printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
                        (unsigned long) i, ext4_free_inodes_count(sb, gdp), x);
                bitmap_count += x;
        }
        brelse(bitmap_bh);
        printk(KERN_DEBUG "ext4_count_free_inodes: "
               "stored = %u, computed = %lu, %lu\n",
               le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
        return desc_count;
#else
        desc_count = 0;
        for (i = 0; i < ngroups; i++) {
                gdp = ext4_get_group_desc(sb, i, NULL);
                if (!gdp)
                        continue;
                desc_count += ext4_free_inodes_count(sb, gdp);
                cond_resched();
        }
        return desc_count;
#endif
}

/* Called at mount-time, super-block is locked */
unsigned long ext4_count_dirs(struct super_block * sb)
{
        unsigned long count = 0;
        ext4_group_t i, ngroups = ext4_get_groups_count(sb);

        for (i = 0; i < ngroups; i++) {
                struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
                if (!gdp)
                        continue;
                count += ext4_used_dirs_count(sb, gdp);
        }
        return count;
}

/*
 * Zeroes the not-yet-zeroed part of an inode table - just writes zeroes
 * through the whole inode table.  Must be called without any spinlock held.
 * The only place where it is called from on an active part of the
 * filesystem is the ext4lazyinit thread, so we do not need any special
 * locks; however, we have to prevent inode allocation from the current
 * group, so we take the alloc_sem lock to block ext4_claim_inode until we
 * are finished.
 */
extern int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
                                 int barrier)
{
        struct ext4_group_info *grp = ext4_get_group_info(sb, group);
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_group_desc *gdp = NULL;
        struct buffer_head *group_desc_bh;
        handle_t *handle;
        ext4_fsblk_t blk;
        int num, ret = 0, used_blks = 0;

        /* This should not happen, but just to be sure check this */
        if (sb->s_flags & MS_RDONLY) {
                ret = 1;
                goto out;
        }

        gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
        if (!gdp)
                goto out;

        /*
         * We do not need to lock this, because we are the only one
         * handling this flag.
         */
        if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
                goto out;

        handle = ext4_journal_start_sb(sb, 1);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                goto out;
        }

        down_write(&grp->alloc_sem);
        /*
         * If the inode bitmap was already initialized there may be some
         * used inodes, so we need to skip blocks with used inodes in
         * the inode table.
         */
        if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)))
                used_blks = DIV_ROUND_UP((EXT4_INODES_PER_GROUP(sb) -
                            ext4_itable_unused_count(sb, gdp)),
                            sbi->s_inodes_per_block);

        if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
                ext4_error(sb, "Something is wrong with group %u\n"
                           "Used itable blocks: %d\n"
                           "itable unused count: %u\n",
                           group, used_blks,
                           ext4_itable_unused_count(sb, gdp));
                ret = 1;
                goto err_out;
        }

        blk = ext4_inode_table(sb, gdp) + used_blks;
        num = sbi->s_itb_per_group - used_blks;

        BUFFER_TRACE(group_desc_bh, "get_write_access");
        ret = ext4_journal_get_write_access(handle,
                                            group_desc_bh);
        if (ret)
                goto err_out;

        /*
         * Skip zeroout if the inode table is full.  But we set the ZEROED
         * flag anyway, because obviously, when it is full it does not need
         * further zeroing.
         */
        if (unlikely(num == 0))
                goto skip_zeroout;

        ext4_debug("going to zero out inode table in group %d\n",
                   group);
        ret = sb_issue_zeroout(sb, blk, num, GFP_NOFS);
        if (ret < 0)
                goto err_out;
        if (barrier)
                blkdev_issue_flush(sb->s_bdev, GFP_NOFS, NULL);

skip_zeroout:
        ext4_lock_group(sb, group);
        gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
        gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
        ext4_unlock_group(sb, group);

        BUFFER_TRACE(group_desc_bh,
                     "call ext4_handle_dirty_metadata");
        ret = ext4_handle_dirty_metadata(handle, NULL,
                                         group_desc_bh);

err_out:
        up_write(&grp->alloc_sem);
        ext4_journal_stop(handle);
out:
        return ret;
}
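
/*
 * Editorial note, not part of the original file: on a mounted
 * filesystem this function is driven by the ext4lazyinit thread (see
 * ext4_lazyinit_thread() in fs/ext4/super.c), which walks the block
 * groups of freshly mounted filesystems and invokes
 * ext4_init_inode_table() for each group whose EXT4_BG_INODE_ZEROED
 * flag is still clear.
 */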