/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *	(sct@redhat.com), 1993, 1998
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *	David S. Miller (davem@caip.rutgers.edu), 1995
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"

#define MPAGE_DA_EXTENT_TAIL 0x01

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	return jbd2_journal_begin_ordered_truncate(
					EXT4_SB(inode->i_sb)->s_journal,
					&EXT4_I(inode)->jinode,
					new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * The ext4 forget function must perform a revoke if we are freeing data
 * which has been journaled.  Metadata (eg. indirect blocks) must be
 * revoked in all cases.
 *
 * "bh" may be NULL: a metadata block may have been freed from memory
 * but there may still be a record of it in the journal, and that record
 * still needs to be revoked.
 *
 * If the handle isn't valid we're not journaling so there's nothing to do.
 */
int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
		struct buffer_head *bh, ext4_fsblk_t blocknr)
{
	int err;

	if (!ext4_handle_valid(handle))
		return 0;

	might_sleep();

	BUFFER_TRACE(bh, "enter");

	jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
		  "data mode %lx\n",
		  bh, is_metadata, inode->i_mode,
		  test_opt(inode->i_sb, DATA_FLAGS));

	/* Never use the revoke function if we are doing full data
	 * journaling: there is no need to, and a V1 superblock won't
	 * support it.  Otherwise, only skip the revoke on un-journaled
	 * data blocks. */

	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
	    (!is_metadata && !ext4_should_journal_data(inode))) {
		if (bh) {
			BUFFER_TRACE(bh, "call jbd2_journal_forget");
			return ext4_journal_forget(handle, bh);
		}
		return 0;
	}

	/*
	 * data!=journal && (is_metadata || should_journal_data(inode))
	 */
	BUFFER_TRACE(bh, "call ext4_journal_revoke");
	err = ext4_journal_revoke(handle, blocknr, bh);
	if (err)
		ext4_abort(inode->i_sb, __func__,
			   "error %d when attempting revoke", err);
	BUFFER_TRACE(bh, "exit");
	return err;
}

/*
 * Work out how many blocks we need to proceed with the next chunk of a
 * truncate transaction.
 */
static unsigned long blocks_for_truncate(struct inode *inode)
{
	ext4_lblk_t needed;

	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);

	/* Give ourselves just enough room to cope with inodes in which
	 * i_blocks is corrupt: we've seen disk corruptions in the past
	 * which resulted in random data in an inode which looked enough
	 * like a regular file for ext4 to try to delete it.  Things
	 * will go a bit crazy if that happens, but at least we should
	 * try not to panic the whole kernel. */
	if (needed < 2)
		needed = 2;

	/* But we need to bound the transaction so we don't overflow the
	 * journal. */
	if (needed > EXT4_MAX_TRANS_DATA)
		needed = EXT4_MAX_TRANS_DATA;

	return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}

/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */
static handle_t *start_transaction(struct inode *inode)
{
	handle_t *result;

	result = ext4_journal_start(inode, blocks_for_truncate(inode));
	if (!IS_ERR(result))
		return result;

	ext4_std_error(inode->i_sb, PTR_ERR(result));
	return result;
}

/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, and the transaction must be restarted, we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (!ext4_handle_valid(handle))
		return 0;
	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;
	if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
		return 0;
	return 1;
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
static int ext4_journal_test_restart(handle_t *handle, struct inode *inode)
{
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	return ext4_journal_restart(handle, blocks_for_truncate(inode));
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_delete_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb, __func__,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (!ext4_handle_has_enough_credits(handle, 3)) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb, __func__,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime	= get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	return;
no_delete:
	clear_inode(inode);	/* We must guarantee clearing of inode... */
}
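
/*
 * The Indirect/add_chain helpers below cache one step of an indirect-block
 * lookup: each triple remembers the block number that was read (key), the
 * location it was read from (p, pointing into the inode's i_data or an
 * indirect block's b_data), and the buffer_head hosting that location
 * (bh, NULL when the pointer lives in the inode itself).  Later code can
 * re-check *p against key to verify the chain did not change underneath us.
 */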
typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

/**
 *	ext4_block_to_path - parse the block number into array of offsets
 *	@inode: inode in question (we are only interested in its superblock)
 *	@i_block: block number to be parsed
 *	@offsets: array to store the offsets in
 *	@boundary: set this non-zero if the referred-to block is likely to be
 *		followed (on disk) by an indirect block.
 *
 *	To store the locations of file's data ext4 uses a data structure common
 *	for UNIX filesystems - tree of pointers anchored in the inode, with
 *	data blocks at leaves and indirect blocks in intermediate nodes.
 *	This function translates the block number into path in that tree -
 *	return value is the path length and @offsets[n] is the offset of
 *	pointer to (n+1)th node in the nth one.  If @block is out of range
 *	(negative or too large) warning is printed and zero returned.
 *
 *	Note: function doesn't find node addresses, so no IO is needed.  All
 *	we need to know is the capacity of indirect blocks (taken from the
 *	inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks.  We might use long long, but that would
 * kill us on x86.  Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */

static int ext4_block_to_path(struct inode *inode,
			ext4_lblk_t i_block,
			ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < 0) {
		ext4_warning(inode->i_sb, "ext4_block_to_path", "block < 0");
	} else if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "ext4_block_to_path",
				"block %lu > max in inode %lu",
				i_block + direct_blocks +
				indirect_blocks + double_blocks, inode->i_ino);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
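
/*
 * Worked example (assuming 4 KiB blocks, i.e. 1024 block pointers per
 * indirect block, and EXT4_NDIR_BLOCKS == 12): logical block 5 yields the
 * single-element path {5}; block 12 is the first singly-indirect block and
 * yields {EXT4_IND_BLOCK, 0}; block 1036 (= 12 + 1024) is the first
 * doubly-indirect block and yields {EXT4_DIND_BLOCK, 0, 0}.  The return
 * value is the path depth (1, 2 and 3 respectively).
 */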

/**
 *	ext4_get_branch - read the chain of indirect blocks leading to data
 *	@inode: inode in question
 *	@depth: depth of the chain (1 - direct pointer, etc.)
 *	@offsets: offsets of pointers in inode/indirect blocks
 *	@chain: place to store the result
 *	@err: here we store the error value
 *
 *	Function fills the array of triples <key, p, bh> and returns %NULL
 *	if everything went OK or the pointer to the last filled triple
 *	(incomplete one) otherwise.  Upon the return chain[i].key contains
 *	the number of (i+1)-th block in the chain (as it is stored in memory,
 *	i.e. little-endian 32-bit), chain[i].p contains the address of that
 *	number (it points into struct inode for i==0 and into the bh->b_data
 *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 *	block for i>0 and NULL for i==0.  In other words, it holds the block
 *	numbers of the chain, addresses they were taken from (and where we can
 *	verify that chain did not change) and buffer_heads hosting these
 *	numbers.
 *
 *	Function stops when it stumbles upon zero pointer (absent block)
 *		(pointer to last triple returned, *@err == 0)
 *	or when it gets an IO error reading an indirect block
 *		(ditto, *@err == -EIO)
 *	or when it reads all @depth-1 indirect blocks successfully and finds
 *	the whole chain, all the way to the data (returns %NULL, *err == 0).
 *
 *	Need to be called with
 *	down_read(&EXT4_I(inode)->i_data_sem)
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, le32_to_cpu(p->key));
		if (!bh)
			goto failure;
		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = -EIO;
no_block:
	return p;
}

/**
 *	ext4_find_near - find a place for allocation with sufficient locality
 *	@inode: owner
 *	@ind: descriptor of indirect block.
 *
 *	This function returns the preferred place for block allocation.
 *	It is used when heuristic for sequential allocation fails.
 *	Rules are:
 *	  + if there is a block to the left of our position - allocate near it.
 *	  + if pointer will live in indirect block - allocate near that block.
 *	  + if pointer will live in inode - allocate in the same
 *	    cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group.  The PID is used here so that functionally related
 * files will be close-by on-disk.
 *
 *	Caller must make sure that @ind is valid and will stay that way.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;
	ext4_grpblk_t colour;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred to from the inode itself? OK, just put it
	 * into the same cylinder group then.
	 */
	bg_start = ext4_group_first_block_no(inode->i_sb, ei->i_block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}
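
/*
 * For example, with 4 KiB blocks a block group spans 32768 blocks, so the
 * PID-based colouring above spreads allocations from unrelated processes
 * across 16 slots of 2048 blocks each within the group
 * (colour = (pid % 16) * 2048), assuming the group is not truncated by the
 * end of the filesystem.
 */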

/**
 *	ext4_find_goal - find a preferred place for allocation.
 *	@inode: owner
 *	@block:  block we want
 *	@partial: pointer to the last triple within a chain
 *
 *	Normally this function finds the preferred place for block allocation
 *	and returns it.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
		Indirect *partial)
{
	/*
	 * XXX need to get goal block from mballoc's data structures
	 */

	return ext4_find_near(inode, partial);
}

/**
 *	ext4_blks_to_allocate: Look up the block map and count the number
 *	of direct blocks that need to be allocated for the given branch.
 *
 *	@branch: chain of indirect blocks
 *	@k: number of blocks needed for indirect blocks
 *	@blks: number of data blocks to be mapped.
 *	@blocks_to_boundary:  the offset in the indirect block
 *
 *	return the total number of blocks to be allocated, including the
 *	direct and indirect blocks.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
		int blocks_to_boundary)
{
	unsigned int count = 0;

	/*
	 * Simple case: the [t,d]indirect block(s) have not been allocated yet,
	 * so clearly the blocks on that path have not been allocated either.
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
		le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}

/**
 *	ext4_alloc_blocks: multiple allocate blocks needed for a branch
 *	@indirect_blks: the number of blocks that need to be allocated for
 *			indirect blocks
 *
 *	@new_blocks: on return it will store the new block numbers for
 *	the indirect blocks(if needed) and the first direct block,
 *	@blks:	on return it will store the total number of allocated
 *		direct blocks
 */
static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
				ext4_lblk_t iblock, ext4_fsblk_t goal,
				int indirect_blks, int blks,
				ext4_fsblk_t new_blocks[4], int *err)
{
	struct ext4_allocation_request ar;
	int target, i;
	unsigned long count = 0, blk_allocated = 0;
	int index = 0;
	ext4_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * Here we try to allocate the requested multiple blocks at once,
	 * on a best-effort basis.
	 * To build a branch, we should allocate blocks for
	 * the indirect blocks (if not allocated yet), and at least
	 * the first direct block of this branch.  That's the
	 * minimum number of blocks that need to be allocated (required).
	 */
	/* first we try to allocate the indirect blocks */
	target = indirect_blks;
	while (target > 0) {
		count = target;
		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext4_new_meta_blocks(handle, inode,
							goal, &count, err);
		if (*err)
			goto failed_out;

		target -= count;
		/* allocate blocks for indirect blocks */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}
		if (count > 0) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
			printk(KERN_INFO "%s returned more blocks than "
						"requested\n", __func__);
			WARN_ON(1);
			break;
		}
	}

	target = blks - count;
	blk_allocated = count;
	if (!target)
		goto allocated;
	/* Now allocate data blocks */
	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.goal = goal;
	ar.len = target;
	ar.logical = iblock;
	if (S_ISREG(inode->i_mode))
		/* enable in-core preallocation only for regular files */
		ar.flags = EXT4_MB_HINT_DATA;

	current_block = ext4_mb_new_blocks(handle, &ar, err);

	if (*err && (target == blks)) {
		/*
		 * if the allocation failed and we didn't allocate
		 * any blocks before
		 */
		goto failed_out;
	}
	if (!*err) {
		if (target == blks) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
		}
		blk_allocated += ar.len;
	}
allocated:
	/* total number of blocks allocated for direct blocks */
	ret = blk_allocated;
	*err = 0;
	return ret;
failed_out:
	for (i = 0; i < index; i++)
		ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);
	return ret;
}

/**
 *	ext4_alloc_branch - allocate and set up a chain of blocks.
 *	@inode: owner
 *	@indirect_blks: number of allocated indirect blocks
 *	@blks: number of allocated direct blocks
 *	@offsets: offsets (in the blocks) to store the pointers to next.
 *	@branch: place to store the chain in.
 *
 *	This function allocates blocks, zeroes out all but the last one,
 *	links them into chain and (if we are synchronous) writes them to disk.
 *	In other words, it prepares a branch that can be spliced onto the
 *	inode.  It stores the information about that chain in the branch[], in
 *	the same format as ext4_get_branch() would do.  We are calling it after
 *	we had read the existing part of chain and partial points to the last
 *	triple of that (one with zero ->key).  Upon the exit we have the same
 *	picture as after the successful ext4_get_block(), except that in one
 *	place chain is disconnected - *branch->p is still zero (we did not
 *	set the last link), but branch->key contains the number that should
 *	be placed into *branch->p to fill that gap.
 *
 *	If allocation fails we free all blocks we've allocated (and forget
 *	their buffer_heads) and return the error value from the failed
 *	ext4_alloc_block() (normally -ENOSPC).  Otherwise we set the chain
 *	as described above and return 0.
 */
static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
				ext4_lblk_t iblock, int indirect_blks,
				int *blks, ext4_fsblk_t goal,
				ext4_lblk_t *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext4_fsblk_t new_blocks[4];
	ext4_fsblk_t current_block;

	num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;

	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * metadata blocks and data blocks are allocated.
	 */
	for (n = 1; n <= indirect_blks; n++) {
		/*
		 * Get buffer_head for parent block, zero it out
		 * and set the pointer to new one, then send
		 * parent to disk.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		branch[n].bh = bh;
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (err) {
			unlock_buffer(bh);
			brelse(bh);
			goto failed;
		}

		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if (n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * End of chain, update the last new metablock of
			 * the chain to point to the new allocated
			 * data blocks numbers
			 */
			for (i = 1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto failed;
	}
	*blks = num;
	return err;
failed:
	/* Allocation failed, free what we already allocated */
	for (i = 1; i <= n ; i++) {
		BUFFER_TRACE(branch[i].bh, "call jbd2_journal_forget");
		ext4_journal_forget(handle, branch[i].bh);
	}
	for (i = 0; i < indirect_blks; i++)
		ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);

	ext4_free_blocks(handle, inode, new_blocks[i], num, 0);

	return err;
}

/**
 * ext4_splice_branch - splice the allocated branch onto inode.
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @chain: chain of indirect blocks (with a missing link - see
 *	ext4_alloc_branch)
 * @where: location of missing link
 * @num:   number of indirect blocks we are adding
 * @blks:  number of direct blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.).  In case of success we end up with the full
 * chain to new block and return 0.
 */
static int ext4_splice_branch(handle_t *handle, struct inode *inode,
			ext4_lblk_t block, Indirect *where, int num, int blks)
{
	int i;
	int err = 0;
	ext4_fsblk_t current_block;

	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to the
	 * just-allocated direct blocks.
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */

	inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);

	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode.  Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size.  But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 * Inode was dirtied above.
		 */
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		BUFFER_TRACE(where[i].bh, "call jbd2_journal_forget");
		ext4_journal_forget(handle, where[i].bh);
		ext4_free_blocks(handle, inode,
					le32_to_cpu(where[i-1].key), 1, 0);
	}
	ext4_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks, 0);

	return err;
}

/*
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf.  So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 *
 *
 * Need to be called with
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
 * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
 */
static int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
				  ext4_lblk_t iblock, unsigned int maxblocks,
				  struct buffer_head *bh_result,
				  int create, int extend_disksize)
{
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext4_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	struct ext4_inode_info *ei = EXT4_I(inode);
	int count = 0;
	ext4_fsblk_t first_block = 0;
	loff_t disksize;


	J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
	J_ASSERT(handle != NULL || create == 0);
	depth = ext4_block_to_path(inode, iblock, offsets,
					&blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		clear_buffer_new(bh_result);
		count++;
		/*map more blocks*/
		while (count < maxblocks && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO)
		goto cleanup;

	/*
	 * Okay, we need to do block allocation.
	 */
	goal = ext4_find_goal(inode, iblock, partial);

	/* the number of blocks needed to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext4_blks_to_allocate(partial, indirect_blks,
					maxblocks, blocks_to_boundary);
	/*
	 * Block out ext4_truncate while we alter the tree
	 */
	err = ext4_alloc_branch(handle, inode, iblock, indirect_blks,
				&count, goal,
				offsets + (partial - chain), partial);

	/*
	 * The ext4_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned.  Can we handle this somehow?  We
	 * may need to return -EAGAIN upwards in the worst case. --sct
	 */
	if (!err)
		err = ext4_splice_branch(handle, inode, iblock,
					partial, indirect_blks, count);
	/*
	 * i_disksize growing is protected by i_data_sem.  Don't forget to
	 * protect it if you're about to implement concurrent
	 * ext4_get_block() -bzzz
	 */
	if (!err && extend_disksize) {
		disksize = ((loff_t) iblock + count) << inode->i_blkbits;
		if (disksize > i_size_read(inode))
			disksize = i_size_read(inode);
		if (disksize > ei->i_disksize)
			ei->i_disksize = disksize;
	}
	if (err)
		goto cleanup;

	set_buffer_new(bh_result);
got_it:
	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
	if (count > blocks_to_boundary)
		set_buffer_boundary(bh_result);
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
	BUFFER_TRACE(bh_result, "returned");
out:
	return err;
}
977
12219aea
AK
/*
 * Calculate the number of metadata blocks that need to be reserved
 * to allocate @blocks for a non-extent-based file
 */
static int ext4_indirect_calc_metadata_amount(struct inode *inode, int blocks)
{
	int icap = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ind_blks, dind_blks, tind_blks;

	/* number of new indirect blocks needed */
	ind_blks = (blocks + icap - 1) / icap;

	dind_blks = (ind_blks + icap - 1) / icap;

	tind_blks = 1;

	return ind_blks + dind_blks + tind_blks;
}

/*
 * Calculate the number of metadata blocks that need to be reserved
 * to allocate the given number of blocks
 */
static int ext4_calc_metadata_amount(struct inode *inode, int blocks)
{
	if (!blocks)
		return 0;

	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
		return ext4_ext_calc_metadata_amount(inode, blocks);

	return ext4_indirect_calc_metadata_amount(inode, blocks);
}
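
/*
 * For instance, reserving 1000 data blocks on a filesystem with 4 KiB
 * blocks (1024 pointers per block) yields a worst-case estimate of
 * 1 indirect + 1 doubly-indirect + 1 triply-indirect = 3 metadata blocks
 * from ext4_indirect_calc_metadata_amount() above; extent-mapped files go
 * through ext4_ext_calc_metadata_amount() instead.
 */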

static void ext4_da_update_reserve_space(struct inode *inode, int used)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int total, mdb, mdb_free;

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	/* recalculate the number of metablocks still needed to be reserved */
	total = EXT4_I(inode)->i_reserved_data_blocks - used;
	mdb = ext4_calc_metadata_amount(inode, total);

	/* figure out how many metablocks to release */
	BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
	mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;

	if (mdb_free) {
		/* Account for allocated meta_blocks */
		mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks;

		/* update fs dirty blocks counter */
		percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free);
		EXT4_I(inode)->i_allocated_meta_blocks = 0;
		EXT4_I(inode)->i_reserved_meta_blocks = mdb;
	}

	/* update per-inode reservations */
	BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks);
	EXT4_I(inode)->i_reserved_data_blocks -= used;

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
}
1042
/*
 * The ext4_get_blocks_wrap() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head and marks it
 * mapped.
 *
 * If the file is extent based, it calls ext4_ext_get_blocks();
 * otherwise, it calls ext4_get_blocks_handle() to handle indirect-mapping
 * based files.
 *
 * On success, it returns the number of blocks being mapped or allocated.
 * If create == 0 and the blocks are pre-allocated and uninitialized,
 * the resulting buffer head is unmapped.  If create == 1, it will make sure
 * the buffer head is mapped.
 *
 * It returns 0 if plain look up failed (blocks have not been allocated); in
 * that case, the buffer head is unmapped.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
			unsigned int max_blocks, struct buffer_head *bh,
			int create, int extend_disksize, int flag)
{
	int retval;

	clear_buffer_mapped(bh);

	/*
	 * Try to see if we can get the block without requesting
	 * a new file system block.
	 */
	down_read((&EXT4_I(inode)->i_data_sem));
	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
		retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
				bh, 0, 0);
	} else {
		retval = ext4_get_blocks_handle(handle,
				inode, block, max_blocks, bh, 0, 0);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	/* If it is only a block(s) look up */
	if (!create)
		return retval;

	/*
	 * Return if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated
	 * ext4_ext_get_block() returns with create = 0
	 * and the buffer head unmapped.
	 */
	if (retval > 0 && buffer_mapped(bh))
		return retval;

	/*
	 * New block allocation and/or writing to an uninitialized extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_blocks()
	 * with create == 1 flag.
	 */
	down_write((&EXT4_I(inode)->i_data_sem));

	/*
	 * if the caller is from delayed allocation writeout path
	 * we have already reserved fs blocks for allocation
	 * let the underlying get_block() function know to
	 * avoid double accounting
	 */
	if (flag)
		EXT4_I(inode)->i_delalloc_reserved_flag = 1;
	/*
	 * We need to check for EXT4 here because migrate
	 * could have changed the inode type in between
	 */
	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
		retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
				bh, create, extend_disksize);
	} else {
		retval = ext4_get_blocks_handle(handle, inode, block,
				max_blocks, bh, create, extend_disksize);

		if (retval > 0 && buffer_new(bh)) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing migrate flags
			 */
			EXT4_I(inode)->i_flags = EXT4_I(inode)->i_flags &
							~EXT4_EXT_MIGRATE;
		}
	}

	if (flag) {
		EXT4_I(inode)->i_delalloc_reserved_flag = 0;
		/*
		 * Update reserved blocks/metadata blocks
		 * after successful block allocation
		 * which were deferred till now
		 */
		if ((retval > 0) && buffer_delay(bh))
			ext4_da_update_reserve_space(inode, retval);
	}

	up_write((&EXT4_I(inode)->i_data_sem));
	return retval;
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh_result, int create)
{
	handle_t *handle = ext4_journal_current_handle();
	int ret = 0, started = 0;
	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
	int dio_credits;

	if (create && !handle) {
		/* Direct IO write... */
		if (max_blocks > DIO_MAX_BLOCKS)
			max_blocks = DIO_MAX_BLOCKS;
		dio_credits = ext4_chunk_trans_blocks(inode, max_blocks);
		handle = ext4_journal_start(inode, dio_credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}
		started = 1;
	}

	ret = ext4_get_blocks_wrap(handle, inode, iblock,
					max_blocks, bh_result, create, 0, 0);
	if (ret > 0) {
		bh_result->b_size = (ret << inode->i_blkbits);
		ret = 0;
	}
	if (started)
		ext4_journal_stop(handle);
out:
	return ret;
}

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int create, int *errp)
{
	struct buffer_head dummy;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	dummy.b_state = 0;
	dummy.b_blocknr = -1000;
	buffer_trace_init(&dummy.b_history);
	err = ext4_get_blocks_wrap(handle, inode, block, 1,
					&dummy, create, 1, 0);
	/*
	 * ext4_get_blocks_handle() returns number of blocks
	 * mapped. 0 in case of a HOLE.
	 */
	if (err > 0) {
		if (err > 1)
			WARN_ON(1);
		err = 0;
	}
	*errp = err;
	if (!err && buffer_mapped(&dummy)) {
		struct buffer_head *bh;
		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
		if (!bh) {
			*errp = -EIO;
			goto err;
		}
		if (buffer_new(&dummy)) {
			J_ASSERT(create != 0);
			J_ASSERT(handle != NULL);

			/*
			 * Now that we do not always journal data, we should
			 * keep in mind whether this should always journal the
			 * new buffer as metadata.  For now, regular file
			 * writes use ext4_get_block instead, so it's not a
			 * problem.
			 */
			lock_buffer(bh);
			BUFFER_TRACE(bh, "call get_create_access");
			fatal = ext4_journal_get_create_access(handle, bh);
			if (!fatal && !buffer_uptodate(bh)) {
				memset(bh->b_data, 0, inode->i_sb->s_blocksize);
				set_buffer_uptodate(bh);
			}
			unlock_buffer(bh);
			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
			err = ext4_handle_dirty_metadata(handle, inode, bh);
			if (!fatal)
				fatal = err;
		} else {
			BUFFER_TRACE(bh, "not a new buffer");
		}
		if (fatal) {
			*errp = fatal;
			brelse(bh);
			bh = NULL;
		}
		return bh;
	}
err:
	return NULL;
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int create, int *err)
{
	struct buffer_head *bh;

	bh = ext4_getblk(handle, inode, block, create, err);
	if (!bh)
		return bh;
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ_META, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	*err = -EIO;
	return NULL;
}
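
/*
 * Walk the buffer_heads attached to a page and apply @fn to every buffer
 * that overlaps the byte range [@from, @to).  The first error returned by
 * @fn is propagated, and @partial (if non-NULL) is set when some buffer
 * outside the range is not uptodate, so callers know the page cannot be
 * marked uptodate as a whole.
 */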
static int walk_page_buffers(handle_t *handle,
			     struct buffer_head *head,
			     unsigned from,
			     unsigned to,
			     int *partial,
			     int (*fn)(handle_t *handle,
				       struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next)
	{
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage() ->
 * block_write_full_page(). In that case, we *know* that ext4_writepage()
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
static int do_journal_get_write_access(handle_t *handle,
					struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	return ext4_journal_get_write_access(handle, bh);
}

static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks = ext4_writepage_trans_blocks(inode);
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	trace_mark(ext4_write_begin,
		   "dev %s ino %lu pos %llu len %u flags %u",
		   inode->i_sb->s_id, inode->i_ino,
		   (unsigned long long) pos, len, flags);
	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

retry:
	handle = ext4_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		ext4_journal_stop(handle);
		ret = -ENOMEM;
		goto out;
	}
	*pagep = page;

	ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				ext4_get_block);

	if (!ret && ext4_should_journal_data(inode)) {
		ret = walk_page_buffers(handle, page_buffers(page),
				from, to, NULL, do_journal_get_write_access);
	}

	if (ret) {
		unlock_page(page);
		ext4_journal_stop(handle);
		page_cache_release(page);
		/*
		 * block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again. Don't need
		 * i_size_read because we hold i_mutex.
		 */
		if (pos + len > inode->i_size)
			vmtruncate(inode, inode->i_size);
	}

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}
1405
bfc1af65
NP
1406/* For write_end() in data=journal mode */
1407static int write_end_fn(handle_t *handle, struct buffer_head *bh)
ac27a0ec
DK
1408{
1409 if (!buffer_mapped(bh) || buffer_freed(bh))
1410 return 0;
1411 set_buffer_uptodate(bh);
0390131b 1412 return ext4_handle_dirty_metadata(handle, NULL, bh);
ac27a0ec
DK
1413}
1414
1415/*
1416 * We need to pick up the new inode size which generic_commit_write gave us
1417 * `file' can be NULL - eg, when called from page_symlink().
1418 *
617ba13b 1419 * ext4 never places buffers on inode->i_mapping->private_list. metadata
ac27a0ec
DK
1420 * buffers are managed internally.
1421 */
bfc1af65
NP
1422static int ext4_ordered_write_end(struct file *file,
1423 struct address_space *mapping,
1424 loff_t pos, unsigned len, unsigned copied,
1425 struct page *page, void *fsdata)
ac27a0ec 1426{
617ba13b 1427 handle_t *handle = ext4_journal_current_handle();
cf108bca 1428 struct inode *inode = mapping->host;
ac27a0ec
DK
1429 int ret = 0, ret2;
1430
ba80b101
TT
1431 trace_mark(ext4_ordered_write_end,
1432 "dev %s ino %lu pos %llu len %u copied %u",
1433 inode->i_sb->s_id, inode->i_ino,
1434 (unsigned long long) pos, len, copied);
678aaf48 1435 ret = ext4_jbd2_file_inode(handle, inode);
ac27a0ec
DK
1436
1437 if (ret == 0) {
ac27a0ec
DK
1438 loff_t new_i_size;
1439
bfc1af65 1440 new_i_size = pos + copied;
cf17fea6
AK
1441 if (new_i_size > EXT4_I(inode)->i_disksize) {
1442 ext4_update_i_disksize(inode, new_i_size);
1443 /* We need to mark inode dirty even if
1444 * new_i_size is less that inode->i_size
1445 * bu greater than i_disksize.(hint delalloc)
1446 */
1447 ext4_mark_inode_dirty(handle, inode);
1448 }
1449
cf108bca 1450 ret2 = generic_write_end(file, mapping, pos, len, copied,
bfc1af65 1451 page, fsdata);
f8a87d89
RK
1452 copied = ret2;
1453 if (ret2 < 0)
1454 ret = ret2;
ac27a0ec 1455 }
617ba13b 1456 ret2 = ext4_journal_stop(handle);
ac27a0ec
DK
1457 if (!ret)
1458 ret = ret2;
bfc1af65
NP
1459
1460 return ret ? ret : copied;
ac27a0ec
DK
1461}

static int ext4_writeback_write_end(struct file *file,
				    struct address_space *mapping,
				    loff_t pos, unsigned len, unsigned copied,
				    struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	loff_t new_i_size;

	trace_mark(ext4_writeback_write_end,
		   "dev %s ino %lu pos %llu len %u copied %u",
		   inode->i_sb->s_id, inode->i_ino,
		   (unsigned long long) pos, len, copied);
	new_i_size = pos + copied;
	if (new_i_size > EXT4_I(inode)->i_disksize) {
		ext4_update_i_disksize(inode, new_i_size);
		/* We need to mark the inode dirty even if
		 * new_i_size is less than inode->i_size
		 * but greater than i_disksize. (hint: delalloc)
		 */
		ext4_mark_inode_dirty(handle, inode);
	}

	ret2 = generic_write_end(file, mapping, pos, len, copied,
							page, fsdata);
	copied = ret2;
	if (ret2 < 0)
		ret = ret2;

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	return ret ? ret : copied;
}
1499
bfc1af65
NP
1500static int ext4_journalled_write_end(struct file *file,
1501 struct address_space *mapping,
1502 loff_t pos, unsigned len, unsigned copied,
1503 struct page *page, void *fsdata)
ac27a0ec 1504{
617ba13b 1505 handle_t *handle = ext4_journal_current_handle();
bfc1af65 1506 struct inode *inode = mapping->host;
ac27a0ec
DK
1507 int ret = 0, ret2;
1508 int partial = 0;
bfc1af65 1509 unsigned from, to;
cf17fea6 1510 loff_t new_i_size;
ac27a0ec 1511
ba80b101
TT
1512 trace_mark(ext4_journalled_write_end,
1513 "dev %s ino %lu pos %llu len %u copied %u",
1514 inode->i_sb->s_id, inode->i_ino,
1515 (unsigned long long) pos, len, copied);
bfc1af65
NP
1516 from = pos & (PAGE_CACHE_SIZE - 1);
1517 to = from + len;
1518
1519 if (copied < len) {
1520 if (!PageUptodate(page))
1521 copied = 0;
1522 page_zero_new_buffers(page, from+copied, to);
1523 }
ac27a0ec
DK
1524
1525 ret = walk_page_buffers(handle, page_buffers(page), from,
bfc1af65 1526 to, &partial, write_end_fn);
ac27a0ec
DK
1527 if (!partial)
1528 SetPageUptodate(page);
cf17fea6
AK
1529 new_i_size = pos + copied;
1530 if (new_i_size > inode->i_size)
bfc1af65 1531 i_size_write(inode, pos+copied);
617ba13b 1532 EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
cf17fea6
AK
1533 if (new_i_size > EXT4_I(inode)->i_disksize) {
1534 ext4_update_i_disksize(inode, new_i_size);
617ba13b 1535 ret2 = ext4_mark_inode_dirty(handle, inode);
ac27a0ec
DK
1536 if (!ret)
1537 ret = ret2;
1538 }
bfc1af65 1539
cf108bca 1540 unlock_page(page);
617ba13b 1541 ret2 = ext4_journal_stop(handle);
ac27a0ec
DK
1542 if (!ret)
1543 ret = ret2;
bfc1af65
NP
1544 page_cache_release(page);
1545
1546 return ret ? ret : copied;
ac27a0ec 1547}

static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
{
	int retries = 0;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	unsigned long md_needed, mdblocks, total = 0;

	/*
	 * recalculate the amount of metadata blocks to reserve
	 * in order to allocate nrblocks
	 * worst case is one extent per block
	 */
repeat:
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	total = EXT4_I(inode)->i_reserved_data_blocks + nrblocks;
	mdblocks = ext4_calc_metadata_amount(inode, total);
	BUG_ON(mdblocks < EXT4_I(inode)->i_reserved_meta_blocks);

	md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks;
	total = md_needed + nrblocks;

	if (ext4_claim_free_blocks(sbi, total)) {
		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
			yield();
			goto repeat;
		}
		return -ENOSPC;
	}
	EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
	EXT4_I(inode)->i_reserved_meta_blocks = mdblocks;

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
	return 0;	/* success */
}
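
/*
 * Give back @to_free delalloc-reserved data blocks (plus whatever metadata
 * reservation is no longer needed) when reserved-but-unwritten blocks are
 * dropped, e.g. from ext4_da_page_release_reservation() below on page
 * invalidation.
 */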
1583
12219aea 1584static void ext4_da_release_space(struct inode *inode, int to_free)
d2a17637
MC
1585{
1586 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1587 int total, mdb, mdb_free, release;
1588
cd213226
MC
1589 if (!to_free)
1590 return; /* Nothing to release, exit */
1591
d2a17637 1592 spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
cd213226
MC
1593
1594 if (!EXT4_I(inode)->i_reserved_data_blocks) {
1595 /*
1596 * if there is no reserved blocks, but we try to free some
1597 * then the counter is messed up somewhere.
1598 * but since this function is called from invalidate
1599 * page, it's harmless to return without any action
1600 */
 1601	 printk(KERN_INFO "ext4 delalloc tried to release %d reserved "
 1602	 "blocks for inode %lu, but there are no reserved "
 1603	 "data blocks\n", to_free, inode->i_ino);
1604 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1605 return;
1606 }
1607
d2a17637 1608	 /* recalculate the number of metablocks that still need to be reserved */
12219aea 1609 total = EXT4_I(inode)->i_reserved_data_blocks - to_free;
d2a17637
MC
1610 mdb = ext4_calc_metadata_amount(inode, total);
1611
1612 /* figure out how many metablocks to release */
1613 BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
1614 mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;
1615
d2a17637
MC
1616 release = to_free + mdb_free;
1617
6bc6e63f
AK
1618 /* update fs dirty blocks counter for truncate case */
1619 percpu_counter_sub(&sbi->s_dirtyblocks_counter, release);
d2a17637
MC
1620
1621 /* update per-inode reservations */
12219aea
AK
1622 BUG_ON(to_free > EXT4_I(inode)->i_reserved_data_blocks);
1623 EXT4_I(inode)->i_reserved_data_blocks -= to_free;
d2a17637
MC
1624
1625 BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
1626 EXT4_I(inode)->i_reserved_meta_blocks = mdb;
d2a17637
MC
1627 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1628}
1629
1630static void ext4_da_page_release_reservation(struct page *page,
1631 unsigned long offset)
1632{
1633 int to_release = 0;
1634 struct buffer_head *head, *bh;
1635 unsigned int curr_off = 0;
1636
1637 head = page_buffers(page);
1638 bh = head;
1639 do {
1640 unsigned int next_off = curr_off + bh->b_size;
1641
1642 if ((offset <= curr_off) && (buffer_delay(bh))) {
1643 to_release++;
1644 clear_buffer_delay(bh);
1645 }
1646 curr_off = next_off;
1647 } while ((bh = bh->b_this_page) != head);
12219aea 1648 ext4_da_release_space(page->mapping->host, to_release);
d2a17637 1649}
ac27a0ec 1650
64769240
AT
1651/*
1652 * Delayed allocation stuff
1653 */
1654
1655struct mpage_da_data {
1656 struct inode *inode;
1657 struct buffer_head lbh; /* extent of blocks */
1658 unsigned long first_page, next_page; /* extent of pages */
1659 get_block_t *get_block;
1660 struct writeback_control *wbc;
a1d6cc56 1661 int io_done;
498e5f24 1662 int pages_written;
df22291f 1663 int retval;
64769240
AT
1664};
1665
1666/*
 1667 * mpage_da_submit_io - walks through an extent of pages and tries to write
a1d6cc56 1668 * them with the writepage() callback
64769240
AT
1669 *
1670 * @mpd->inode: inode
1671 * @mpd->first_page: first page of the extent
1672 * @mpd->next_page: page after the last page of the extent
1673 * @mpd->get_block: the filesystem's block mapper function
1674 *
1675 * By the time mpage_da_submit_io() is called we expect all blocks
 1676 * to be allocated; this may be wrong if allocation failed.
1677 *
1678 * As pages are already locked by write_cache_pages(), we can't use it
1679 */
1680static int mpage_da_submit_io(struct mpage_da_data *mpd)
1681{
22208ded 1682 long pages_skipped;
791b7f08
AK
1683 struct pagevec pvec;
1684 unsigned long index, end;
1685 int ret = 0, err, nr_pages, i;
1686 struct inode *inode = mpd->inode;
1687 struct address_space *mapping = inode->i_mapping;
64769240
AT
1688
1689 BUG_ON(mpd->next_page <= mpd->first_page);
791b7f08
AK
1690 /*
1691 * We need to start from the first_page to the next_page - 1
1692 * to make sure we also write the mapped dirty buffer_heads.
1693 * If we look at mpd->lbh.b_blocknr we would only be looking
1694 * at the currently mapped buffer_heads.
1695 */
64769240
AT
1696 index = mpd->first_page;
1697 end = mpd->next_page - 1;
1698
791b7f08 1699 pagevec_init(&pvec, 0);
64769240 1700 while (index <= end) {
791b7f08 1701 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
64769240
AT
1702 if (nr_pages == 0)
1703 break;
1704 for (i = 0; i < nr_pages; i++) {
1705 struct page *page = pvec.pages[i];
1706
791b7f08
AK
1707 index = page->index;
1708 if (index > end)
1709 break;
1710 index++;
1711
1712 BUG_ON(!PageLocked(page));
1713 BUG_ON(PageWriteback(page));
1714
22208ded 1715 pages_skipped = mpd->wbc->pages_skipped;
a1d6cc56 1716 err = mapping->a_ops->writepage(page, mpd->wbc);
22208ded
AK
1717 if (!err && (pages_skipped == mpd->wbc->pages_skipped))
1718 /*
1719 * have successfully written the page
 1720				 * without skipping it
1721 */
a1d6cc56 1722 mpd->pages_written++;
64769240
AT
1723 /*
1724 * In error case, we have to continue because
1725 * remaining pages are still locked
1726 * XXX: unlock and re-dirty them?
1727 */
1728 if (ret == 0)
1729 ret = err;
1730 }
1731 pagevec_release(&pvec);
1732 }
64769240
AT
1733 return ret;
1734}
1735
1736/*
1737 * mpage_put_bnr_to_bhs - walk blocks and assign them actual numbers
1738 *
1739 * @mpd->inode - inode to walk through
1740 * @exbh->b_blocknr - first block on a disk
1741 * @exbh->b_size - amount of space in bytes
1742 * @logical - first logical block to start assignment with
1743 *
 1744 * the function goes through all passed space and puts actual disk
1745 * block numbers into buffer heads, dropping BH_Delay
1746 */
1747static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical,
1748 struct buffer_head *exbh)
1749{
1750 struct inode *inode = mpd->inode;
1751 struct address_space *mapping = inode->i_mapping;
1752 int blocks = exbh->b_size >> inode->i_blkbits;
1753 sector_t pblock = exbh->b_blocknr, cur_logical;
1754 struct buffer_head *head, *bh;
a1d6cc56 1755 pgoff_t index, end;
64769240
AT
1756 struct pagevec pvec;
1757 int nr_pages, i;
1758
1759 index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
1760 end = (logical + blocks - 1) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
1761 cur_logical = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1762
1763 pagevec_init(&pvec, 0);
1764
1765 while (index <= end) {
1766 /* XXX: optimize tail */
1767 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
1768 if (nr_pages == 0)
1769 break;
1770 for (i = 0; i < nr_pages; i++) {
1771 struct page *page = pvec.pages[i];
1772
1773 index = page->index;
1774 if (index > end)
1775 break;
1776 index++;
1777
1778 BUG_ON(!PageLocked(page));
1779 BUG_ON(PageWriteback(page));
1780 BUG_ON(!page_has_buffers(page));
1781
1782 bh = page_buffers(page);
1783 head = bh;
1784
1785 /* skip blocks out of the range */
1786 do {
1787 if (cur_logical >= logical)
1788 break;
1789 cur_logical++;
1790 } while ((bh = bh->b_this_page) != head);
1791
1792 do {
1793 if (cur_logical >= logical + blocks)
1794 break;
64769240
AT
1795 if (buffer_delay(bh)) {
1796 bh->b_blocknr = pblock;
1797 clear_buffer_delay(bh);
bf068ee2
AK
1798 bh->b_bdev = inode->i_sb->s_bdev;
1799 } else if (buffer_unwritten(bh)) {
1800 bh->b_blocknr = pblock;
1801 clear_buffer_unwritten(bh);
1802 set_buffer_mapped(bh);
1803 set_buffer_new(bh);
1804 bh->b_bdev = inode->i_sb->s_bdev;
61628a3f 1805 } else if (buffer_mapped(bh))
64769240 1806 BUG_ON(bh->b_blocknr != pblock);
64769240
AT
1807
1808 cur_logical++;
1809 pblock++;
1810 } while ((bh = bh->b_this_page) != head);
1811 }
1812 pagevec_release(&pvec);
1813 }
1814}
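/*
 * A small user-space sketch of the logical-block/page-index arithmetic
 * used above, assuming 4096-byte pages and 1024-byte filesystem blocks
 * (both values are illustrative).
 */
#include <stdio.h>

#define PAGE_SHIFT	12	/* assumed: 4096-byte pages */
#define BLKBITS		10	/* assumed: 1024-byte blocks */

int main(void)
{
	unsigned long long logical = 13;	/* first logical block of the extent */
	unsigned long long blocks = 9;		/* extent length in blocks */

	unsigned long index = logical >> (PAGE_SHIFT - BLKBITS);
	unsigned long end = (logical + blocks - 1) >> (PAGE_SHIFT - BLKBITS);
	unsigned long long cur_logical =
		(unsigned long long)index << (PAGE_SHIFT - BLKBITS);

	/* pages 3..5 hold blocks 13..21; the first page starts at block 12 */
	printf("pages %lu..%lu, first block of page %lu is %llu\n",
	       index, end, index, cur_logical);
	return 0;
}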
1815
1816
1817/*
1818 * __unmap_underlying_blocks - just a helper function to unmap
1819 * set of blocks described by @bh
1820 */
1821static inline void __unmap_underlying_blocks(struct inode *inode,
1822 struct buffer_head *bh)
1823{
1824 struct block_device *bdev = inode->i_sb->s_bdev;
1825 int blocks, i;
1826
1827 blocks = bh->b_size >> inode->i_blkbits;
1828 for (i = 0; i < blocks; i++)
1829 unmap_underlying_metadata(bdev, bh->b_blocknr + i);
1830}
1831
c4a0c46e
AK
1832static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
1833 sector_t logical, long blk_cnt)
1834{
1835 int nr_pages, i;
1836 pgoff_t index, end;
1837 struct pagevec pvec;
1838 struct inode *inode = mpd->inode;
1839 struct address_space *mapping = inode->i_mapping;
1840
1841 index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
1842 end = (logical + blk_cnt - 1) >>
1843 (PAGE_CACHE_SHIFT - inode->i_blkbits);
1844 while (index <= end) {
1845 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
1846 if (nr_pages == 0)
1847 break;
1848 for (i = 0; i < nr_pages; i++) {
1849 struct page *page = pvec.pages[i];
1850 index = page->index;
1851 if (index > end)
1852 break;
1853 index++;
1854
1855 BUG_ON(!PageLocked(page));
1856 BUG_ON(PageWriteback(page));
1857 block_invalidatepage(page, 0);
1858 ClearPageUptodate(page);
1859 unlock_page(page);
1860 }
1861 }
1862 return;
1863}
1864
df22291f
AK
1865static void ext4_print_free_blocks(struct inode *inode)
1866{
1867 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1868 printk(KERN_EMERG "Total free blocks count %lld\n",
1869 ext4_count_free_blocks(inode->i_sb));
1870 printk(KERN_EMERG "Free/Dirty block details\n");
1871 printk(KERN_EMERG "free_blocks=%lld\n",
8f72fbdf 1872 (long long)percpu_counter_sum(&sbi->s_freeblocks_counter));
df22291f 1873 printk(KERN_EMERG "dirty_blocks=%lld\n",
8f72fbdf 1874 (long long)percpu_counter_sum(&sbi->s_dirtyblocks_counter));
df22291f 1875 printk(KERN_EMERG "Block reservation details\n");
498e5f24 1876 printk(KERN_EMERG "i_reserved_data_blocks=%u\n",
df22291f 1877 EXT4_I(inode)->i_reserved_data_blocks);
498e5f24 1878 printk(KERN_EMERG "i_reserved_meta_blocks=%u\n",
df22291f
AK
1879 EXT4_I(inode)->i_reserved_meta_blocks);
1880 return;
1881}
1882
64769240
AT
1883/*
1884 * mpage_da_map_blocks - go through given space
1885 *
1886 * @mpd->lbh - bh describing space
1887 * @mpd->get_block - the filesystem's block mapper function
1888 *
1889 * The function skips space we know is already mapped to disk blocks.
1890 *
64769240 1891 */
c4a0c46e 1892static int mpage_da_map_blocks(struct mpage_da_data *mpd)
64769240 1893{
a1d6cc56 1894 int err = 0;
030ba6bc 1895 struct buffer_head new;
64769240 1896 struct buffer_head *lbh = &mpd->lbh;
df22291f 1897 sector_t next;
64769240
AT
1898
1899 /*
1900 * We consider only non-mapped and non-allocated blocks
1901 */
1902 if (buffer_mapped(lbh) && !buffer_delay(lbh))
c4a0c46e 1903 return 0;
a1d6cc56
AK
1904 new.b_state = lbh->b_state;
1905 new.b_blocknr = 0;
1906 new.b_size = lbh->b_size;
df22291f 1907 next = lbh->b_blocknr;
a1d6cc56
AK
1908 /*
1909 * If we didn't accumulate anything
1910 * to write simply return
1911 */
1912 if (!new.b_size)
c4a0c46e 1913 return 0;
a1d6cc56 1914 err = mpd->get_block(mpd->inode, next, &new, 1);
c4a0c46e
AK
1915 if (err) {
1916
1917 /* If get block returns with error
1918 * we simply return. Later writepage
1919 * will redirty the page and writepages
1920 * will find the dirty page again
1921 */
1922 if (err == -EAGAIN)
1923 return 0;
df22291f
AK
1924
1925 if (err == -ENOSPC &&
1926 ext4_count_free_blocks(mpd->inode->i_sb)) {
1927 mpd->retval = err;
1928 return 0;
1929 }
1930
c4a0c46e
AK
1931 /*
1932 * get block failure will cause us
1933 * to loop in writepages. Because
1934 * a_ops->writepage won't be able to
1935 * make progress. The page will be redirtied
1936 * by writepage and writepages will again
1937 * try to write the same.
1938 */
1939 printk(KERN_EMERG "%s block allocation failed for inode %lu "
1940 "at logical offset %llu with max blocks "
1941 "%zd with error %d\n",
1942 __func__, mpd->inode->i_ino,
1943 (unsigned long long)next,
1944 lbh->b_size >> mpd->inode->i_blkbits, err);
 1945		printk(KERN_EMERG "This should not happen!! "
1946 "Data will be lost\n");
030ba6bc 1947 if (err == -ENOSPC) {
df22291f 1948 ext4_print_free_blocks(mpd->inode);
030ba6bc 1949 }
c4a0c46e
AK
 1950		/* invalidate all the pages */
1951 ext4_da_block_invalidatepages(mpd, next,
1952 lbh->b_size >> mpd->inode->i_blkbits);
1953 return err;
1954 }
a1d6cc56 1955 BUG_ON(new.b_size == 0);
64769240 1956
a1d6cc56
AK
1957 if (buffer_new(&new))
1958 __unmap_underlying_blocks(mpd->inode, &new);
64769240 1959
a1d6cc56
AK
1960 /*
 1961	 * If blocks are marked delayed, we need to
 1962	 * put the actual blocknr and drop the delayed bit
1963 */
1964 if (buffer_delay(lbh) || buffer_unwritten(lbh))
1965 mpage_put_bnr_to_bhs(mpd, next, &new);
64769240 1966
c4a0c46e 1967 return 0;
64769240
AT
1968}
1969
bf068ee2
AK
1970#define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
1971 (1 << BH_Delay) | (1 << BH_Unwritten))
64769240
AT
1972
1973/*
1974 * mpage_add_bh_to_extent - try to add one more block to extent of blocks
1975 *
1976 * @mpd->lbh - extent of blocks
1977 * @logical - logical number of the block in the file
1978 * @bh - bh of the block (used to access block's state)
1979 *
 1981 * the function is used to collect contiguous blocks in the same state
1981 */
1982static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
1983 sector_t logical, struct buffer_head *bh)
1984{
64769240 1985 sector_t next;
525f4ed8
MC
1986 size_t b_size = bh->b_size;
1987 struct buffer_head *lbh = &mpd->lbh;
1988 int nrblocks = lbh->b_size >> mpd->inode->i_blkbits;
64769240 1989
525f4ed8
MC
 1990	 /* check if the reserved journal credits might overflow */
1991 if (!(EXT4_I(mpd->inode)->i_flags & EXT4_EXTENTS_FL)) {
1992 if (nrblocks >= EXT4_MAX_TRANS_DATA) {
1993 /*
1994 * With non-extent format we are limited by the journal
1995 * credit available. Total credit needed to insert
 1996			 * nrblocks contiguous blocks depends on
1997 * nrblocks. So limit nrblocks.
1998 */
1999 goto flush_it;
2000 } else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) >
2001 EXT4_MAX_TRANS_DATA) {
2002 /*
2003 * Adding the new buffer_head would make it cross the
2004 * allowed limit for which we have journal credit
2005 * reserved. So limit the new bh->b_size
2006 */
2007 b_size = (EXT4_MAX_TRANS_DATA - nrblocks) <<
2008 mpd->inode->i_blkbits;
2009 /* we will do mpage_da_submit_io in the next loop */
2010 }
2011 }
64769240
AT
2012 /*
2013 * First block in the extent
2014 */
2015 if (lbh->b_size == 0) {
2016 lbh->b_blocknr = logical;
525f4ed8 2017 lbh->b_size = b_size;
64769240
AT
2018 lbh->b_state = bh->b_state & BH_FLAGS;
2019 return;
2020 }
2021
525f4ed8 2022 next = lbh->b_blocknr + nrblocks;
64769240
AT
2023 /*
2024 * Can we merge the block to our big extent?
2025 */
2026 if (logical == next && (bh->b_state & BH_FLAGS) == lbh->b_state) {
525f4ed8 2027 lbh->b_size += b_size;
64769240
AT
2028 return;
2029 }
2030
525f4ed8 2031flush_it:
64769240
AT
2032 /*
2033 * We couldn't merge the block to our extent, so we
2034 * need to flush current extent and start new one
2035 */
c4a0c46e
AK
2036 if (mpage_da_map_blocks(mpd) == 0)
2037 mpage_da_submit_io(mpd);
a1d6cc56
AK
2038 mpd->io_done = 1;
2039 return;
64769240
AT
2040}
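/*
 * A user-space sketch of the merge test above: a block joins the
 * accumulated extent only if it is logically contiguous and its masked
 * buffer state matches.  The mask value and extent numbers below are
 * assumed for illustration only.
 */
#include <stdio.h>
#include <stdbool.h>

#define STATE_MASK	0x0fU	/* stand-in for the BH_FLAGS mask */

struct extent {
	unsigned long long start;	/* first logical block */
	unsigned long nrblocks;		/* blocks accumulated so far */
	unsigned int state;		/* masked buffer state */
};

static bool can_merge(const struct extent *ext,
		      unsigned long long logical, unsigned int bh_state)
{
	unsigned long long next = ext->start + ext->nrblocks;

	return logical == next && (bh_state & STATE_MASK) == ext->state;
}

int main(void)
{
	struct extent ext = { .start = 100, .nrblocks = 8, .state = 0x05 };

	printf("block 108: %s\n", can_merge(&ext, 108, 0x05) ? "merge" : "flush");
	printf("block 110: %s\n", can_merge(&ext, 110, 0x05) ? "merge" : "flush");
	return 0;
}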
2041
2042/*
2043 * __mpage_da_writepage - finds extent of pages and blocks
2044 *
2045 * @page: page to consider
2046 * @wbc: not used, we just follow rules
2047 * @data: context
2048 *
 2049 * The function finds extents of pages and scans them for all blocks.
2050 */
2051static int __mpage_da_writepage(struct page *page,
2052 struct writeback_control *wbc, void *data)
2053{
2054 struct mpage_da_data *mpd = data;
2055 struct inode *inode = mpd->inode;
2056 struct buffer_head *bh, *head, fake;
2057 sector_t logical;
2058
a1d6cc56
AK
2059 if (mpd->io_done) {
2060 /*
 2061		 * Rest of the pages in the page_vec:
 2062		 * redirty them and skip them. We will
 2063		 * try to write them again after
2064 * starting a new transaction
2065 */
2066 redirty_page_for_writepage(wbc, page);
2067 unlock_page(page);
2068 return MPAGE_DA_EXTENT_TAIL;
2069 }
64769240
AT
2070 /*
2071 * Can we merge this page to current extent?
2072 */
2073 if (mpd->next_page != page->index) {
2074 /*
2075 * Nope, we can't. So, we map non-allocated blocks
a1d6cc56 2076 * and start IO on them using writepage()
64769240
AT
2077 */
2078 if (mpd->next_page != mpd->first_page) {
c4a0c46e
AK
2079 if (mpage_da_map_blocks(mpd) == 0)
2080 mpage_da_submit_io(mpd);
a1d6cc56
AK
2081 /*
 2082			 * skip the rest of the pages in the page_vec
2083 */
2084 mpd->io_done = 1;
2085 redirty_page_for_writepage(wbc, page);
2086 unlock_page(page);
2087 return MPAGE_DA_EXTENT_TAIL;
64769240
AT
2088 }
2089
2090 /*
2091 * Start next extent of pages ...
2092 */
2093 mpd->first_page = page->index;
2094
2095 /*
2096 * ... and blocks
2097 */
2098 mpd->lbh.b_size = 0;
2099 mpd->lbh.b_state = 0;
2100 mpd->lbh.b_blocknr = 0;
2101 }
2102
2103 mpd->next_page = page->index + 1;
2104 logical = (sector_t) page->index <<
2105 (PAGE_CACHE_SHIFT - inode->i_blkbits);
2106
2107 if (!page_has_buffers(page)) {
2108 /*
 2109		 * There are no attached buffer heads yet (mmap?),
 2110		 * so we treat the page as full of dirty blocks
2111 */
2112 bh = &fake;
2113 bh->b_size = PAGE_CACHE_SIZE;
2114 bh->b_state = 0;
2115 set_buffer_dirty(bh);
2116 set_buffer_uptodate(bh);
2117 mpage_add_bh_to_extent(mpd, logical, bh);
a1d6cc56
AK
2118 if (mpd->io_done)
2119 return MPAGE_DA_EXTENT_TAIL;
64769240
AT
2120 } else {
2121 /*
2122 * Page with regular buffer heads, just add all dirty ones
2123 */
2124 head = page_buffers(page);
2125 bh = head;
2126 do {
2127 BUG_ON(buffer_locked(bh));
791b7f08
AK
2128 /*
2129 * We need to try to allocate
2130 * unmapped blocks in the same page.
2131 * Otherwise we won't make progress
2132 * with the page in ext4_da_writepage
2133 */
a1d6cc56
AK
2134 if (buffer_dirty(bh) &&
2135 (!buffer_mapped(bh) || buffer_delay(bh))) {
64769240 2136 mpage_add_bh_to_extent(mpd, logical, bh);
a1d6cc56
AK
2137 if (mpd->io_done)
2138 return MPAGE_DA_EXTENT_TAIL;
791b7f08
AK
2139 } else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
2140 /*
2141 * mapped dirty buffer. We need to update
2142 * the b_state because we look at
2143 * b_state in mpage_da_map_blocks. We don't
2144 * update b_size because if we find an
2145 * unmapped buffer_head later we need to
2146 * use the b_state flag of that buffer_head.
2147 */
2148 if (mpd->lbh.b_size == 0)
2149 mpd->lbh.b_state =
2150 bh->b_state & BH_FLAGS;
a1d6cc56 2151 }
64769240
AT
2152 logical++;
2153 } while ((bh = bh->b_this_page) != head);
2154 }
2155
2156 return 0;
2157}
2158
2159/*
2160 * mpage_da_writepages - walk the list of dirty pages of the given
2161 * address space, allocates non-allocated blocks, maps newly-allocated
 2162 * blocks to existing bhs and issues IO on them
2163 *
2164 * @mapping: address space structure to write
2165 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2166 * @get_block: the filesystem's block mapper function.
2167 *
2168 * This is a library function, which implements the writepages()
2169 * address_space_operation.
64769240
AT
2170 */
2171static int mpage_da_writepages(struct address_space *mapping,
2172 struct writeback_control *wbc,
df22291f 2173 struct mpage_da_data *mpd)
64769240 2174{
64769240
AT
2175 int ret;
2176
df22291f 2177 if (!mpd->get_block)
64769240
AT
2178 return generic_writepages(mapping, wbc);
2179
df22291f
AK
2180 mpd->lbh.b_size = 0;
2181 mpd->lbh.b_state = 0;
2182 mpd->lbh.b_blocknr = 0;
2183 mpd->first_page = 0;
2184 mpd->next_page = 0;
2185 mpd->io_done = 0;
2186 mpd->pages_written = 0;
2187 mpd->retval = 0;
a1d6cc56 2188
df22291f 2189 ret = write_cache_pages(mapping, wbc, __mpage_da_writepage, mpd);
64769240
AT
2190 /*
2191 * Handle last extent of pages
2192 */
df22291f
AK
2193 if (!mpd->io_done && mpd->next_page != mpd->first_page) {
2194 if (mpage_da_map_blocks(mpd) == 0)
2195 mpage_da_submit_io(mpd);
64769240 2196
22208ded
AK
2197 mpd->io_done = 1;
2198 ret = MPAGE_DA_EXTENT_TAIL;
2199 }
2200 wbc->nr_to_write -= mpd->pages_written;
64769240
AT
2201 return ret;
2202}
2203
2204/*
2205 * this is a special callback for ->write_begin() only
 2206 * its intention is to return a mapped block or reserve space
2207 */
2208static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
2209 struct buffer_head *bh_result, int create)
2210{
2211 int ret = 0;
2212
2213 BUG_ON(create == 0);
2214 BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
2215
2216 /*
2217 * first, we need to know whether the block is allocated already
 2218	 * preallocated blocks are unmapped but should be treated
2219 * the same as allocated blocks.
2220 */
d2a17637
MC
2221 ret = ext4_get_blocks_wrap(NULL, inode, iblock, 1, bh_result, 0, 0, 0);
2222 if ((ret == 0) && !buffer_delay(bh_result)) {
2223 /* the block isn't (pre)allocated yet, let's reserve space */
64769240
AT
2224 /*
2225 * XXX: __block_prepare_write() unmaps passed block,
2226 * is it OK?
2227 */
d2a17637
MC
2228 ret = ext4_da_reserve_space(inode, 1);
2229 if (ret)
2230 /* not enough space to reserve */
2231 return ret;
2232
64769240
AT
2233 map_bh(bh_result, inode->i_sb, 0);
2234 set_buffer_new(bh_result);
2235 set_buffer_delay(bh_result);
2236 } else if (ret > 0) {
2237 bh_result->b_size = (ret << inode->i_blkbits);
2238 ret = 0;
2239 }
2240
2241 return ret;
2242}
d2a17637 2243#define EXT4_DELALLOC_RSVED 1
64769240
AT
2244static int ext4_da_get_block_write(struct inode *inode, sector_t iblock,
2245 struct buffer_head *bh_result, int create)
2246{
61628a3f 2247 int ret;
64769240
AT
2248 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
2249 loff_t disksize = EXT4_I(inode)->i_disksize;
2250 handle_t *handle = NULL;
2251
61628a3f 2252 handle = ext4_journal_current_handle();
166348dd
AK
2253 BUG_ON(!handle);
2254 ret = ext4_get_blocks_wrap(handle, inode, iblock, max_blocks,
2255 bh_result, create, 0, EXT4_DELALLOC_RSVED);
64769240 2256 if (ret > 0) {
166348dd 2257
64769240
AT
2258 bh_result->b_size = (ret << inode->i_blkbits);
2259
166348dd
AK
2260 if (ext4_should_order_data(inode)) {
2261 int retval;
2262 retval = ext4_jbd2_file_inode(handle, inode);
2263 if (retval)
2264 /*
2265 * Failed to add inode for ordered
2266 * mode. Don't update file size
2267 */
2268 return retval;
2269 }
2270
64769240
AT
2271 /*
2272 * Update on-disk size along with block allocation
2273 * we don't use 'extend_disksize' as size may change
2274 * within already allocated block -bzzz
2275 */
2276 disksize = ((loff_t) iblock + ret) << inode->i_blkbits;
2277 if (disksize > i_size_read(inode))
2278 disksize = i_size_read(inode);
2279 if (disksize > EXT4_I(inode)->i_disksize) {
cf17fea6
AK
2280 ext4_update_i_disksize(inode, disksize);
2281 ret = ext4_mark_inode_dirty(handle, inode);
2282 return ret;
64769240 2283 }
64769240
AT
2284 ret = 0;
2285 }
64769240
AT
2286 return ret;
2287}
61628a3f
MC
2288
2289static int ext4_bh_unmapped_or_delay(handle_t *handle, struct buffer_head *bh)
2290{
f0e6c985
AK
2291 /*
 2292	 * an unmapped buffer is possible for holes.
 2293	 * a delayed buffer is possible with delayed allocation
2294 */
2295 return ((!buffer_mapped(bh) || buffer_delay(bh)) && buffer_dirty(bh));
2296}
2297
2298static int ext4_normal_get_block_write(struct inode *inode, sector_t iblock,
2299 struct buffer_head *bh_result, int create)
2300{
2301 int ret = 0;
2302 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
2303
2304 /*
2305 * we don't want to do block allocation in writepage
2306 * so call get_block_wrap with create = 0
2307 */
2308 ret = ext4_get_blocks_wrap(NULL, inode, iblock, max_blocks,
2309 bh_result, 0, 0, 0);
2310 if (ret > 0) {
2311 bh_result->b_size = (ret << inode->i_blkbits);
2312 ret = 0;
2313 }
2314 return ret;
61628a3f
MC
2315}
2316
61628a3f 2317/*
f0e6c985
AK
 2318 * get called via ext4_da_writepages after taking page lock (have journal handle)
2319 * get called via journal_submit_inode_data_buffers (no journal handle)
2320 * get called via shrink_page_list via pdflush (no journal handle)
2321 * or grab_page_cache when doing write_begin (have journal handle)
61628a3f 2322 */
64769240
AT
2323static int ext4_da_writepage(struct page *page,
2324 struct writeback_control *wbc)
2325{
64769240 2326 int ret = 0;
61628a3f 2327 loff_t size;
498e5f24 2328 unsigned int len;
61628a3f
MC
2329 struct buffer_head *page_bufs;
2330 struct inode *inode = page->mapping->host;
2331
ba80b101
TT
2332 trace_mark(ext4_da_writepage,
2333 "dev %s ino %lu page_index %lu",
2334 inode->i_sb->s_id, inode->i_ino, page->index);
f0e6c985
AK
2335 size = i_size_read(inode);
2336 if (page->index == size >> PAGE_CACHE_SHIFT)
2337 len = size & ~PAGE_CACHE_MASK;
2338 else
2339 len = PAGE_CACHE_SIZE;
64769240 2340
f0e6c985 2341 if (page_has_buffers(page)) {
61628a3f 2342 page_bufs = page_buffers(page);
f0e6c985
AK
2343 if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2344 ext4_bh_unmapped_or_delay)) {
61628a3f 2345 /*
f0e6c985
AK
 2346			 * We don't want to do block allocation,
 2347			 * so redirty the page and return.
cd1aac32
AK
2348 * We may reach here when we do a journal commit
2349 * via journal_submit_inode_data_buffers.
 2350			 * If we don't have a mapped block we just ignore
f0e6c985
AK
2351 * them. We can also reach here via shrink_page_list
2352 */
2353 redirty_page_for_writepage(wbc, page);
2354 unlock_page(page);
2355 return 0;
2356 }
2357 } else {
2358 /*
2359 * The test for page_has_buffers() is subtle:
2360 * We know the page is dirty but it lost buffers. That means
2361 * that at some moment in time after write_begin()/write_end()
2362 * has been called all buffers have been clean and thus they
2363 * must have been written at least once. So they are all
2364 * mapped and we can happily proceed with mapping them
2365 * and writing the page.
2366 *
2367 * Try to initialize the buffer_heads and check whether
2368 * all are mapped and non delay. We don't want to
2369 * do block allocation here.
2370 */
2371 ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
2372 ext4_normal_get_block_write);
2373 if (!ret) {
2374 page_bufs = page_buffers(page);
2375 /* check whether all are mapped and non delay */
2376 if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2377 ext4_bh_unmapped_or_delay)) {
2378 redirty_page_for_writepage(wbc, page);
2379 unlock_page(page);
2380 return 0;
2381 }
2382 } else {
2383 /*
2384 * We can't do block allocation here
 2385			 * so just redirty the page and unlock
2386 * and return
61628a3f 2387 */
61628a3f
MC
2388 redirty_page_for_writepage(wbc, page);
2389 unlock_page(page);
2390 return 0;
2391 }
ed9b3e33
AK
2392 /* now mark the buffer_heads as dirty and uptodate */
2393 block_commit_write(page, 0, PAGE_CACHE_SIZE);
64769240
AT
2394 }
2395
2396 if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
f0e6c985 2397 ret = nobh_writepage(page, ext4_normal_get_block_write, wbc);
64769240 2398 else
f0e6c985
AK
2399 ret = block_write_full_page(page,
2400 ext4_normal_get_block_write,
2401 wbc);
64769240 2402
64769240
AT
2403 return ret;
2404}
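/*
 * A user-space sketch of the length computation at the top of this
 * writepage path: only the last page of the file is partially valid.
 * The 4096-byte page size is assumed for illustration.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)	/* assumed 4096-byte pages */

int main(void)
{
	unsigned long long i_size = 10000;	/* file size in bytes */
	unsigned long index = 2;		/* page being written back */
	unsigned long len;

	if (index == i_size >> PAGE_SHIFT)
		len = i_size & (PAGE_SIZE - 1);	/* tail page: 10000 - 8192 = 1808 */
	else
		len = PAGE_SIZE;

	printf("valid bytes in page %lu: %lu\n", index, len);
	return 0;
}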
2405
61628a3f 2406/*
525f4ed8
MC
2407 * This is called via ext4_da_writepages() to
 2408 * calculate the total number of credits to reserve to fit
 2409 * a single extent allocation into a single transaction.
 2410 * ext4_da_writepages() will loop calling this before
2411 * the block allocation.
61628a3f 2412 */
525f4ed8
MC
2413
2414static int ext4_da_writepages_trans_blocks(struct inode *inode)
2415{
2416 int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;
2417
2418 /*
2419 * With non-extent format the journal credit needed to
 2420	 * insert nrblocks contiguous blocks depends on the
 2421	 * number of contiguous blocks. So we will limit the
 2422	 * number of contiguous blocks to a sane value
2423 */
2424 if (!(inode->i_flags & EXT4_EXTENTS_FL) &&
2425 (max_blocks > EXT4_MAX_TRANS_DATA))
2426 max_blocks = EXT4_MAX_TRANS_DATA;
2427
2428 return ext4_chunk_trans_blocks(inode, max_blocks);
2429}
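/*
 * A user-space sketch of the credit sizing above, with an assumed cap
 * and a deliberately simplified credit estimate; chunk_trans_blocks()
 * is an illustrative stand-in for ext4_chunk_trans_blocks().
 */
#include <stdio.h>

#define MAX_TRANS_DATA	64	/* assumed stand-in for EXT4_MAX_TRANS_DATA */

static int chunk_trans_blocks(int extent_based, int nrblocks)
{
	/* crude model: extent files need a few credits per chunk,
	 * indirect-block files need roughly one credit per block */
	return extent_based ? 8 : nrblocks + 4;
}

int main(void)
{
	int extent_based = 0;		/* non-extent (indirect-mapped) inode */
	int max_blocks = 200;		/* i_reserved_data_blocks */

	if (!extent_based && max_blocks > MAX_TRANS_DATA)
		max_blocks = MAX_TRANS_DATA;	/* limit to a sane value */

	printf("reserve %d journal credits\n",
	       chunk_trans_blocks(extent_based, max_blocks));
	return 0;
}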
61628a3f 2430
64769240 2431static int ext4_da_writepages(struct address_space *mapping,
a1d6cc56 2432 struct writeback_control *wbc)
64769240 2433{
22208ded
AK
2434 pgoff_t index;
2435 int range_whole = 0;
61628a3f 2436 handle_t *handle = NULL;
df22291f 2437 struct mpage_da_data mpd;
5e745b04 2438 struct inode *inode = mapping->host;
22208ded 2439 int no_nrwrite_index_update;
498e5f24
TT
2440 int pages_written = 0;
2441 long pages_skipped;
5e745b04 2442 int needed_blocks, ret = 0, nr_to_writebump = 0;
5e745b04 2443 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
61628a3f 2444
ba80b101
TT
2445 trace_mark(ext4_da_writepages,
2446 "dev %s ino %lu nr_t_write %ld "
2447 "pages_skipped %ld range_start %llu "
2448 "range_end %llu nonblocking %d "
2449 "for_kupdate %d for_reclaim %d "
2450 "for_writepages %d range_cyclic %d",
2451 inode->i_sb->s_id, inode->i_ino,
2452 wbc->nr_to_write, wbc->pages_skipped,
2453 (unsigned long long) wbc->range_start,
2454 (unsigned long long) wbc->range_end,
2455 wbc->nonblocking, wbc->for_kupdate,
2456 wbc->for_reclaim, wbc->for_writepages,
2457 wbc->range_cyclic);
2458
61628a3f
MC
2459 /*
2460 * No pages to write? This is mainly a kludge to avoid starting
2461 * a transaction for special inodes like journal inode on last iput()
2462 * because that could violate lock ordering on umount
2463 */
a1d6cc56 2464 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
61628a3f 2465 return 0;
2a21e37e
TT
2466
2467 /*
2468 * If the filesystem has aborted, it is read-only, so return
2469 * right away instead of dumping stack traces later on that
2470 * will obscure the real source of the problem. We test
2471 * EXT4_MOUNT_ABORT instead of sb->s_flag's MS_RDONLY because
2472 * the latter could be true if the filesystem is mounted
2473 * read-only, and in that case, ext4_da_writepages should
2474 * *never* be called, so if that ever happens, we would want
2475 * the stack trace.
2476 */
2477 if (unlikely(sbi->s_mount_opt & EXT4_MOUNT_ABORT))
2478 return -EROFS;
2479
5e745b04
AK
2480 /*
2481 * Make sure nr_to_write is >= sbi->s_mb_stream_request
 2482	 * This makes sure small file blocks are allocated in a
 2483	 * single attempt. This ensures that small files
2484 * get less fragmented.
2485 */
2486 if (wbc->nr_to_write < sbi->s_mb_stream_request) {
2487 nr_to_writebump = sbi->s_mb_stream_request - wbc->nr_to_write;
2488 wbc->nr_to_write = sbi->s_mb_stream_request;
2489 }
22208ded
AK
2490 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2491 range_whole = 1;
61628a3f 2492
22208ded
AK
2493 if (wbc->range_cyclic)
2494 index = mapping->writeback_index;
2495 else
2496 index = wbc->range_start >> PAGE_CACHE_SHIFT;
a1d6cc56 2497
df22291f
AK
2498 mpd.wbc = wbc;
2499 mpd.inode = mapping->host;
2500
22208ded
AK
2501 /*
2502 * we don't want write_cache_pages to update
2503 * nr_to_write and writeback_index
2504 */
2505 no_nrwrite_index_update = wbc->no_nrwrite_index_update;
2506 wbc->no_nrwrite_index_update = 1;
2507 pages_skipped = wbc->pages_skipped;
2508
2509 while (!ret && wbc->nr_to_write > 0) {
a1d6cc56
AK
2510
2511 /*
 2512		 * we insert one extent at a time, so we need
 2513		 * the credits needed for a single extent allocation.
2514 * journalled mode is currently not supported
2515 * by delalloc
2516 */
2517 BUG_ON(ext4_should_journal_data(inode));
525f4ed8 2518 needed_blocks = ext4_da_writepages_trans_blocks(inode);
a1d6cc56 2519
61628a3f
MC
2520 /* start a new transaction*/
2521 handle = ext4_journal_start(inode, needed_blocks);
2522 if (IS_ERR(handle)) {
2523 ret = PTR_ERR(handle);
2a21e37e 2524 printk(KERN_CRIT "%s: jbd2_start: "
a1d6cc56
AK
2525 "%ld pages, ino %lu; err %d\n", __func__,
2526 wbc->nr_to_write, inode->i_ino, ret);
2527 dump_stack();
61628a3f
MC
2528 goto out_writepages;
2529 }
df22291f
AK
2530 mpd.get_block = ext4_da_get_block_write;
2531 ret = mpage_da_writepages(mapping, wbc, &mpd);
2532
61628a3f 2533 ext4_journal_stop(handle);
df22291f 2534
22208ded
AK
2535 if (mpd.retval == -ENOSPC) {
2536 /* commit the transaction which would
2537 * free blocks released in the transaction
2538 * and try again
2539 */
df22291f 2540 jbd2_journal_force_commit_nested(sbi->s_journal);
22208ded
AK
2541 wbc->pages_skipped = pages_skipped;
2542 ret = 0;
2543 } else if (ret == MPAGE_DA_EXTENT_TAIL) {
a1d6cc56
AK
2544 /*
2545 * got one extent now try with
2546 * rest of the pages
2547 */
22208ded
AK
2548 pages_written += mpd.pages_written;
2549 wbc->pages_skipped = pages_skipped;
a1d6cc56 2550 ret = 0;
22208ded 2551 } else if (wbc->nr_to_write)
61628a3f
MC
2552 /*
2553 * There is no more writeout needed
2554 * or we requested for a noblocking writeout
2555 * and we found the device congested
2556 */
61628a3f 2557 break;
a1d6cc56 2558 }
22208ded
AK
2559 if (pages_skipped != wbc->pages_skipped)
2560 printk(KERN_EMERG "This should not happen leaving %s "
2561 "with nr_to_write = %ld ret = %d\n",
2562 __func__, wbc->nr_to_write, ret);
2563
2564 /* Update index */
2565 index += pages_written;
2566 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2567 /*
2568 * set the writeback_index so that range_cyclic
2569 * mode will write it back later
2570 */
2571 mapping->writeback_index = index;
a1d6cc56 2572
61628a3f 2573out_writepages:
22208ded
AK
2574 if (!no_nrwrite_index_update)
2575 wbc->no_nrwrite_index_update = 0;
2576 wbc->nr_to_write -= nr_to_writebump;
ba80b101
TT
2577 trace_mark(ext4_da_writepage_result,
2578 "dev %s ino %lu ret %d pages_written %d "
2579 "pages_skipped %ld congestion %d "
2580 "more_io %d no_nrwrite_index_update %d",
2581 inode->i_sb->s_id, inode->i_ino, ret,
2582 pages_written, wbc->pages_skipped,
2583 wbc->encountered_congestion, wbc->more_io,
2584 wbc->no_nrwrite_index_update);
61628a3f 2585 return ret;
64769240
AT
2586}
2587
79f0be8d
AK
2588#define FALL_BACK_TO_NONDELALLOC 1
2589static int ext4_nonda_switch(struct super_block *sb)
2590{
2591 s64 free_blocks, dirty_blocks;
2592 struct ext4_sb_info *sbi = EXT4_SB(sb);
2593
2594 /*
2595 * switch to non delalloc mode if we are running low
 2596	 * on free blocks. The free block accounting via percpu
179f7ebf 2597 * counters can get slightly wrong with percpu_counter_batch getting
79f0be8d
AK
 2598	 * accumulated on each CPU without updating global counters.
 2599	 * Delalloc needs accurate free block accounting. So switch
 2600	 * to non-delalloc when we are near the error range.
2601 */
2602 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
2603 dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyblocks_counter);
2604 if (2 * free_blocks < 3 * dirty_blocks ||
2605 free_blocks < (dirty_blocks + EXT4_FREEBLOCKS_WATERMARK)) {
2606 /*
 2607		 * free block count is less than 150% of dirty blocks
 2608		 * or free blocks are less than the watermark
2609 */
2610 return 1;
2611 }
2612 return 0;
2613}
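/*
 * A user-space sketch of the fallback test above: abandon delalloc when
 * free blocks drop below 150% of the dirty (reserved) blocks or below
 * the watermark.  The watermark value is assumed for illustration.
 */
#include <stdio.h>

#define FREEBLOCKS_WATERMARK	1024LL	/* assumed, illustrative value */

static int nonda_switch(long long free_blocks, long long dirty_blocks)
{
	/* free < 1.5 * dirty, or free below the absolute watermark */
	if (2 * free_blocks < 3 * dirty_blocks ||
	    free_blocks < dirty_blocks + FREEBLOCKS_WATERMARK)
		return 1;	/* fall back to non-delalloc writes */
	return 0;
}

int main(void)
{
	printf("free=3000 dirty=1000 -> switch=%d\n", nonda_switch(3000, 1000));
	printf("free=1400 dirty=1000 -> switch=%d\n", nonda_switch(1400, 1000));
	return 0;
}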
2614
64769240
AT
2615static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
2616 loff_t pos, unsigned len, unsigned flags,
2617 struct page **pagep, void **fsdata)
2618{
d2a17637 2619 int ret, retries = 0;
64769240
AT
2620 struct page *page;
2621 pgoff_t index;
2622 unsigned from, to;
2623 struct inode *inode = mapping->host;
2624 handle_t *handle;
2625
2626 index = pos >> PAGE_CACHE_SHIFT;
2627 from = pos & (PAGE_CACHE_SIZE - 1);
2628 to = from + len;
79f0be8d
AK
2629
2630 if (ext4_nonda_switch(inode->i_sb)) {
2631 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
2632 return ext4_write_begin(file, mapping, pos,
2633 len, flags, pagep, fsdata);
2634 }
2635 *fsdata = (void *)0;
ba80b101
TT
2636
2637 trace_mark(ext4_da_write_begin,
2638 "dev %s ino %lu pos %llu len %u flags %u",
2639 inode->i_sb->s_id, inode->i_ino,
2640 (unsigned long long) pos, len, flags);
d2a17637 2641retry:
64769240
AT
2642 /*
2643 * With delayed allocation, we don't log the i_disksize update
 2644	 * if there is delayed block allocation. But we still need
 2645	 * to journal the i_disksize update if we write to the end
 2646	 * of a file which has an already mapped buffer.
2647 */
2648 handle = ext4_journal_start(inode, 1);
2649 if (IS_ERR(handle)) {
2650 ret = PTR_ERR(handle);
2651 goto out;
2652 }
2653
54566b2c 2654 page = grab_cache_page_write_begin(mapping, index, flags);
d5a0d4f7
ES
2655 if (!page) {
2656 ext4_journal_stop(handle);
2657 ret = -ENOMEM;
2658 goto out;
2659 }
64769240
AT
2660 *pagep = page;
2661
2662 ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
2663 ext4_da_get_block_prep);
2664 if (ret < 0) {
2665 unlock_page(page);
2666 ext4_journal_stop(handle);
2667 page_cache_release(page);
ae4d5372
AK
2668 /*
2669 * block_write_begin may have instantiated a few blocks
2670 * outside i_size. Trim these off again. Don't need
2671 * i_size_read because we hold i_mutex.
2672 */
2673 if (pos + len > inode->i_size)
2674 vmtruncate(inode, inode->i_size);
64769240
AT
2675 }
2676
d2a17637
MC
2677 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
2678 goto retry;
64769240
AT
2679out:
2680 return ret;
2681}
2682
632eaeab
MC
2683/*
2684 * Check if we should update i_disksize
 2685 * when writing to the end of the file without requiring block allocation
2686 */
2687static int ext4_da_should_update_i_disksize(struct page *page,
2688 unsigned long offset)
2689{
2690 struct buffer_head *bh;
2691 struct inode *inode = page->mapping->host;
2692 unsigned int idx;
2693 int i;
2694
2695 bh = page_buffers(page);
2696 idx = offset >> inode->i_blkbits;
2697
af5bc92d 2698 for (i = 0; i < idx; i++)
632eaeab
MC
2699 bh = bh->b_this_page;
2700
2701 if (!buffer_mapped(bh) || (buffer_delay(bh)))
2702 return 0;
2703 return 1;
2704}
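/*
 * A user-space sketch of the lookup above: the byte offset of the end
 * of the write selects which buffer_head within the page must be
 * mapped and not delayed.  1024-byte blocks are assumed.
 */
#include <stdio.h>

#define BLKBITS		10	/* assumed: 1024-byte filesystem blocks */

int main(void)
{
	unsigned long offset = 3100;		/* last written byte within the page */
	unsigned int idx = offset >> BLKBITS;	/* buffer covering that byte */

	/* walk idx buffers from the page's buffer list, then test
	 * buffer_mapped() && !buffer_delay() on the one you land on */
	printf("byte %lu is covered by buffer %u of the page\n", offset, idx);
	return 0;
}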
2705
64769240
AT
2706static int ext4_da_write_end(struct file *file,
2707 struct address_space *mapping,
2708 loff_t pos, unsigned len, unsigned copied,
2709 struct page *page, void *fsdata)
2710{
2711 struct inode *inode = mapping->host;
2712 int ret = 0, ret2;
2713 handle_t *handle = ext4_journal_current_handle();
2714 loff_t new_i_size;
632eaeab 2715 unsigned long start, end;
79f0be8d
AK
2716 int write_mode = (int)(unsigned long)fsdata;
2717
2718 if (write_mode == FALL_BACK_TO_NONDELALLOC) {
2719 if (ext4_should_order_data(inode)) {
2720 return ext4_ordered_write_end(file, mapping, pos,
2721 len, copied, page, fsdata);
2722 } else if (ext4_should_writeback_data(inode)) {
2723 return ext4_writeback_write_end(file, mapping, pos,
2724 len, copied, page, fsdata);
2725 } else {
2726 BUG();
2727 }
2728 }
632eaeab 2729
ba80b101
TT
2730 trace_mark(ext4_da_write_end,
2731 "dev %s ino %lu pos %llu len %u copied %u",
2732 inode->i_sb->s_id, inode->i_ino,
2733 (unsigned long long) pos, len, copied);
632eaeab 2734 start = pos & (PAGE_CACHE_SIZE - 1);
af5bc92d 2735 end = start + copied - 1;
64769240
AT
2736
2737 /*
2738 * generic_write_end() will run mark_inode_dirty() if i_size
2739 * changes. So let's piggyback the i_disksize mark_inode_dirty
2740 * into that.
2741 */
2742
2743 new_i_size = pos + copied;
632eaeab
MC
2744 if (new_i_size > EXT4_I(inode)->i_disksize) {
2745 if (ext4_da_should_update_i_disksize(page, end)) {
2746 down_write(&EXT4_I(inode)->i_data_sem);
2747 if (new_i_size > EXT4_I(inode)->i_disksize) {
2748 /*
2749 * Updating i_disksize when extending file
2750 * without needing block allocation
2751 */
2752 if (ext4_should_order_data(inode))
2753 ret = ext4_jbd2_file_inode(handle,
2754 inode);
64769240 2755
632eaeab
MC
2756 EXT4_I(inode)->i_disksize = new_i_size;
2757 }
2758 up_write(&EXT4_I(inode)->i_data_sem);
cf17fea6
AK
2759 /* We need to mark inode dirty even if
 2760		 * new_i_size is less than inode->i_size
 2761		 * but greater than i_disksize (hint: delalloc)
2762 */
2763 ext4_mark_inode_dirty(handle, inode);
64769240 2764 }
632eaeab 2765 }
64769240
AT
2766 ret2 = generic_write_end(file, mapping, pos, len, copied,
2767 page, fsdata);
2768 copied = ret2;
2769 if (ret2 < 0)
2770 ret = ret2;
2771 ret2 = ext4_journal_stop(handle);
2772 if (!ret)
2773 ret = ret2;
2774
2775 return ret ? ret : copied;
2776}
2777
2778static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
2779{
64769240
AT
2780 /*
2781 * Drop reserved blocks
2782 */
2783 BUG_ON(!PageLocked(page));
2784 if (!page_has_buffers(page))
2785 goto out;
2786
d2a17637 2787 ext4_da_page_release_reservation(page, offset);
64769240
AT
2788
2789out:
2790 ext4_invalidatepage(page, offset);
2791
2792 return;
2793}
2794
2795
ac27a0ec
DK
2796/*
2797 * bmap() is special. It gets used by applications such as lilo and by
2798 * the swapper to find the on-disk block of a specific piece of data.
2799 *
2800 * Naturally, this is dangerous if the block concerned is still in the
617ba13b 2801 * journal. If somebody makes a swapfile on an ext4 data-journaling
ac27a0ec
DK
2802 * filesystem and enables swap, then they may get a nasty shock when the
2803 * data getting swapped to that swapfile suddenly gets overwritten by
 2804 * the original zeros written out previously to the journal and
2805 * awaiting writeback in the kernel's buffer cache.
2806 *
2807 * So, if we see any bmap calls here on a modified, data-journaled file,
2808 * take extra steps to flush any blocks which might be in the cache.
2809 */
617ba13b 2810static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
ac27a0ec
DK
2811{
2812 struct inode *inode = mapping->host;
2813 journal_t *journal;
2814 int err;
2815
64769240
AT
2816 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
2817 test_opt(inode->i_sb, DELALLOC)) {
2818 /*
2819 * With delalloc we want to sync the file
2820 * so that we can make sure we allocate
2821 * blocks for file
2822 */
2823 filemap_write_and_wait(mapping);
2824 }
2825
0390131b 2826 if (EXT4_JOURNAL(inode) && EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
ac27a0ec
DK
2827 /*
2828 * This is a REALLY heavyweight approach, but the use of
2829 * bmap on dirty files is expected to be extremely rare:
2830 * only if we run lilo or swapon on a freshly made file
2831 * do we expect this to happen.
2832 *
2833 * (bmap requires CAP_SYS_RAWIO so this does not
2834 * represent an unprivileged user DOS attack --- we'd be
2835 * in trouble if mortal users could trigger this path at
2836 * will.)
2837 *
617ba13b 2838 * NB. EXT4_STATE_JDATA is not set on files other than
ac27a0ec
DK
2839 * regular files. If somebody wants to bmap a directory
2840 * or symlink and gets confused because the buffer
2841 * hasn't yet been flushed to disk, they deserve
2842 * everything they get.
2843 */
2844
617ba13b
MC
2845 EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA;
2846 journal = EXT4_JOURNAL(inode);
dab291af
MC
2847 jbd2_journal_lock_updates(journal);
2848 err = jbd2_journal_flush(journal);
2849 jbd2_journal_unlock_updates(journal);
ac27a0ec
DK
2850
2851 if (err)
2852 return 0;
2853 }
2854
af5bc92d 2855 return generic_block_bmap(mapping, block, ext4_get_block);
ac27a0ec
DK
2856}
2857
2858static int bget_one(handle_t *handle, struct buffer_head *bh)
2859{
2860 get_bh(bh);
2861 return 0;
2862}
2863
2864static int bput_one(handle_t *handle, struct buffer_head *bh)
2865{
2866 put_bh(bh);
2867 return 0;
2868}
2869
ac27a0ec 2870/*
678aaf48
JK
2871 * Note that we don't need to start a transaction unless we're journaling data
 2872 * because we should have holes filled from ext4_page_mkwrite(). We don't even
 2873 * need to add the inode to the transaction's list in ordered mode because if
 2874 * we are writing back data added by write(), the inode is already there and if
 2875 * we are writing back data modified via mmap(), no one guarantees in which
2876 * transaction the data will hit the disk. In case we are journaling data, we
2877 * cannot start transaction directly because transaction start ranks above page
2878 * lock so we have to do some magic.
ac27a0ec 2879 *
678aaf48 2880 * In all journaling modes block_write_full_page() will start the I/O.
ac27a0ec
DK
2881 *
2882 * Problem:
2883 *
617ba13b
MC
2884 * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
2885 * ext4_writepage()
ac27a0ec
DK
2886 *
2887 * Similar for:
2888 *
617ba13b 2889 * ext4_file_write() -> generic_file_write() -> __alloc_pages() -> ...
ac27a0ec 2890 *
617ba13b 2891 * Same applies to ext4_get_block(). We will deadlock on various things like
0e855ac8 2892 * lock_journal and i_data_sem
ac27a0ec
DK
2893 *
2894 * Setting PF_MEMALLOC here doesn't work - too many internal memory
2895 * allocations fail.
2896 *
2897 * 16May01: If we're reentered then journal_current_handle() will be
2898 * non-zero. We simply *return*.
2899 *
2900 * 1 July 2001: @@@ FIXME:
2901 * In journalled data mode, a data buffer may be metadata against the
2902 * current transaction. But the same file is part of a shared mapping
2903 * and someone does a writepage() on it.
2904 *
2905 * We will move the buffer onto the async_data list, but *after* it has
2906 * been dirtied. So there's a small window where we have dirty data on
2907 * BJ_Metadata.
2908 *
2909 * Note that this only applies to the last partial page in the file. The
2910 * bit which block_write_full_page() uses prepare/commit for. (That's
2911 * broken code anyway: it's wrong for msync()).
2912 *
2913 * It's a rare case: affects the final partial page, for journalled data
 2914 * where the file is subject to both write() and writepage() in the same
 2915 * transaction. To fix it we'll need a custom block_write_full_page().
2916 * We'll probably need that anyway for journalling writepage() output.
2917 *
2918 * We don't honour synchronous mounts for writepage(). That would be
2919 * disastrous. Any write() or metadata operation will sync the fs for
2920 * us.
2921 *
ac27a0ec 2922 */
678aaf48 2923static int __ext4_normal_writepage(struct page *page,
cf108bca
JK
2924 struct writeback_control *wbc)
2925{
2926 struct inode *inode = page->mapping->host;
2927
2928 if (test_opt(inode->i_sb, NOBH))
f0e6c985
AK
2929 return nobh_writepage(page,
2930 ext4_normal_get_block_write, wbc);
cf108bca 2931 else
f0e6c985
AK
2932 return block_write_full_page(page,
2933 ext4_normal_get_block_write,
2934 wbc);
cf108bca
JK
2935}
2936
678aaf48 2937static int ext4_normal_writepage(struct page *page,
ac27a0ec
DK
2938 struct writeback_control *wbc)
2939{
2940 struct inode *inode = page->mapping->host;
cf108bca
JK
2941 loff_t size = i_size_read(inode);
2942 loff_t len;
2943
ba80b101
TT
2944 trace_mark(ext4_normal_writepage,
2945 "dev %s ino %lu page_index %lu",
2946 inode->i_sb->s_id, inode->i_ino, page->index);
cf108bca 2947 J_ASSERT(PageLocked(page));
cf108bca
JK
2948 if (page->index == size >> PAGE_CACHE_SHIFT)
2949 len = size & ~PAGE_CACHE_MASK;
2950 else
2951 len = PAGE_CACHE_SIZE;
f0e6c985
AK
2952
2953 if (page_has_buffers(page)) {
2954 /* if page has buffers it should all be mapped
2955 * and allocated. If there are not buffers attached
2956 * to the page we know the page is dirty but it lost
2957 * buffers. That means that at some moment in time
2958 * after write_begin() / write_end() has been called
2959 * all buffers have been clean and thus they must have been
2960 * written at least once. So they are all mapped and we can
2961 * happily proceed with mapping them and writing the page.
2962 */
2963 BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
2964 ext4_bh_unmapped_or_delay));
2965 }
cf108bca
JK
2966
2967 if (!ext4_journal_current_handle())
678aaf48 2968 return __ext4_normal_writepage(page, wbc);
cf108bca
JK
2969
2970 redirty_page_for_writepage(wbc, page);
2971 unlock_page(page);
2972 return 0;
2973}
2974
2975static int __ext4_journalled_writepage(struct page *page,
2976 struct writeback_control *wbc)
2977{
2978 struct address_space *mapping = page->mapping;
2979 struct inode *inode = mapping->host;
2980 struct buffer_head *page_bufs;
ac27a0ec
DK
2981 handle_t *handle = NULL;
2982 int ret = 0;
2983 int err;
2984
f0e6c985
AK
2985 ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
2986 ext4_normal_get_block_write);
cf108bca
JK
2987 if (ret != 0)
2988 goto out_unlock;
2989
2990 page_bufs = page_buffers(page);
2991 walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE, NULL,
2992 bget_one);
2993 /* As soon as we unlock the page, it can go away, but we have
2994 * references to buffers so we are safe */
2995 unlock_page(page);
ac27a0ec 2996
617ba13b 2997 handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
ac27a0ec
DK
2998 if (IS_ERR(handle)) {
2999 ret = PTR_ERR(handle);
cf108bca 3000 goto out;
ac27a0ec
DK
3001 }
3002
cf108bca
JK
3003 ret = walk_page_buffers(handle, page_bufs, 0,
3004 PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
ac27a0ec 3005
cf108bca
JK
3006 err = walk_page_buffers(handle, page_bufs, 0,
3007 PAGE_CACHE_SIZE, NULL, write_end_fn);
3008 if (ret == 0)
3009 ret = err;
617ba13b 3010 err = ext4_journal_stop(handle);
ac27a0ec
DK
3011 if (!ret)
3012 ret = err;
ac27a0ec 3013
cf108bca
JK
3014 walk_page_buffers(handle, page_bufs, 0,
3015 PAGE_CACHE_SIZE, NULL, bput_one);
3016 EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
3017 goto out;
3018
3019out_unlock:
ac27a0ec 3020 unlock_page(page);
cf108bca 3021out:
ac27a0ec
DK
3022 return ret;
3023}
3024
617ba13b 3025static int ext4_journalled_writepage(struct page *page,
ac27a0ec
DK
3026 struct writeback_control *wbc)
3027{
3028 struct inode *inode = page->mapping->host;
cf108bca
JK
3029 loff_t size = i_size_read(inode);
3030 loff_t len;
ac27a0ec 3031
ba80b101
TT
3032 trace_mark(ext4_journalled_writepage,
3033 "dev %s ino %lu page_index %lu",
3034 inode->i_sb->s_id, inode->i_ino, page->index);
cf108bca 3035 J_ASSERT(PageLocked(page));
cf108bca
JK
3036 if (page->index == size >> PAGE_CACHE_SHIFT)
3037 len = size & ~PAGE_CACHE_MASK;
3038 else
3039 len = PAGE_CACHE_SIZE;
f0e6c985
AK
3040
3041 if (page_has_buffers(page)) {
 3042		/* if the page has buffers they should all be mapped
 3043		 * and allocated. If there are no buffers attached
3044 * to the page we know the page is dirty but it lost
3045 * buffers. That means that at some moment in time
3046 * after write_begin() / write_end() has been called
3047 * all buffers have been clean and thus they must have been
3048 * written at least once. So they are all mapped and we can
3049 * happily proceed with mapping them and writing the page.
3050 */
3051 BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
3052 ext4_bh_unmapped_or_delay));
3053 }
ac27a0ec 3054
cf108bca 3055 if (ext4_journal_current_handle())
ac27a0ec 3056 goto no_write;
ac27a0ec 3057
cf108bca 3058 if (PageChecked(page)) {
ac27a0ec
DK
3059 /*
3060 * It's mmapped pagecache. Add buffers and journal it. There
3061 * doesn't seem much point in redirtying the page here.
3062 */
3063 ClearPageChecked(page);
cf108bca 3064 return __ext4_journalled_writepage(page, wbc);
ac27a0ec
DK
3065 } else {
3066 /*
3067 * It may be a page full of checkpoint-mode buffers. We don't
3068 * really know unless we go poke around in the buffer_heads.
3069 * But block_write_full_page will do the right thing.
3070 */
f0e6c985
AK
3071 return block_write_full_page(page,
3072 ext4_normal_get_block_write,
3073 wbc);
ac27a0ec 3074 }
ac27a0ec
DK
3075no_write:
3076 redirty_page_for_writepage(wbc, page);
ac27a0ec 3077 unlock_page(page);
cf108bca 3078 return 0;
ac27a0ec
DK
3079}
3080
617ba13b 3081static int ext4_readpage(struct file *file, struct page *page)
ac27a0ec 3082{
617ba13b 3083 return mpage_readpage(page, ext4_get_block);
ac27a0ec
DK
3084}
3085
3086static int
617ba13b 3087ext4_readpages(struct file *file, struct address_space *mapping,
ac27a0ec
DK
3088 struct list_head *pages, unsigned nr_pages)
3089{
617ba13b 3090 return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
ac27a0ec
DK
3091}
3092
617ba13b 3093static void ext4_invalidatepage(struct page *page, unsigned long offset)
ac27a0ec 3094{
617ba13b 3095 journal_t *journal = EXT4_JOURNAL(page->mapping->host);
ac27a0ec
DK
3096
3097 /*
3098 * If it's a full truncate we just forget about the pending dirtying
3099 */
3100 if (offset == 0)
3101 ClearPageChecked(page);
3102
0390131b
FM
3103 if (journal)
3104 jbd2_journal_invalidatepage(journal, page, offset);
3105 else
3106 block_invalidatepage(page, offset);
ac27a0ec
DK
3107}
3108
617ba13b 3109static int ext4_releasepage(struct page *page, gfp_t wait)
ac27a0ec 3110{
617ba13b 3111 journal_t *journal = EXT4_JOURNAL(page->mapping->host);
ac27a0ec
DK
3112
3113 WARN_ON(PageChecked(page));
3114 if (!page_has_buffers(page))
3115 return 0;
0390131b
FM
3116 if (journal)
3117 return jbd2_journal_try_to_free_buffers(journal, page, wait);
3118 else
3119 return try_to_free_buffers(page);
ac27a0ec
DK
3120}
3121
3122/*
3123 * If the O_DIRECT write will extend the file then add this inode to the
3124 * orphan list. So recovery will truncate it back to the original size
3125 * if the machine crashes during the write.
3126 *
 3127 * If the O_DIRECT write is instantiating holes inside i_size and the machine
7fb5409d
JK
3128 * crashes then stale disk data _may_ be exposed inside the file. But current
3129 * VFS code falls back into buffered path in that case so we are safe.
ac27a0ec 3130 */
617ba13b 3131static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
ac27a0ec
DK
3132 const struct iovec *iov, loff_t offset,
3133 unsigned long nr_segs)
3134{
3135 struct file *file = iocb->ki_filp;
3136 struct inode *inode = file->f_mapping->host;
617ba13b 3137 struct ext4_inode_info *ei = EXT4_I(inode);
7fb5409d 3138 handle_t *handle;
ac27a0ec
DK
3139 ssize_t ret;
3140 int orphan = 0;
3141 size_t count = iov_length(iov, nr_segs);
3142
3143 if (rw == WRITE) {
3144 loff_t final_size = offset + count;
3145
ac27a0ec 3146 if (final_size > inode->i_size) {
7fb5409d
JK
3147 /* Credits for sb + inode write */
3148 handle = ext4_journal_start(inode, 2);
3149 if (IS_ERR(handle)) {
3150 ret = PTR_ERR(handle);
3151 goto out;
3152 }
617ba13b 3153 ret = ext4_orphan_add(handle, inode);
7fb5409d
JK
3154 if (ret) {
3155 ext4_journal_stop(handle);
3156 goto out;
3157 }
ac27a0ec
DK
3158 orphan = 1;
3159 ei->i_disksize = inode->i_size;
7fb5409d 3160 ext4_journal_stop(handle);
ac27a0ec
DK
3161 }
3162 }
3163
3164 ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
3165 offset, nr_segs,
617ba13b 3166 ext4_get_block, NULL);
ac27a0ec 3167
7fb5409d 3168 if (orphan) {
ac27a0ec
DK
3169 int err;
3170
7fb5409d
JK
3171 /* Credits for sb + inode write */
3172 handle = ext4_journal_start(inode, 2);
3173 if (IS_ERR(handle)) {
3174 /* This is really bad luck. We've written the data
3175 * but cannot extend i_size. Bail out and pretend
3176 * the write failed... */
3177 ret = PTR_ERR(handle);
3178 goto out;
3179 }
3180 if (inode->i_nlink)
617ba13b 3181 ext4_orphan_del(handle, inode);
7fb5409d 3182 if (ret > 0) {
ac27a0ec
DK
3183 loff_t end = offset + ret;
3184 if (end > inode->i_size) {
3185 ei->i_disksize = end;
3186 i_size_write(inode, end);
3187 /*
3188 * We're going to return a positive `ret'
3189 * here due to non-zero-length I/O, so there's
3190 * no way of reporting error returns from
617ba13b 3191 * ext4_mark_inode_dirty() to userspace. So
ac27a0ec
DK
3192 * ignore it.
3193 */
617ba13b 3194 ext4_mark_inode_dirty(handle, inode);
ac27a0ec
DK
3195 }
3196 }
617ba13b 3197 err = ext4_journal_stop(handle);
ac27a0ec
DK
3198 if (ret == 0)
3199 ret = err;
3200 }
3201out:
3202 return ret;
3203}
3204
3205/*
617ba13b 3206 * Pages can be marked dirty completely asynchronously from ext4's journalling
ac27a0ec
DK
3207 * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do
3208 * much here because ->set_page_dirty is called under VFS locks. The page is
3209 * not necessarily locked.
3210 *
3211 * We cannot just dirty the page and leave attached buffers clean, because the
3212 * buffers' dirty state is "definitive". We cannot just set the buffers dirty
3213 * or jbddirty because all the journalling code will explode.
3214 *
3215 * So what we do is to mark the page "pending dirty" and next time writepage
3216 * is called, propagate that into the buffers appropriately.
3217 */
617ba13b 3218static int ext4_journalled_set_page_dirty(struct page *page)
ac27a0ec
DK
3219{
3220 SetPageChecked(page);
3221 return __set_page_dirty_nobuffers(page);
3222}
3223
617ba13b 3224static const struct address_space_operations ext4_ordered_aops = {
8ab22b9a
HH
3225 .readpage = ext4_readpage,
3226 .readpages = ext4_readpages,
3227 .writepage = ext4_normal_writepage,
3228 .sync_page = block_sync_page,
3229 .write_begin = ext4_write_begin,
3230 .write_end = ext4_ordered_write_end,
3231 .bmap = ext4_bmap,
3232 .invalidatepage = ext4_invalidatepage,
3233 .releasepage = ext4_releasepage,
3234 .direct_IO = ext4_direct_IO,
3235 .migratepage = buffer_migrate_page,
3236 .is_partially_uptodate = block_is_partially_uptodate,
ac27a0ec
DK
3237};
3238
617ba13b 3239static const struct address_space_operations ext4_writeback_aops = {
8ab22b9a
HH
3240 .readpage = ext4_readpage,
3241 .readpages = ext4_readpages,
3242 .writepage = ext4_normal_writepage,
3243 .sync_page = block_sync_page,
3244 .write_begin = ext4_write_begin,
3245 .write_end = ext4_writeback_write_end,
3246 .bmap = ext4_bmap,
3247 .invalidatepage = ext4_invalidatepage,
3248 .releasepage = ext4_releasepage,
3249 .direct_IO = ext4_direct_IO,
3250 .migratepage = buffer_migrate_page,
3251 .is_partially_uptodate = block_is_partially_uptodate,
ac27a0ec
DK
3252};
3253
617ba13b 3254static const struct address_space_operations ext4_journalled_aops = {
8ab22b9a
HH
3255 .readpage = ext4_readpage,
3256 .readpages = ext4_readpages,
3257 .writepage = ext4_journalled_writepage,
3258 .sync_page = block_sync_page,
3259 .write_begin = ext4_write_begin,
3260 .write_end = ext4_journalled_write_end,
3261 .set_page_dirty = ext4_journalled_set_page_dirty,
3262 .bmap = ext4_bmap,
3263 .invalidatepage = ext4_invalidatepage,
3264 .releasepage = ext4_releasepage,
3265 .is_partially_uptodate = block_is_partially_uptodate,
ac27a0ec
DK
3266};
3267
64769240 3268static const struct address_space_operations ext4_da_aops = {
8ab22b9a
HH
3269 .readpage = ext4_readpage,
3270 .readpages = ext4_readpages,
3271 .writepage = ext4_da_writepage,
3272 .writepages = ext4_da_writepages,
3273 .sync_page = block_sync_page,
3274 .write_begin = ext4_da_write_begin,
3275 .write_end = ext4_da_write_end,
3276 .bmap = ext4_bmap,
3277 .invalidatepage = ext4_da_invalidatepage,
3278 .releasepage = ext4_releasepage,
3279 .direct_IO = ext4_direct_IO,
3280 .migratepage = buffer_migrate_page,
3281 .is_partially_uptodate = block_is_partially_uptodate,
64769240
AT
3282};
3283
617ba13b 3284void ext4_set_aops(struct inode *inode)
ac27a0ec 3285{
cd1aac32
AK
3286 if (ext4_should_order_data(inode) &&
3287 test_opt(inode->i_sb, DELALLOC))
3288 inode->i_mapping->a_ops = &ext4_da_aops;
3289 else if (ext4_should_order_data(inode))
617ba13b 3290 inode->i_mapping->a_ops = &ext4_ordered_aops;
64769240
AT
3291 else if (ext4_should_writeback_data(inode) &&
3292 test_opt(inode->i_sb, DELALLOC))
3293 inode->i_mapping->a_ops = &ext4_da_aops;
617ba13b
MC
3294 else if (ext4_should_writeback_data(inode))
3295 inode->i_mapping->a_ops = &ext4_writeback_aops;
ac27a0ec 3296 else
617ba13b 3297 inode->i_mapping->a_ops = &ext4_journalled_aops;
ac27a0ec
DK
3298}
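/*
 * Editor's summary of the selection above (not part of the original file).
 * Under the mount options assumed here, the mapping is:
 *
 *	data=ordered   + delalloc   -> ext4_da_aops
 *	data=ordered                -> ext4_ordered_aops
 *	data=writeback + delalloc   -> ext4_da_aops
 *	data=writeback              -> ext4_writeback_aops
 *	data=journal                -> ext4_journalled_aops (delalloc not used)
 */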
3299
3300/*
617ba13b 3301 * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
ac27a0ec
DK
3302 * up to the end of the block which corresponds to `from'.
 3303 * This is required during truncate. We need to physically zero the tail end
3304 * of that block so it doesn't yield old data if the file is later grown.
3305 */
cf108bca 3306int ext4_block_truncate_page(handle_t *handle,
ac27a0ec
DK
3307 struct address_space *mapping, loff_t from)
3308{
617ba13b 3309 ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
ac27a0ec 3310 unsigned offset = from & (PAGE_CACHE_SIZE-1);
725d26d3
AK
3311 unsigned blocksize, length, pos;
3312 ext4_lblk_t iblock;
ac27a0ec
DK
3313 struct inode *inode = mapping->host;
3314 struct buffer_head *bh;
cf108bca 3315 struct page *page;
ac27a0ec 3316 int err = 0;
ac27a0ec 3317
cf108bca
JK
3318 page = grab_cache_page(mapping, from >> PAGE_CACHE_SHIFT);
3319 if (!page)
3320 return -EINVAL;
3321
ac27a0ec
DK
3322 blocksize = inode->i_sb->s_blocksize;
3323 length = blocksize - (offset & (blocksize - 1));
3324 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
3325
3326 /*
3327 * For "nobh" option, we can only work if we don't need to
3328 * read-in the page - otherwise we create buffers to do the IO.
3329 */
3330 if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
617ba13b 3331 ext4_should_writeback_data(inode) && PageUptodate(page)) {
eebd2aa3 3332 zero_user(page, offset, length);
ac27a0ec
DK
3333 set_page_dirty(page);
3334 goto unlock;
3335 }
3336
3337 if (!page_has_buffers(page))
3338 create_empty_buffers(page, blocksize, 0);
3339
3340 /* Find the buffer that contains "offset" */
3341 bh = page_buffers(page);
3342 pos = blocksize;
3343 while (offset >= pos) {
3344 bh = bh->b_this_page;
3345 iblock++;
3346 pos += blocksize;
3347 }
3348
3349 err = 0;
3350 if (buffer_freed(bh)) {
3351 BUFFER_TRACE(bh, "freed: skip");
3352 goto unlock;
3353 }
3354
3355 if (!buffer_mapped(bh)) {
3356 BUFFER_TRACE(bh, "unmapped");
617ba13b 3357 ext4_get_block(inode, iblock, bh, 0);
ac27a0ec
DK
3358 /* unmapped? It's a hole - nothing to do */
3359 if (!buffer_mapped(bh)) {
3360 BUFFER_TRACE(bh, "still unmapped");
3361 goto unlock;
3362 }
3363 }
3364
3365 /* Ok, it's mapped. Make sure it's up-to-date */
3366 if (PageUptodate(page))
3367 set_buffer_uptodate(bh);
3368
3369 if (!buffer_uptodate(bh)) {
3370 err = -EIO;
3371 ll_rw_block(READ, 1, &bh);
3372 wait_on_buffer(bh);
3373 /* Uhhuh. Read error. Complain and punt. */
3374 if (!buffer_uptodate(bh))
3375 goto unlock;
3376 }
3377
617ba13b 3378 if (ext4_should_journal_data(inode)) {
ac27a0ec 3379 BUFFER_TRACE(bh, "get write access");
617ba13b 3380 err = ext4_journal_get_write_access(handle, bh);
ac27a0ec
DK
3381 if (err)
3382 goto unlock;
3383 }
3384
eebd2aa3 3385 zero_user(page, offset, length);
ac27a0ec
DK
3386
3387 BUFFER_TRACE(bh, "zeroed end of block");
3388
3389 err = 0;
617ba13b 3390 if (ext4_should_journal_data(inode)) {
0390131b 3391 err = ext4_handle_dirty_metadata(handle, inode, bh);
ac27a0ec 3392 } else {
617ba13b 3393 if (ext4_should_order_data(inode))
678aaf48 3394 err = ext4_jbd2_file_inode(handle, inode);
ac27a0ec
DK
3395 mark_buffer_dirty(bh);
3396 }
3397
3398unlock:
3399 unlock_page(page);
3400 page_cache_release(page);
3401 return err;
3402}
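/*
 * Editor's worked example for the tail-zeroing arithmetic above (not part
 * of the original file).  Assuming PAGE_CACHE_SIZE = 4096 and a 1024-byte
 * filesystem block, a truncate to from = 5000 gives:
 *
 *	offset = 5000 & (4096 - 1)            = 904  (byte offset in the page)
 *	length = 1024 - (904 & (1024 - 1))    = 120  (bytes to zero)
 *	iblock = (5000 >> 12) << (12 - 10)    = 4    (first block of that page)
 *
 * The buffer walk then stops at the first buffer of the page (offset 904 is
 * below pos = 1024), so the final 120 bytes of logical block 4, file bytes
 * 5000..5119, are zeroed, which is exactly the "zero up to the end of the
 * block containing `from'" contract described above.
 */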
3403
3404/*
3405 * Probably it should be a library function... search for first non-zero word
3406 * or memcmp with zero_page, whatever is better for particular architecture.
3407 * Linus?
3408 */
3409static inline int all_zeroes(__le32 *p, __le32 *q)
3410{
3411 while (p < q)
3412 if (*p++)
3413 return 0;
3414 return 1;
3415}
3416
3417/**
617ba13b 3418 * ext4_find_shared - find the indirect blocks for partial truncation.
ac27a0ec
DK
3419 * @inode: inode in question
3420 * @depth: depth of the affected branch
617ba13b 3421 * @offsets: offsets of pointers in that branch (see ext4_block_to_path)
ac27a0ec
DK
3422 * @chain: place to store the pointers to partial indirect blocks
3423 * @top: place to the (detached) top of branch
3424 *
617ba13b 3425 * This is a helper function used by ext4_truncate().
ac27a0ec
DK
3426 *
3427 * When we do truncate() we may have to clean the ends of several
3428 * indirect blocks but leave the blocks themselves alive. Block is
 3429 * partially truncated if some data below the new i_size is referred
3430 * from it (and it is on the path to the first completely truncated
3431 * data block, indeed). We have to free the top of that path along
3432 * with everything to the right of the path. Since no allocation
617ba13b 3433 * past the truncation point is possible until ext4_truncate()
ac27a0ec
DK
3434 * finishes, we may safely do the latter, but top of branch may
3435 * require special attention - pageout below the truncation point
3436 * might try to populate it.
3437 *
3438 * We atomically detach the top of branch from the tree, store the
3439 * block number of its root in *@top, pointers to buffer_heads of
3440 * partially truncated blocks - in @chain[].bh and pointers to
3441 * their last elements that should not be removed - in
3442 * @chain[].p. Return value is the pointer to last filled element
3443 * of @chain.
3444 *
3445 * The work left to caller to do the actual freeing of subtrees:
3446 * a) free the subtree starting from *@top
3447 * b) free the subtrees whose roots are stored in
3448 * (@chain[i].p+1 .. end of @chain[i].bh->b_data)
3449 * c) free the subtrees growing from the inode past the @chain[0].
3450 * (no partially truncated stuff there). */
3451
617ba13b 3452static Indirect *ext4_find_shared(struct inode *inode, int depth,
725d26d3 3453 ext4_lblk_t offsets[4], Indirect chain[4], __le32 *top)
ac27a0ec
DK
3454{
3455 Indirect *partial, *p;
3456 int k, err;
3457
3458 *top = 0;
 3459	/* Make k index the deepest non-null offset + 1 */
3460 for (k = depth; k > 1 && !offsets[k-1]; k--)
3461 ;
617ba13b 3462 partial = ext4_get_branch(inode, k, offsets, chain, &err);
ac27a0ec
DK
3463 /* Writer: pointers */
3464 if (!partial)
3465 partial = chain + k-1;
3466 /*
3467 * If the branch acquired continuation since we've looked at it -
3468 * fine, it should all survive and (new) top doesn't belong to us.
3469 */
3470 if (!partial->key && *partial->p)
3471 /* Writer: end */
3472 goto no_top;
af5bc92d 3473 for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
ac27a0ec
DK
3474 ;
3475 /*
3476 * OK, we've found the last block that must survive. The rest of our
3477 * branch should be detached before unlocking. However, if that rest
3478 * of branch is all ours and does not grow immediately from the inode
3479 * it's easier to cheat and just decrement partial->p.
3480 */
3481 if (p == chain + k - 1 && p > chain) {
3482 p->p--;
3483 } else {
3484 *top = *p->p;
617ba13b 3485 /* Nope, don't do this in ext4. Must leave the tree intact */
ac27a0ec
DK
3486#if 0
3487 *p->p = 0;
3488#endif
3489 }
3490 /* Writer: end */
3491
af5bc92d 3492 while (partial > p) {
ac27a0ec
DK
3493 brelse(partial->bh);
3494 partial--;
3495 }
3496no_top:
3497 return partial;
3498}
3499
3500/*
3501 * Zero a number of block pointers in either an inode or an indirect block.
3502 * If we restart the transaction we must again get write access to the
3503 * indirect block for further modification.
3504 *
3505 * We release `count' blocks on disk, but (last - first) may be greater
3506 * than `count' because there can be holes in there.
3507 */
617ba13b
MC
3508static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
3509 struct buffer_head *bh, ext4_fsblk_t block_to_free,
ac27a0ec
DK
3510 unsigned long count, __le32 *first, __le32 *last)
3511{
3512 __le32 *p;
3513 if (try_to_extend_transaction(handle, inode)) {
3514 if (bh) {
0390131b
FM
3515 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
3516 ext4_handle_dirty_metadata(handle, inode, bh);
ac27a0ec 3517 }
617ba13b
MC
3518 ext4_mark_inode_dirty(handle, inode);
3519 ext4_journal_test_restart(handle, inode);
ac27a0ec
DK
3520 if (bh) {
3521 BUFFER_TRACE(bh, "retaking write access");
617ba13b 3522 ext4_journal_get_write_access(handle, bh);
ac27a0ec
DK
3523 }
3524 }
3525
3526 /*
3527 * Any buffers which are on the journal will be in memory. We find
dab291af 3528 * them on the hash table so jbd2_journal_revoke() will run jbd2_journal_forget()
ac27a0ec 3529 * on them. We've already detached each block from the file, so
dab291af 3530 * bforget() in jbd2_journal_forget() should be safe.
ac27a0ec 3531 *
dab291af 3532 * AKPM: turn on bforget in jbd2_journal_forget()!!!
ac27a0ec
DK
3533 */
3534 for (p = first; p < last; p++) {
3535 u32 nr = le32_to_cpu(*p);
3536 if (nr) {
1d03ec98 3537 struct buffer_head *tbh;
ac27a0ec
DK
3538
3539 *p = 0;
1d03ec98
AK
3540 tbh = sb_find_get_block(inode->i_sb, nr);
3541 ext4_forget(handle, 0, inode, tbh, nr);
ac27a0ec
DK
3542 }
3543 }
3544
c9de560d 3545 ext4_free_blocks(handle, inode, block_to_free, count, 0);
ac27a0ec
DK
3546}
3547
3548/**
617ba13b 3549 * ext4_free_data - free a list of data blocks
ac27a0ec
DK
3550 * @handle: handle for this transaction
3551 * @inode: inode we are dealing with
3552 * @this_bh: indirect buffer_head which contains *@first and *@last
3553 * @first: array of block numbers
3554 * @last: points immediately past the end of array
3555 *
 3556 * We are freeing all blocks referred to from that array (numbers are stored as
3557 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
3558 *
3559 * We accumulate contiguous runs of blocks to free. Conveniently, if these
3560 * blocks are contiguous then releasing them at one time will only affect one
3561 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
3562 * actually use a lot of journal space.
3563 *
3564 * @this_bh will be %NULL if @first and @last point into the inode's direct
3565 * block pointers.
3566 */
617ba13b 3567static void ext4_free_data(handle_t *handle, struct inode *inode,
ac27a0ec
DK
3568 struct buffer_head *this_bh,
3569 __le32 *first, __le32 *last)
3570{
617ba13b 3571 ext4_fsblk_t block_to_free = 0; /* Starting block # of a run */
ac27a0ec
DK
3572 unsigned long count = 0; /* Number of blocks in the run */
3573 __le32 *block_to_free_p = NULL; /* Pointer into inode/ind
3574 corresponding to
3575 block_to_free */
617ba13b 3576 ext4_fsblk_t nr; /* Current block # */
ac27a0ec
DK
3577 __le32 *p; /* Pointer into inode/ind
3578 for current block */
3579 int err;
3580
3581 if (this_bh) { /* For indirect block */
3582 BUFFER_TRACE(this_bh, "get_write_access");
617ba13b 3583 err = ext4_journal_get_write_access(handle, this_bh);
ac27a0ec
DK
3584 /* Important: if we can't update the indirect pointers
3585 * to the blocks, we can't free them. */
3586 if (err)
3587 return;
3588 }
3589
3590 for (p = first; p < last; p++) {
3591 nr = le32_to_cpu(*p);
3592 if (nr) {
3593 /* accumulate blocks to free if they're contiguous */
3594 if (count == 0) {
3595 block_to_free = nr;
3596 block_to_free_p = p;
3597 count = 1;
3598 } else if (nr == block_to_free + count) {
3599 count++;
3600 } else {
617ba13b 3601 ext4_clear_blocks(handle, inode, this_bh,
ac27a0ec
DK
3602 block_to_free,
3603 count, block_to_free_p, p);
3604 block_to_free = nr;
3605 block_to_free_p = p;
3606 count = 1;
3607 }
3608 }
3609 }
3610
3611 if (count > 0)
617ba13b 3612 ext4_clear_blocks(handle, inode, this_bh, block_to_free,
ac27a0ec
DK
3613 count, block_to_free_p, p);
3614
3615 if (this_bh) {
0390131b 3616 BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");
71dc8fbc
DG
3617
3618 /*
3619 * The buffer head should have an attached journal head at this
3620 * point. However, if the data is corrupted and an indirect
3621 * block pointed to itself, it would have been detached when
3622 * the block was cleared. Check for this instead of OOPSing.
3623 */
e7f07968 3624 if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
0390131b 3625 ext4_handle_dirty_metadata(handle, inode, this_bh);
71dc8fbc
DG
3626 else
3627 ext4_error(inode->i_sb, __func__,
3628 "circular indirect block detected, "
3629 "inode=%lu, block=%llu",
3630 inode->i_ino,
3631 (unsigned long long) this_bh->b_blocknr);
ac27a0ec
DK
3632 }
3633}
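/*
 * Editor's sketch of the run-coalescing logic used by ext4_free_data()
 * above, shown in isolation (not part of the original file; the function
 * name and callback are illustrative only).  Zero entries are holes and are
 * skipped; a run is flushed only when the next non-zero block is not
 * contiguous with it.  For the block numbers {100, 101, 102, 0, 200, 201}
 * the callback fires twice: (start = 100, count = 3) and (start = 200,
 * count = 2).
 */
static void example_coalesce_runs(const u32 *blocks, int n,
				  void (*free_run)(u32 start,
						   unsigned long count))
{
	u32 start = 0;
	unsigned long count = 0;
	int i;

	for (i = 0; i < n; i++) {
		u32 nr = blocks[i];

		if (!nr)
			continue;		/* a hole: skip, keep the run */
		if (count == 0) {
			start = nr;		/* open a new run */
			count = 1;
		} else if (nr == start + count) {
			count++;		/* still contiguous: extend */
		} else {
			free_run(start, count);	/* flush the finished run */
			start = nr;
			count = 1;
		}
	}
	if (count)
		free_run(start, count);		/* flush the final run */
}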
3634
3635/**
617ba13b 3636 * ext4_free_branches - free an array of branches
ac27a0ec
DK
3637 * @handle: JBD handle for this transaction
3638 * @inode: inode we are dealing with
3639 * @parent_bh: the buffer_head which contains *@first and *@last
3640 * @first: array of block numbers
3641 * @last: pointer immediately past the end of array
3642 * @depth: depth of the branches to free
3643 *
 3644 * We are freeing all blocks referred to from these branches (numbers are
3645 * stored as little-endian 32-bit) and updating @inode->i_blocks
3646 * appropriately.
3647 */
617ba13b 3648static void ext4_free_branches(handle_t *handle, struct inode *inode,
ac27a0ec
DK
3649 struct buffer_head *parent_bh,
3650 __le32 *first, __le32 *last, int depth)
3651{
617ba13b 3652 ext4_fsblk_t nr;
ac27a0ec
DK
3653 __le32 *p;
3654
0390131b 3655 if (ext4_handle_is_aborted(handle))
ac27a0ec
DK
3656 return;
3657
3658 if (depth--) {
3659 struct buffer_head *bh;
617ba13b 3660 int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
ac27a0ec
DK
3661 p = last;
3662 while (--p >= first) {
3663 nr = le32_to_cpu(*p);
3664 if (!nr)
3665 continue; /* A hole */
3666
3667 /* Go read the buffer for the next level down */
3668 bh = sb_bread(inode->i_sb, nr);
3669
3670 /*
3671 * A read failure? Report error and clear slot
3672 * (should be rare).
3673 */
3674 if (!bh) {
617ba13b 3675 ext4_error(inode->i_sb, "ext4_free_branches",
2ae02107 3676 "Read failure, inode=%lu, block=%llu",
ac27a0ec
DK
3677 inode->i_ino, nr);
3678 continue;
3679 }
3680
3681 /* This zaps the entire block. Bottom up. */
3682 BUFFER_TRACE(bh, "free child branches");
617ba13b 3683 ext4_free_branches(handle, inode, bh,
af5bc92d
TT
3684 (__le32 *) bh->b_data,
3685 (__le32 *) bh->b_data + addr_per_block,
3686 depth);
ac27a0ec
DK
3687
3688 /*
3689 * We've probably journalled the indirect block several
3690 * times during the truncate. But it's no longer
3691 * needed and we now drop it from the transaction via
dab291af 3692 * jbd2_journal_revoke().
ac27a0ec
DK
3693 *
3694 * That's easy if it's exclusively part of this
3695 * transaction. But if it's part of the committing
dab291af 3696 * transaction then jbd2_journal_forget() will simply
ac27a0ec 3697 * brelse() it. That means that if the underlying
617ba13b 3698 * block is reallocated in ext4_get_block(),
ac27a0ec
DK
3699 * unmap_underlying_metadata() will find this block
3700 * and will try to get rid of it. damn, damn.
3701 *
3702 * If this block has already been committed to the
3703 * journal, a revoke record will be written. And
3704 * revoke records must be emitted *before* clearing
3705 * this block's bit in the bitmaps.
3706 */
617ba13b 3707 ext4_forget(handle, 1, inode, bh, bh->b_blocknr);
ac27a0ec
DK
3708
3709 /*
 3710			 * Everything below this pointer has been
3711 * released. Now let this top-of-subtree go.
3712 *
3713 * We want the freeing of this indirect block to be
3714 * atomic in the journal with the updating of the
3715 * bitmap block which owns it. So make some room in
3716 * the journal.
3717 *
3718 * We zero the parent pointer *after* freeing its
3719 * pointee in the bitmaps, so if extend_transaction()
3720 * for some reason fails to put the bitmap changes and
3721 * the release into the same transaction, recovery
3722 * will merely complain about releasing a free block,
3723 * rather than leaking blocks.
3724 */
0390131b 3725 if (ext4_handle_is_aborted(handle))
ac27a0ec
DK
3726 return;
3727 if (try_to_extend_transaction(handle, inode)) {
617ba13b
MC
3728 ext4_mark_inode_dirty(handle, inode);
3729 ext4_journal_test_restart(handle, inode);
ac27a0ec
DK
3730 }
3731
c9de560d 3732 ext4_free_blocks(handle, inode, nr, 1, 1);
ac27a0ec
DK
3733
3734 if (parent_bh) {
3735 /*
3736 * The block which we have just freed is
3737 * pointed to by an indirect block: journal it
3738 */
3739 BUFFER_TRACE(parent_bh, "get_write_access");
617ba13b 3740 if (!ext4_journal_get_write_access(handle,
ac27a0ec
DK
3741 parent_bh)){
3742 *p = 0;
3743 BUFFER_TRACE(parent_bh,
0390131b
FM
3744 "call ext4_handle_dirty_metadata");
3745 ext4_handle_dirty_metadata(handle,
3746 inode,
3747 parent_bh);
ac27a0ec
DK
3748 }
3749 }
3750 }
3751 } else {
3752 /* We have reached the bottom of the tree. */
3753 BUFFER_TRACE(parent_bh, "free data blocks");
617ba13b 3754 ext4_free_data(handle, inode, parent_bh, first, last);
ac27a0ec
DK
3755 }
3756}
3757
91ef4caf
DG
3758int ext4_can_truncate(struct inode *inode)
3759{
3760 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
3761 return 0;
3762 if (S_ISREG(inode->i_mode))
3763 return 1;
3764 if (S_ISDIR(inode->i_mode))
3765 return 1;
3766 if (S_ISLNK(inode->i_mode))
3767 return !ext4_inode_is_fast_symlink(inode);
3768 return 0;
3769}
3770
ac27a0ec 3771/*
617ba13b 3772 * ext4_truncate()
ac27a0ec 3773 *
617ba13b
MC
3774 * We block out ext4_get_block() block instantiations across the entire
3775 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
ac27a0ec
DK
3776 * simultaneously on behalf of the same inode.
3777 *
 3778 * As we work through the truncate and commit bits of it to the journal there
3779 * is one core, guiding principle: the file's tree must always be consistent on
3780 * disk. We must be able to restart the truncate after a crash.
3781 *
3782 * The file's tree may be transiently inconsistent in memory (although it
3783 * probably isn't), but whenever we close off and commit a journal transaction,
3784 * the contents of (the filesystem + the journal) must be consistent and
3785 * restartable. It's pretty simple, really: bottom up, right to left (although
3786 * left-to-right works OK too).
3787 *
3788 * Note that at recovery time, journal replay occurs *before* the restart of
3789 * truncate against the orphan inode list.
3790 *
3791 * The committed inode has the new, desired i_size (which is the same as
617ba13b 3792 * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see
ac27a0ec 3793 * that this inode's truncate did not complete and it will again call
617ba13b
MC
3794 * ext4_truncate() to have another go. So there will be instantiated blocks
3795 * to the right of the truncation point in a crashed ext4 filesystem. But
ac27a0ec 3796 * that's fine - as long as they are linked from the inode, the post-crash
617ba13b 3797 * ext4_truncate() run will find them and release them.
ac27a0ec 3798 */
617ba13b 3799void ext4_truncate(struct inode *inode)
ac27a0ec
DK
3800{
3801 handle_t *handle;
617ba13b 3802 struct ext4_inode_info *ei = EXT4_I(inode);
ac27a0ec 3803 __le32 *i_data = ei->i_data;
617ba13b 3804 int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
ac27a0ec 3805 struct address_space *mapping = inode->i_mapping;
725d26d3 3806 ext4_lblk_t offsets[4];
ac27a0ec
DK
3807 Indirect chain[4];
3808 Indirect *partial;
3809 __le32 nr = 0;
3810 int n;
725d26d3 3811 ext4_lblk_t last_block;
ac27a0ec 3812 unsigned blocksize = inode->i_sb->s_blocksize;
ac27a0ec 3813
91ef4caf 3814 if (!ext4_can_truncate(inode))
ac27a0ec
DK
3815 return;
3816
1d03ec98 3817 if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
cf108bca 3818 ext4_ext_truncate(inode);
1d03ec98
AK
3819 return;
3820 }
a86c6181 3821
ac27a0ec 3822 handle = start_transaction(inode);
cf108bca 3823 if (IS_ERR(handle))
ac27a0ec 3824 return; /* AKPM: return what? */
ac27a0ec
DK
3825
3826 last_block = (inode->i_size + blocksize-1)
617ba13b 3827 >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
ac27a0ec 3828
cf108bca
JK
3829 if (inode->i_size & (blocksize - 1))
3830 if (ext4_block_truncate_page(handle, mapping, inode->i_size))
3831 goto out_stop;
ac27a0ec 3832
617ba13b 3833 n = ext4_block_to_path(inode, last_block, offsets, NULL);
ac27a0ec
DK
3834 if (n == 0)
3835 goto out_stop; /* error */
3836
3837 /*
3838 * OK. This truncate is going to happen. We add the inode to the
3839 * orphan list, so that if this truncate spans multiple transactions,
3840 * and we crash, we will resume the truncate when the filesystem
3841 * recovers. It also marks the inode dirty, to catch the new size.
3842 *
3843 * Implication: the file must always be in a sane, consistent
3844 * truncatable state while each transaction commits.
3845 */
617ba13b 3846 if (ext4_orphan_add(handle, inode))
ac27a0ec
DK
3847 goto out_stop;
3848
632eaeab
MC
3849 /*
3850 * From here we block out all ext4_get_block() callers who want to
3851 * modify the block allocation tree.
3852 */
3853 down_write(&ei->i_data_sem);
b4df2030 3854
c2ea3fde 3855 ext4_discard_preallocations(inode);
b4df2030 3856
ac27a0ec
DK
3857 /*
3858 * The orphan list entry will now protect us from any crash which
3859 * occurs before the truncate completes, so it is now safe to propagate
3860 * the new, shorter inode size (held for now in i_size) into the
3861 * on-disk inode. We do this via i_disksize, which is the value which
617ba13b 3862 * ext4 *really* writes onto the disk inode.
ac27a0ec
DK
3863 */
3864 ei->i_disksize = inode->i_size;
3865
ac27a0ec 3866 if (n == 1) { /* direct blocks */
617ba13b
MC
3867 ext4_free_data(handle, inode, NULL, i_data+offsets[0],
3868 i_data + EXT4_NDIR_BLOCKS);
ac27a0ec
DK
3869 goto do_indirects;
3870 }
3871
617ba13b 3872 partial = ext4_find_shared(inode, n, offsets, chain, &nr);
ac27a0ec
DK
3873 /* Kill the top of shared branch (not detached) */
3874 if (nr) {
3875 if (partial == chain) {
3876 /* Shared branch grows from the inode */
617ba13b 3877 ext4_free_branches(handle, inode, NULL,
ac27a0ec
DK
3878 &nr, &nr+1, (chain+n-1) - partial);
3879 *partial->p = 0;
3880 /*
3881 * We mark the inode dirty prior to restart,
3882 * and prior to stop. No need for it here.
3883 */
3884 } else {
3885 /* Shared branch grows from an indirect block */
3886 BUFFER_TRACE(partial->bh, "get_write_access");
617ba13b 3887 ext4_free_branches(handle, inode, partial->bh,
ac27a0ec
DK
3888 partial->p,
3889 partial->p+1, (chain+n-1) - partial);
3890 }
3891 }
3892 /* Clear the ends of indirect blocks on the shared branch */
3893 while (partial > chain) {
617ba13b 3894 ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
ac27a0ec
DK
3895 (__le32*)partial->bh->b_data+addr_per_block,
3896 (chain+n-1) - partial);
3897 BUFFER_TRACE(partial->bh, "call brelse");
 3898		brelse(partial->bh);
3899 partial--;
3900 }
3901do_indirects:
3902 /* Kill the remaining (whole) subtrees */
3903 switch (offsets[0]) {
3904 default:
617ba13b 3905 nr = i_data[EXT4_IND_BLOCK];
ac27a0ec 3906 if (nr) {
617ba13b
MC
3907 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
3908 i_data[EXT4_IND_BLOCK] = 0;
ac27a0ec 3909 }
617ba13b
MC
3910 case EXT4_IND_BLOCK:
3911 nr = i_data[EXT4_DIND_BLOCK];
ac27a0ec 3912 if (nr) {
617ba13b
MC
3913 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
3914 i_data[EXT4_DIND_BLOCK] = 0;
ac27a0ec 3915 }
617ba13b
MC
3916 case EXT4_DIND_BLOCK:
3917 nr = i_data[EXT4_TIND_BLOCK];
ac27a0ec 3918 if (nr) {
617ba13b
MC
3919 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
3920 i_data[EXT4_TIND_BLOCK] = 0;
ac27a0ec 3921 }
617ba13b 3922 case EXT4_TIND_BLOCK:
ac27a0ec
DK
3923 ;
3924 }
3925
0e855ac8 3926 up_write(&ei->i_data_sem);
ef7f3835 3927 inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
617ba13b 3928 ext4_mark_inode_dirty(handle, inode);
ac27a0ec
DK
3929
3930 /*
3931 * In a multi-transaction truncate, we only make the final transaction
3932 * synchronous
3933 */
3934 if (IS_SYNC(inode))
0390131b 3935 ext4_handle_sync(handle);
ac27a0ec
DK
3936out_stop:
3937 /*
3938 * If this was a simple ftruncate(), and the file will remain alive
3939 * then we need to clear up the orphan record which we created above.
3940 * However, if this was a real unlink then we were called by
617ba13b 3941 * ext4_delete_inode(), and we allow that function to clean up the
ac27a0ec
DK
3942 * orphan info for us.
3943 */
3944 if (inode->i_nlink)
617ba13b 3945 ext4_orphan_del(handle, inode);
ac27a0ec 3946
617ba13b 3947 ext4_journal_stop(handle);
ac27a0ec
DK
3948}
3949
ac27a0ec 3950/*
617ba13b 3951 * ext4_get_inode_loc returns with an extra refcount against the inode's
ac27a0ec
DK
3952 * underlying buffer_head on success. If 'in_mem' is true, we have all
3953 * data in memory that is needed to recreate the on-disk version of this
3954 * inode.
3955 */
617ba13b
MC
3956static int __ext4_get_inode_loc(struct inode *inode,
3957 struct ext4_iloc *iloc, int in_mem)
ac27a0ec 3958{
240799cd
TT
3959 struct ext4_group_desc *gdp;
3960 struct buffer_head *bh;
3961 struct super_block *sb = inode->i_sb;
3962 ext4_fsblk_t block;
3963 int inodes_per_block, inode_offset;
3964
3a06d778 3965 iloc->bh = NULL;
240799cd
TT
3966 if (!ext4_valid_inum(sb, inode->i_ino))
3967 return -EIO;
ac27a0ec 3968
240799cd
TT
3969 iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
3970 gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
3971 if (!gdp)
ac27a0ec
DK
3972 return -EIO;
3973
240799cd
TT
3974 /*
3975 * Figure out the offset within the block group inode table
3976 */
3977 inodes_per_block = (EXT4_BLOCK_SIZE(sb) / EXT4_INODE_SIZE(sb));
3978 inode_offset = ((inode->i_ino - 1) %
3979 EXT4_INODES_PER_GROUP(sb));
3980 block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
3981 iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
3982
3983 bh = sb_getblk(sb, block);
ac27a0ec 3984 if (!bh) {
240799cd
TT
3985 ext4_error(sb, "ext4_get_inode_loc", "unable to read "
3986 "inode block - inode=%lu, block=%llu",
3987 inode->i_ino, block);
ac27a0ec
DK
3988 return -EIO;
3989 }
3990 if (!buffer_uptodate(bh)) {
3991 lock_buffer(bh);
9c83a923
HK
3992
3993 /*
3994 * If the buffer has the write error flag, we have failed
3995 * to write out another inode in the same block. In this
3996 * case, we don't have to read the block because we may
3997 * read the old inode data successfully.
3998 */
3999 if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
4000 set_buffer_uptodate(bh);
4001
ac27a0ec
DK
4002 if (buffer_uptodate(bh)) {
4003 /* someone brought it uptodate while we waited */
4004 unlock_buffer(bh);
4005 goto has_buffer;
4006 }
4007
4008 /*
4009 * If we have all information of the inode in memory and this
4010 * is the only valid inode in the block, we need not read the
4011 * block.
4012 */
4013 if (in_mem) {
4014 struct buffer_head *bitmap_bh;
240799cd 4015 int i, start;
ac27a0ec 4016
240799cd 4017 start = inode_offset & ~(inodes_per_block - 1);
ac27a0ec 4018
240799cd
TT
4019 /* Is the inode bitmap in cache? */
4020 bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
ac27a0ec
DK
4021 if (!bitmap_bh)
4022 goto make_io;
4023
4024 /*
4025 * If the inode bitmap isn't in cache then the
4026 * optimisation may end up performing two reads instead
4027 * of one, so skip it.
4028 */
4029 if (!buffer_uptodate(bitmap_bh)) {
4030 brelse(bitmap_bh);
4031 goto make_io;
4032 }
240799cd 4033 for (i = start; i < start + inodes_per_block; i++) {
ac27a0ec
DK
4034 if (i == inode_offset)
4035 continue;
617ba13b 4036 if (ext4_test_bit(i, bitmap_bh->b_data))
ac27a0ec
DK
4037 break;
4038 }
4039 brelse(bitmap_bh);
240799cd 4040 if (i == start + inodes_per_block) {
ac27a0ec
DK
4041 /* all other inodes are free, so skip I/O */
4042 memset(bh->b_data, 0, bh->b_size);
4043 set_buffer_uptodate(bh);
4044 unlock_buffer(bh);
4045 goto has_buffer;
4046 }
4047 }
4048
4049make_io:
240799cd
TT
4050 /*
4051 * If we need to do any I/O, try to pre-readahead extra
4052 * blocks from the inode table.
4053 */
4054 if (EXT4_SB(sb)->s_inode_readahead_blks) {
4055 ext4_fsblk_t b, end, table;
4056 unsigned num;
4057
4058 table = ext4_inode_table(sb, gdp);
4059 /* Make sure s_inode_readahead_blks is a power of 2 */
4060 while (EXT4_SB(sb)->s_inode_readahead_blks &
4061 (EXT4_SB(sb)->s_inode_readahead_blks-1))
4062 EXT4_SB(sb)->s_inode_readahead_blks =
4063 (EXT4_SB(sb)->s_inode_readahead_blks &
4064 (EXT4_SB(sb)->s_inode_readahead_blks-1));
4065 b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1);
4066 if (table > b)
4067 b = table;
4068 end = b + EXT4_SB(sb)->s_inode_readahead_blks;
4069 num = EXT4_INODES_PER_GROUP(sb);
4070 if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
4071 EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
560671a0 4072 num -= ext4_itable_unused_count(sb, gdp);
240799cd
TT
4073 table += num / inodes_per_block;
4074 if (end > table)
4075 end = table;
4076 while (b <= end)
4077 sb_breadahead(sb, b++);
4078 }
4079
ac27a0ec
DK
4080 /*
4081 * There are other valid inodes in the buffer, this inode
4082 * has in-inode xattrs, or we don't have this inode in memory.
4083 * Read the block from disk.
4084 */
4085 get_bh(bh);
4086 bh->b_end_io = end_buffer_read_sync;
4087 submit_bh(READ_META, bh);
4088 wait_on_buffer(bh);
4089 if (!buffer_uptodate(bh)) {
240799cd
TT
4090 ext4_error(sb, __func__,
4091 "unable to read inode block - inode=%lu, "
4092 "block=%llu", inode->i_ino, block);
ac27a0ec
DK
4093 brelse(bh);
4094 return -EIO;
4095 }
4096 }
4097has_buffer:
4098 iloc->bh = bh;
4099 return 0;
4100}
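/*
 * Editor's worked example for the inode-location arithmetic above (not part
 * of the original file).  Assuming EXT4_INODES_PER_GROUP = 8192, a 256-byte
 * on-disk inode and a 4096-byte block, inode number 100 is found at:
 *
 *	block_group      = (100 - 1) / 8192               = 0
 *	inodes_per_block = 4096 / 256                      = 16
 *	inode_offset     = (100 - 1) % 8192                = 99
 *	block            = inode_table(group 0) + 99 / 16  = inode_table + 6
 *	iloc->offset     = (99 % 16) * 256                 = 768
 *
 * i.e. the fourth inode slot (byte offset 768) in the seventh block of
 * group 0's inode table.
 */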
4101
617ba13b 4102int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
ac27a0ec
DK
4103{
4104 /* We have all inode data except xattrs in memory here. */
617ba13b
MC
4105 return __ext4_get_inode_loc(inode, iloc,
4106 !(EXT4_I(inode)->i_state & EXT4_STATE_XATTR));
ac27a0ec
DK
4107}
4108
617ba13b 4109void ext4_set_inode_flags(struct inode *inode)
ac27a0ec 4110{
617ba13b 4111 unsigned int flags = EXT4_I(inode)->i_flags;
ac27a0ec
DK
4112
4113 inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
617ba13b 4114 if (flags & EXT4_SYNC_FL)
ac27a0ec 4115 inode->i_flags |= S_SYNC;
617ba13b 4116 if (flags & EXT4_APPEND_FL)
ac27a0ec 4117 inode->i_flags |= S_APPEND;
617ba13b 4118 if (flags & EXT4_IMMUTABLE_FL)
ac27a0ec 4119 inode->i_flags |= S_IMMUTABLE;
617ba13b 4120 if (flags & EXT4_NOATIME_FL)
ac27a0ec 4121 inode->i_flags |= S_NOATIME;
617ba13b 4122 if (flags & EXT4_DIRSYNC_FL)
ac27a0ec
DK
4123 inode->i_flags |= S_DIRSYNC;
4124}
4125
ff9ddf7e
JK
4126/* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
4127void ext4_get_inode_flags(struct ext4_inode_info *ei)
4128{
4129 unsigned int flags = ei->vfs_inode.i_flags;
4130
4131 ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
4132 EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL);
4133 if (flags & S_SYNC)
4134 ei->i_flags |= EXT4_SYNC_FL;
4135 if (flags & S_APPEND)
4136 ei->i_flags |= EXT4_APPEND_FL;
4137 if (flags & S_IMMUTABLE)
4138 ei->i_flags |= EXT4_IMMUTABLE_FL;
4139 if (flags & S_NOATIME)
4140 ei->i_flags |= EXT4_NOATIME_FL;
4141 if (flags & S_DIRSYNC)
4142 ei->i_flags |= EXT4_DIRSYNC_FL;
4143}
0fc1b451
AK
4144static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
4145 struct ext4_inode_info *ei)
4146{
4147 blkcnt_t i_blocks ;
8180a562
AK
4148 struct inode *inode = &(ei->vfs_inode);
4149 struct super_block *sb = inode->i_sb;
0fc1b451
AK
4150
4151 if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
4152 EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
4153 /* we are using combined 48 bit field */
4154 i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
4155 le32_to_cpu(raw_inode->i_blocks_lo);
8180a562
AK
4156 if (ei->i_flags & EXT4_HUGE_FILE_FL) {
4157 /* i_blocks represent file system block size */
4158 return i_blocks << (inode->i_blkbits - 9);
4159 } else {
4160 return i_blocks;
4161 }
0fc1b451
AK
4162 } else {
4163 return le32_to_cpu(raw_inode->i_blocks_lo);
4164 }
4165}
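/*
 * Editor's worked example for the i_blocks decoding above (not part of the
 * original file).  Assuming the HUGE_FILE feature is enabled, a 4K block
 * size (i_blkbits = 12) and an on-disk inode with
 *
 *	i_blocks_high = 0x0001, i_blocks_lo = 0x00000010
 *
 * the combined 48-bit value is (0x0001ULL << 32) | 0x10 = 0x100000010.
 * Without EXT4_HUGE_FILE_FL that is already a count of 512-byte sectors;
 * with the flag set it counts filesystem blocks, so it is scaled by
 * (i_blkbits - 9) = 3 bits, giving 0x100000010 << 3 = 0x800000080 sectors.
 */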
ff9ddf7e 4166
1d1fe1ee 4167struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
ac27a0ec 4168{
617ba13b
MC
4169 struct ext4_iloc iloc;
4170 struct ext4_inode *raw_inode;
1d1fe1ee 4171 struct ext4_inode_info *ei;
ac27a0ec 4172 struct buffer_head *bh;
1d1fe1ee
DH
4173 struct inode *inode;
4174 long ret;
ac27a0ec
DK
4175 int block;
4176
1d1fe1ee
DH
4177 inode = iget_locked(sb, ino);
4178 if (!inode)
4179 return ERR_PTR(-ENOMEM);
4180 if (!(inode->i_state & I_NEW))
4181 return inode;
4182
4183 ei = EXT4_I(inode);
03010a33 4184#ifdef CONFIG_EXT4_FS_POSIX_ACL
617ba13b
MC
4185 ei->i_acl = EXT4_ACL_NOT_CACHED;
4186 ei->i_default_acl = EXT4_ACL_NOT_CACHED;
ac27a0ec 4187#endif
ac27a0ec 4188
1d1fe1ee
DH
4189 ret = __ext4_get_inode_loc(inode, &iloc, 0);
4190 if (ret < 0)
ac27a0ec
DK
4191 goto bad_inode;
4192 bh = iloc.bh;
617ba13b 4193 raw_inode = ext4_raw_inode(&iloc);
ac27a0ec
DK
4194 inode->i_mode = le16_to_cpu(raw_inode->i_mode);
4195 inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
4196 inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
af5bc92d 4197 if (!(test_opt(inode->i_sb, NO_UID32))) {
ac27a0ec
DK
4198 inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
4199 inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
4200 }
4201 inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
ac27a0ec
DK
4202
4203 ei->i_state = 0;
4204 ei->i_dir_start_lookup = 0;
4205 ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
4206 /* We now have enough fields to check if the inode was active or not.
4207 * This is needed because nfsd might try to access dead inodes
 4208	 * the test is the same one that e2fsck uses
4209 * NeilBrown 1999oct15
4210 */
4211 if (inode->i_nlink == 0) {
4212 if (inode->i_mode == 0 ||
617ba13b 4213 !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
ac27a0ec 4214 /* this inode is deleted */
af5bc92d 4215 brelse(bh);
1d1fe1ee 4216 ret = -ESTALE;
ac27a0ec
DK
4217 goto bad_inode;
4218 }
4219 /* The only unlinked inodes we let through here have
4220 * valid i_mode and are being read by the orphan
4221 * recovery code: that's fine, we're about to complete
4222 * the process of deleting those. */
4223 }
ac27a0ec 4224 ei->i_flags = le32_to_cpu(raw_inode->i_flags);
0fc1b451 4225 inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
7973c0c1 4226 ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
9b8f1f01 4227 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
a48380f7 4228 cpu_to_le32(EXT4_OS_HURD)) {
a1ddeb7e
BP
4229 ei->i_file_acl |=
4230 ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
ac27a0ec 4231 }
a48380f7 4232 inode->i_size = ext4_isize(raw_inode);
ac27a0ec
DK
4233 ei->i_disksize = inode->i_size;
4234 inode->i_generation = le32_to_cpu(raw_inode->i_generation);
4235 ei->i_block_group = iloc.block_group;
4236 /*
4237 * NOTE! The in-memory inode i_data array is in little-endian order
4238 * even on big-endian machines: we do NOT byteswap the block numbers!
4239 */
617ba13b 4240 for (block = 0; block < EXT4_N_BLOCKS; block++)
ac27a0ec
DK
4241 ei->i_data[block] = raw_inode->i_block[block];
4242 INIT_LIST_HEAD(&ei->i_orphan);
4243
0040d987 4244 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
ac27a0ec 4245 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
617ba13b 4246 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
e5d2861f 4247 EXT4_INODE_SIZE(inode->i_sb)) {
af5bc92d 4248 brelse(bh);
1d1fe1ee 4249 ret = -EIO;
ac27a0ec 4250 goto bad_inode;
e5d2861f 4251 }
ac27a0ec
DK
4252 if (ei->i_extra_isize == 0) {
4253 /* The extra space is currently unused. Use it. */
617ba13b
MC
4254 ei->i_extra_isize = sizeof(struct ext4_inode) -
4255 EXT4_GOOD_OLD_INODE_SIZE;
ac27a0ec
DK
4256 } else {
4257 __le32 *magic = (void *)raw_inode +
617ba13b 4258 EXT4_GOOD_OLD_INODE_SIZE +
ac27a0ec 4259 ei->i_extra_isize;
617ba13b
MC
4260 if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
4261 ei->i_state |= EXT4_STATE_XATTR;
ac27a0ec
DK
4262 }
4263 } else
4264 ei->i_extra_isize = 0;
4265
ef7f3835
KS
4266 EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
4267 EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
4268 EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
4269 EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
4270
25ec56b5
JNC
4271 inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
4272 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4273 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4274 inode->i_version |=
4275 (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
4276 }
4277
ac27a0ec 4278 if (S_ISREG(inode->i_mode)) {
617ba13b
MC
4279 inode->i_op = &ext4_file_inode_operations;
4280 inode->i_fop = &ext4_file_operations;
4281 ext4_set_aops(inode);
ac27a0ec 4282 } else if (S_ISDIR(inode->i_mode)) {
617ba13b
MC
4283 inode->i_op = &ext4_dir_inode_operations;
4284 inode->i_fop = &ext4_dir_operations;
ac27a0ec 4285 } else if (S_ISLNK(inode->i_mode)) {
e83c1397 4286 if (ext4_inode_is_fast_symlink(inode)) {
617ba13b 4287 inode->i_op = &ext4_fast_symlink_inode_operations;
e83c1397
DG
4288 nd_terminate_link(ei->i_data, inode->i_size,
4289 sizeof(ei->i_data) - 1);
4290 } else {
617ba13b
MC
4291 inode->i_op = &ext4_symlink_inode_operations;
4292 ext4_set_aops(inode);
ac27a0ec
DK
4293 }
4294 } else {
617ba13b 4295 inode->i_op = &ext4_special_inode_operations;
ac27a0ec
DK
4296 if (raw_inode->i_block[0])
4297 init_special_inode(inode, inode->i_mode,
4298 old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
4299 else
4300 init_special_inode(inode, inode->i_mode,
4301 new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
4302 }
af5bc92d 4303 brelse(iloc.bh);
617ba13b 4304 ext4_set_inode_flags(inode);
1d1fe1ee
DH
4305 unlock_new_inode(inode);
4306 return inode;
ac27a0ec
DK
4307
4308bad_inode:
1d1fe1ee
DH
4309 iget_failed(inode);
4310 return ERR_PTR(ret);
ac27a0ec
DK
4311}
4312
0fc1b451
AK
4313static int ext4_inode_blocks_set(handle_t *handle,
4314 struct ext4_inode *raw_inode,
4315 struct ext4_inode_info *ei)
4316{
4317 struct inode *inode = &(ei->vfs_inode);
4318 u64 i_blocks = inode->i_blocks;
4319 struct super_block *sb = inode->i_sb;
0fc1b451
AK
4320
4321 if (i_blocks <= ~0U) {
4322 /*
 4323		 * i_blocks can be represented in a 32 bit variable
4324 * as multiple of 512 bytes
4325 */
8180a562 4326 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
0fc1b451 4327 raw_inode->i_blocks_high = 0;
8180a562 4328 ei->i_flags &= ~EXT4_HUGE_FILE_FL;
f287a1a5
TT
4329 return 0;
4330 }
4331 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
4332 return -EFBIG;
4333
4334 if (i_blocks <= 0xffffffffffffULL) {
0fc1b451
AK
4335 /*
4336 * i_blocks can be represented in a 48 bit variable
4337 * as multiple of 512 bytes
4338 */
8180a562 4339 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
0fc1b451 4340 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
8180a562 4341 ei->i_flags &= ~EXT4_HUGE_FILE_FL;
0fc1b451 4342 } else {
8180a562
AK
4343 ei->i_flags |= EXT4_HUGE_FILE_FL;
4344 /* i_block is stored in file system block size */
4345 i_blocks = i_blocks >> (inode->i_blkbits - 9);
4346 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4347 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
0fc1b451 4348 }
f287a1a5 4349 return 0;
0fc1b451
AK
4350}
4351
ac27a0ec
DK
4352/*
4353 * Post the struct inode info into an on-disk inode location in the
4354 * buffer-cache. This gobbles the caller's reference to the
4355 * buffer_head in the inode location struct.
4356 *
4357 * The caller must have write access to iloc->bh.
4358 */
617ba13b 4359static int ext4_do_update_inode(handle_t *handle,
ac27a0ec 4360 struct inode *inode,
617ba13b 4361 struct ext4_iloc *iloc)
ac27a0ec 4362{
617ba13b
MC
4363 struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
4364 struct ext4_inode_info *ei = EXT4_I(inode);
ac27a0ec
DK
4365 struct buffer_head *bh = iloc->bh;
4366 int err = 0, rc, block;
4367
 4368	/* For fields not tracked in the in-memory inode,
4369 * initialise them to zero for new inodes. */
617ba13b
MC
4370 if (ei->i_state & EXT4_STATE_NEW)
4371 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
ac27a0ec 4372
ff9ddf7e 4373 ext4_get_inode_flags(ei);
ac27a0ec 4374 raw_inode->i_mode = cpu_to_le16(inode->i_mode);
af5bc92d 4375 if (!(test_opt(inode->i_sb, NO_UID32))) {
ac27a0ec
DK
4376 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
4377 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
4378/*
4379 * Fix up interoperability with old kernels. Otherwise, old inodes get
4380 * re-used with the upper 16 bits of the uid/gid intact
4381 */
af5bc92d 4382 if (!ei->i_dtime) {
ac27a0ec
DK
4383 raw_inode->i_uid_high =
4384 cpu_to_le16(high_16_bits(inode->i_uid));
4385 raw_inode->i_gid_high =
4386 cpu_to_le16(high_16_bits(inode->i_gid));
4387 } else {
4388 raw_inode->i_uid_high = 0;
4389 raw_inode->i_gid_high = 0;
4390 }
4391 } else {
4392 raw_inode->i_uid_low =
4393 cpu_to_le16(fs_high2lowuid(inode->i_uid));
4394 raw_inode->i_gid_low =
4395 cpu_to_le16(fs_high2lowgid(inode->i_gid));
4396 raw_inode->i_uid_high = 0;
4397 raw_inode->i_gid_high = 0;
4398 }
4399 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
ef7f3835
KS
4400
4401 EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
4402 EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
4403 EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
4404 EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
4405
0fc1b451
AK
4406 if (ext4_inode_blocks_set(handle, raw_inode, ei))
4407 goto out_brelse;
ac27a0ec 4408 raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
267e4db9
AK
4409 /* clear the migrate flag in the raw_inode */
4410 raw_inode->i_flags = cpu_to_le32(ei->i_flags & ~EXT4_EXT_MIGRATE);
9b8f1f01
MC
4411 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
4412 cpu_to_le32(EXT4_OS_HURD))
a1ddeb7e
BP
4413 raw_inode->i_file_acl_high =
4414 cpu_to_le16(ei->i_file_acl >> 32);
7973c0c1 4415 raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
a48380f7
AK
4416 ext4_isize_set(raw_inode, ei->i_disksize);
4417 if (ei->i_disksize > 0x7fffffffULL) {
4418 struct super_block *sb = inode->i_sb;
4419 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
4420 EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
4421 EXT4_SB(sb)->s_es->s_rev_level ==
4422 cpu_to_le32(EXT4_GOOD_OLD_REV)) {
4423 /* If this is the first large file
4424 * created, add a flag to the superblock.
4425 */
4426 err = ext4_journal_get_write_access(handle,
4427 EXT4_SB(sb)->s_sbh);
4428 if (err)
4429 goto out_brelse;
4430 ext4_update_dynamic_rev(sb);
4431 EXT4_SET_RO_COMPAT_FEATURE(sb,
617ba13b 4432 EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
a48380f7 4433 sb->s_dirt = 1;
0390131b
FM
4434 ext4_handle_sync(handle);
4435 err = ext4_handle_dirty_metadata(handle, inode,
a48380f7 4436 EXT4_SB(sb)->s_sbh);
ac27a0ec
DK
4437 }
4438 }
4439 raw_inode->i_generation = cpu_to_le32(inode->i_generation);
4440 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
4441 if (old_valid_dev(inode->i_rdev)) {
4442 raw_inode->i_block[0] =
4443 cpu_to_le32(old_encode_dev(inode->i_rdev));
4444 raw_inode->i_block[1] = 0;
4445 } else {
4446 raw_inode->i_block[0] = 0;
4447 raw_inode->i_block[1] =
4448 cpu_to_le32(new_encode_dev(inode->i_rdev));
4449 raw_inode->i_block[2] = 0;
4450 }
617ba13b 4451 } else for (block = 0; block < EXT4_N_BLOCKS; block++)
ac27a0ec
DK
4452 raw_inode->i_block[block] = ei->i_data[block];
4453
25ec56b5
JNC
4454 raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
4455 if (ei->i_extra_isize) {
4456 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4457 raw_inode->i_version_hi =
4458 cpu_to_le32(inode->i_version >> 32);
ac27a0ec 4459 raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
25ec56b5
JNC
4460 }
4461
0390131b
FM
4462 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
4463 rc = ext4_handle_dirty_metadata(handle, inode, bh);
ac27a0ec
DK
4464 if (!err)
4465 err = rc;
617ba13b 4466 ei->i_state &= ~EXT4_STATE_NEW;
ac27a0ec
DK
4467
4468out_brelse:
af5bc92d 4469 brelse(bh);
617ba13b 4470 ext4_std_error(inode->i_sb, err);
ac27a0ec
DK
4471 return err;
4472}
4473
4474/*
617ba13b 4475 * ext4_write_inode()
ac27a0ec
DK
4476 *
4477 * We are called from a few places:
4478 *
4479 * - Within generic_file_write() for O_SYNC files.
4480 * Here, there will be no transaction running. We wait for any running
 4481 *   transaction to commit.
4482 *
4483 * - Within sys_sync(), kupdate and such.
 4484 *   We wait on commit, if told to.
4485 *
4486 * - Within prune_icache() (PF_MEMALLOC == true)
4487 * Here we simply return. We can't afford to block kswapd on the
4488 * journal commit.
4489 *
4490 * In all cases it is actually safe for us to return without doing anything,
4491 * because the inode has been copied into a raw inode buffer in
617ba13b 4492 * ext4_mark_inode_dirty(). This is a correctness thing for O_SYNC and for
ac27a0ec
DK
4493 * knfsd.
4494 *
4495 * Note that we are absolutely dependent upon all inode dirtiers doing the
4496 * right thing: they *must* call mark_inode_dirty() after dirtying info in
4497 * which we are interested.
4498 *
4499 * It would be a bug for them to not do this. The code:
4500 *
4501 * mark_inode_dirty(inode)
4502 * stuff();
4503 * inode->i_size = expr;
4504 *
4505 * is in error because a kswapd-driven write_inode() could occur while
4506 * `stuff()' is running, and the new i_size will be lost. Plus the inode
4507 * will no longer be on the superblock's dirty inode list.
4508 */
617ba13b 4509int ext4_write_inode(struct inode *inode, int wait)
ac27a0ec
DK
4510{
4511 if (current->flags & PF_MEMALLOC)
4512 return 0;
4513
617ba13b 4514 if (ext4_journal_current_handle()) {
b38bd33a 4515 jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
ac27a0ec
DK
4516 dump_stack();
4517 return -EIO;
4518 }
4519
4520 if (!wait)
4521 return 0;
4522
617ba13b 4523 return ext4_force_commit(inode->i_sb);
ac27a0ec
DK
4524}
4525
0390131b
FM
4526int __ext4_write_dirty_metadata(struct inode *inode, struct buffer_head *bh)
4527{
4528 int err = 0;
4529
4530 mark_buffer_dirty(bh);
4531 if (inode && inode_needs_sync(inode)) {
4532 sync_dirty_buffer(bh);
4533 if (buffer_req(bh) && !buffer_uptodate(bh)) {
4534 ext4_error(inode->i_sb, __func__,
4535 "IO error syncing inode, "
4536 "inode=%lu, block=%llu",
4537 inode->i_ino,
4538 (unsigned long long)bh->b_blocknr);
4539 err = -EIO;
4540 }
4541 }
4542 return err;
4543}
4544
ac27a0ec 4545/*
617ba13b 4546 * ext4_setattr()
ac27a0ec
DK
4547 *
4548 * Called from notify_change.
4549 *
4550 * We want to trap VFS attempts to truncate the file as soon as
4551 * possible. In particular, we want to make sure that when the VFS
4552 * shrinks i_size, we put the inode on the orphan list and modify
4553 * i_disksize immediately, so that during the subsequent flushing of
4554 * dirty pages and freeing of disk blocks, we can guarantee that any
4555 * commit will leave the blocks being flushed in an unused state on
4556 * disk. (On recovery, the inode will get truncated and the blocks will
4557 * be freed, so we have a strong guarantee that no future commit will
4558 * leave these blocks visible to the user.)
4559 *
678aaf48
JK
 4560 * Another thing we have to ensure is that if we are in ordered mode
 4561 * and the inode is still attached to the committing transaction, we must
 4562 * start writeout of all the dirty pages which are being truncated.
4563 * This way we are sure that all the data written in the previous
4564 * transaction are already on disk (truncate waits for pages under
4565 * writeback).
4566 *
4567 * Called with inode->i_mutex down.
ac27a0ec 4568 */
617ba13b 4569int ext4_setattr(struct dentry *dentry, struct iattr *attr)
ac27a0ec
DK
4570{
4571 struct inode *inode = dentry->d_inode;
4572 int error, rc = 0;
4573 const unsigned int ia_valid = attr->ia_valid;
4574
4575 error = inode_change_ok(inode, attr);
4576 if (error)
4577 return error;
4578
4579 if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
4580 (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
4581 handle_t *handle;
4582
4583 /* (user+group)*(old+new) structure, inode write (sb,
4584 * inode block, ? - but truncate inode update has it) */
617ba13b
MC
4585 handle = ext4_journal_start(inode, 2*(EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)+
4586 EXT4_QUOTA_DEL_BLOCKS(inode->i_sb))+3);
ac27a0ec
DK
4587 if (IS_ERR(handle)) {
4588 error = PTR_ERR(handle);
4589 goto err_out;
4590 }
4591 error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
4592 if (error) {
617ba13b 4593 ext4_journal_stop(handle);
ac27a0ec
DK
4594 return error;
4595 }
4596 /* Update corresponding info in inode so that everything is in
4597 * one transaction */
4598 if (attr->ia_valid & ATTR_UID)
4599 inode->i_uid = attr->ia_uid;
4600 if (attr->ia_valid & ATTR_GID)
4601 inode->i_gid = attr->ia_gid;
617ba13b
MC
4602 error = ext4_mark_inode_dirty(handle, inode);
4603 ext4_journal_stop(handle);
ac27a0ec
DK
4604 }
4605
e2b46574
ES
4606 if (attr->ia_valid & ATTR_SIZE) {
4607 if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) {
4608 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4609
4610 if (attr->ia_size > sbi->s_bitmap_maxbytes) {
4611 error = -EFBIG;
4612 goto err_out;
4613 }
4614 }
4615 }
4616
ac27a0ec
DK
4617 if (S_ISREG(inode->i_mode) &&
4618 attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
4619 handle_t *handle;
4620
617ba13b 4621 handle = ext4_journal_start(inode, 3);
ac27a0ec
DK
4622 if (IS_ERR(handle)) {
4623 error = PTR_ERR(handle);
4624 goto err_out;
4625 }
4626
617ba13b
MC
4627 error = ext4_orphan_add(handle, inode);
4628 EXT4_I(inode)->i_disksize = attr->ia_size;
4629 rc = ext4_mark_inode_dirty(handle, inode);
ac27a0ec
DK
4630 if (!error)
4631 error = rc;
617ba13b 4632 ext4_journal_stop(handle);
678aaf48
JK
4633
4634 if (ext4_should_order_data(inode)) {
4635 error = ext4_begin_ordered_truncate(inode,
4636 attr->ia_size);
4637 if (error) {
4638 /* Do as much error cleanup as possible */
4639 handle = ext4_journal_start(inode, 3);
4640 if (IS_ERR(handle)) {
4641 ext4_orphan_del(NULL, inode);
4642 goto err_out;
4643 }
4644 ext4_orphan_del(handle, inode);
4645 ext4_journal_stop(handle);
4646 goto err_out;
4647 }
4648 }
ac27a0ec
DK
4649 }
4650
4651 rc = inode_setattr(inode, attr);
4652
617ba13b 4653 /* If inode_setattr's call to ext4_truncate failed to get a
ac27a0ec
DK
4654 * transaction handle at all, we need to clean up the in-core
4655 * orphan list manually. */
4656 if (inode->i_nlink)
617ba13b 4657 ext4_orphan_del(NULL, inode);
ac27a0ec
DK
4658
4659 if (!rc && (ia_valid & ATTR_MODE))
617ba13b 4660 rc = ext4_acl_chmod(inode);
ac27a0ec
DK
4661
4662err_out:
617ba13b 4663 ext4_std_error(inode->i_sb, error);
ac27a0ec
DK
4664 if (!error)
4665 error = rc;
4666 return error;
4667}
4668
3e3398a0
MC
4669int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
4670 struct kstat *stat)
4671{
4672 struct inode *inode;
4673 unsigned long delalloc_blocks;
4674
4675 inode = dentry->d_inode;
4676 generic_fillattr(inode, stat);
4677
4678 /*
4679 * We can't update i_blocks if the block allocation is delayed
4680 * otherwise in the case of system crash before the real block
4681 * allocation is done, we will have i_blocks inconsistent with
4682 * on-disk file blocks.
4683 * We always keep i_blocks updated together with real
 4684	 * allocation. But so as not to confuse userspace, stat
4685 * will return the blocks that include the delayed allocation
4686 * blocks for this file.
4687 */
4688 spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
4689 delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
4690 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
4691
4692 stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
4693 return 0;
4694}
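/*
 * Editor's worked example for the stat adjustment above (not part of the
 * original file).  Assuming a 4K block size and 10 delayed-allocation
 * blocks reserved, stat->blocks grows by (10 << 12) >> 9 = 80 sectors of
 * 512 bytes, so userspace sees the space the file will eventually consume
 * even though the blocks have not been allocated on disk yet.
 */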
ac27a0ec 4695
a02908f1
MC
4696static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks,
4697 int chunk)
4698{
4699 int indirects;
4700
4701 /* if nrblocks are contiguous */
4702 if (chunk) {
4703 /*
 4704		 * With N contiguous data blocks, we need at most
4705 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) indirect blocks
4706 * 2 dindirect blocks
4707 * 1 tindirect block
4708 */
4709 indirects = nrblocks / EXT4_ADDR_PER_BLOCK(inode->i_sb);
4710 return indirects + 3;
4711 }
4712 /*
 4713	 * If nrblocks are not contiguous, worst case, each block touches
 4714	 * an indirect block, and each indirect block touches a double indirect
 4715	 * block, plus a triple indirect block
4716 */
4717 indirects = nrblocks * 2 + 1;
4718 return indirects;
4719}
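/*
 * Editor's worked example for the estimates above (not part of the original
 * file).  Assuming a 4K block size, EXT4_ADDR_PER_BLOCK = 1024:
 *
 *	chunk = 1, nrblocks = 100:  100/1024 + 3 = 3 index blocks at most
 *	chunk = 0, nrblocks = 4:    4*2 + 1      = 9 index blocks at most
 *
 * The non-contiguous case is deliberately pessimistic: every data block is
 * assumed to touch its own indirect and double-indirect block.
 */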
4720
4721static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
4722{
4723 if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
ac51d837
TT
4724 return ext4_indirect_trans_blocks(inode, nrblocks, chunk);
4725 return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
a02908f1 4726}
ac51d837 4727
ac27a0ec 4728/*
a02908f1
MC
 4729 * Account for index blocks, block group bitmaps and block group
 4730 * descriptor blocks if we modify data blocks and index blocks. In the
 4731 * worst case, the index blocks are spread over different block groups.
ac27a0ec 4732 *
a02908f1
MC
 4733 * If data blocks are discontiguous, they may also spread over
 4734 * different block groups. If they are contiguous, with flexbg,
 4735 * they could still cross a block group boundary.
ac27a0ec 4736 *
a02908f1
MC
4737 * Also account for superblock, inode, quota and xattr blocks
4738 */
4739int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
4740{
4741 int groups, gdpblocks;
4742 int idxblocks;
4743 int ret = 0;
4744
4745 /*
 4746	 * How many index blocks do we need to touch to modify nrblocks?
 4747	 * The "Chunk" flag indicates whether the nrblocks are
 4748	 * physically contiguous on disk
 4749	 *
 4750	 * For Direct IO and fallocate, the caller calls get_block to allocate
 4751	 * a single extent at a time, so it can set the "Chunk" flag
4752 */
4753 idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);
4754
4755 ret = idxblocks;
4756
4757 /*
4758 * Now let's see how many group bitmaps and group descriptors need
4759 * to account
4760 */
4761 groups = idxblocks;
4762 if (chunk)
4763 groups += 1;
4764 else
4765 groups += nrblocks;
4766
4767 gdpblocks = groups;
4768 if (groups > EXT4_SB(inode->i_sb)->s_groups_count)
4769 groups = EXT4_SB(inode->i_sb)->s_groups_count;
4770 if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
4771 gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
4772
4773 /* bitmaps and block group descriptor blocks */
4774 ret += groups + gdpblocks;
4775
4776 /* Blocks for super block, inode, quota and xattr blocks */
4777 ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
4778
4779 return ret;
4780}
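/*
 * Editor's worked example for the credit accounting above (not part of the
 * original file).  For an indirect-mapped inode on a 4K filesystem, one
 * contiguous chunk of nrblocks = 100 (chunk = 1), and assuming the group
 * and gdb counts are large enough that no clamping occurs:
 *
 *	idxblocks = 100/1024 + 3      = 3
 *	groups    = idxblocks + 1     = 4
 *	gdpblocks = groups            = 4
 *	ret       = 3 + 4 + 4 + EXT4_META_TRANS_BLOCKS(sb)
 *
 * i.e. 11 credits for index, bitmap and descriptor blocks, plus the fixed
 * overhead for the superblock, inode, quota and xattr blocks.
 */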
4781
4782/*
 4783 * Calculate the total number of credits to reserve to fit
f3bd1f3f
MC
 4784 * the modification of a single page into a single transaction,
4785 * which may include multiple chunks of block allocations.
ac27a0ec 4786 *
525f4ed8 4787 * This could be called via ext4_write_begin()
ac27a0ec 4788 *
525f4ed8 4789 * We need to consider the worst case, when we need
a02908f1 4790 * one new block per extent.
ac27a0ec 4791 */
a86c6181 4792int ext4_writepage_trans_blocks(struct inode *inode)
ac27a0ec 4793{
617ba13b 4794 int bpp = ext4_journal_blocks_per_page(inode);
ac27a0ec
DK
4795 int ret;
4796
a02908f1 4797 ret = ext4_meta_trans_blocks(inode, bpp, 0);
a86c6181 4798
a02908f1 4799 /* Account for data blocks for journalled mode */
617ba13b 4800 if (ext4_should_journal_data(inode))
a02908f1 4801 ret += bpp;
ac27a0ec
DK
4802 return ret;
4803}
f3bd1f3f
MC
4804
4805/*
4806 * Calculate the journal credits for a chunk of data modification.
4807 *
 4808 * This is called from DIO, fallocate or whoever calls
 4809 * ext4_get_blocks_wrap() to map/allocate a chunk of contiguous disk blocks.
4810 *
 4811 * Journal buffers for data blocks are not included here, as DIO
 4812 * and fallocate do not need to journal data buffers.
4813 */
4814int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
4815{
4816 return ext4_meta_trans_blocks(inode, nrblocks, 1);
4817}
4818
ac27a0ec 4819/*
617ba13b 4820 * The caller must have previously called ext4_reserve_inode_write().
ac27a0ec
DK
 4821 * Given this, we know that the caller already has write access to iloc->bh.
4822 */
617ba13b
MC
4823int ext4_mark_iloc_dirty(handle_t *handle,
4824 struct inode *inode, struct ext4_iloc *iloc)
ac27a0ec
DK
4825{
4826 int err = 0;
4827
25ec56b5
JNC
4828 if (test_opt(inode->i_sb, I_VERSION))
4829 inode_inc_iversion(inode);
4830
ac27a0ec
DK
4831 /* ext4_do_update_inode() consumes one bh->b_count */
4832 get_bh(iloc->bh);
4833
dab291af 4834 /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
617ba13b 4835 err = ext4_do_update_inode(handle, inode, iloc);
ac27a0ec
DK
4836 put_bh(iloc->bh);
4837 return err;
4838}
4839
4840/*
4841 * On success, we end up with an outstanding reference count against
4842 * iloc->bh. This _must_ be cleaned up later.
4843 */
4844
4845int
617ba13b
MC
4846ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
4847 struct ext4_iloc *iloc)
ac27a0ec 4848{
0390131b
FM
4849 int err;
4850
4851 err = ext4_get_inode_loc(inode, iloc);
4852 if (!err) {
4853 BUFFER_TRACE(iloc->bh, "get_write_access");
4854 err = ext4_journal_get_write_access(handle, iloc->bh);
4855 if (err) {
4856 brelse(iloc->bh);
4857 iloc->bh = NULL;
ac27a0ec
DK
4858 }
4859 }
617ba13b 4860 ext4_std_error(inode->i_sb, err);
ac27a0ec
DK
4861 return err;
4862}
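A minimal sketch of how these two helpers pair up in practice (essentially what ext4_mark_inode_dirty() does further down; illustrative only, assumes kernel context):

	struct ext4_iloc iloc;
	int err;

	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (err)
		return err;		/* do not touch iloc on failure */
	/* ... update the in-core inode ... */
	err = ext4_mark_iloc_dirty(handle, inode, &iloc);	/* drops iloc.bh */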
4863
6dd4ee7c
KS
4864/*
4865 * Expand an inode by new_extra_isize bytes.
4866 * Returns 0 on success or a negative error number on failure.
4867 */
1d03ec98
AK
4868static int ext4_expand_extra_isize(struct inode *inode,
4869 unsigned int new_extra_isize,
4870 struct ext4_iloc iloc,
4871 handle_t *handle)
6dd4ee7c
KS
4872{
4873 struct ext4_inode *raw_inode;
4874 struct ext4_xattr_ibody_header *header;
4875 struct ext4_xattr_entry *entry;
4876
4877 if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
4878 return 0;
4879
4880 raw_inode = ext4_raw_inode(&iloc);
4881
4882 header = IHDR(inode, raw_inode);
4883 entry = IFIRST(header);
4884
4885 /* No extended attributes present */
4886 if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) ||
4887 header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
4888 memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
4889 new_extra_isize);
4890 EXT4_I(inode)->i_extra_isize = new_extra_isize;
4891 return 0;
4892 }
4893
4894 /* try to expand with EAs present */
4895 return ext4_expand_extra_isize_ea(inode, new_extra_isize,
4896 raw_inode, handle);
4897}
4898
ac27a0ec
DK
4899/*
4900 * What we do here is to mark the in-core inode as clean with respect to inode
4901 * dirtiness (it may still be data-dirty).
4902 * This means that the in-core inode may be reaped by prune_icache
4903 * without having to perform any I/O. This is a very good thing,
4904 * because *any* task may call prune_icache - even ones which
4905 * have a transaction open against a different journal.
4906 *
4907 * Is this cheating? Not really. Sure, we haven't written the
4908 * inode out, but prune_icache isn't a user-visible syncing function.
4909 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
4910 * we start and wait on commits.
4911 *
4912 * Is this efficient/effective? Well, we're being nice to the system
4913 * by cleaning up our inodes proactively so they can be reaped
4914 * without I/O. But we are potentially leaving up to five seconds'
4915 * worth of inodes floating about which prune_icache wants us to
4916 * write out. One way to fix that would be to get prune_icache()
4917 * to do a write_super() to free up some memory. It has the desired
4918 * effect.
4919 */
617ba13b 4920int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
ac27a0ec 4921{
617ba13b 4922 struct ext4_iloc iloc;
6dd4ee7c
KS
4923 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4924 static unsigned int mnt_count;
4925 int err, ret;
ac27a0ec
DK
4926
4927 might_sleep();
617ba13b 4928 err = ext4_reserve_inode_write(handle, inode, &iloc);
0390131b
FM
4929 if (ext4_handle_valid(handle) &&
4930 EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
6dd4ee7c
KS
4931 !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) {
4932 /*
4933 * We need extra buffer credits since we may write into the EA block
4934 * with this same handle. If journal_extend fails, then it will
4935 * only result in a minor loss of functionality for that inode.
4936 * If this is felt to be critical, then e2fsck should be run to
4937 * force a large enough s_min_extra_isize.
4938 */
4939 if ((jbd2_journal_extend(handle,
4940 EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
4941 ret = ext4_expand_extra_isize(inode,
4942 sbi->s_want_extra_isize,
4943 iloc, handle);
4944 if (ret) {
4945 EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
c1bddad9
AK
4946 if (mnt_count !=
4947 le16_to_cpu(sbi->s_es->s_mnt_count)) {
46e665e9 4948 ext4_warning(inode->i_sb, __func__,
6dd4ee7c
KS
4949 "Unable to expand inode %lu. Delete"
4950 " some EAs or run e2fsck.",
4951 inode->i_ino);
c1bddad9
AK
4952 mnt_count =
4953 le16_to_cpu(sbi->s_es->s_mnt_count);
6dd4ee7c
KS
4954 }
4955 }
4956 }
4957 }
ac27a0ec 4958 if (!err)
617ba13b 4959 err = ext4_mark_iloc_dirty(handle, inode, &iloc);
ac27a0ec
DK
4960 return err;
4961}
4962
4963/*
617ba13b 4964 * ext4_dirty_inode() is called from __mark_inode_dirty()
ac27a0ec
DK
4965 *
4966 * We're really interested in the case where a file is being extended.
4967 * i_size has been changed by generic_commit_write() and we thus need
4968 * to include the updated inode in the current transaction.
4969 *
4970 * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
4971 * are allocated to the file.
4972 *
4973 * If the inode is marked synchronous, we don't honour that here - doing
4974 * so would cause a commit on atime updates, which we don't bother doing.
4975 * We handle synchronous inodes at the highest possible level.
4976 */
617ba13b 4977void ext4_dirty_inode(struct inode *inode)
ac27a0ec 4978{
617ba13b 4979 handle_t *current_handle = ext4_journal_current_handle();
ac27a0ec
DK
4980 handle_t *handle;
4981
0390131b
FM
4982 if (!ext4_handle_valid(current_handle)) {
4983 ext4_mark_inode_dirty(current_handle, inode);
4984 return;
4985 }
4986
617ba13b 4987 handle = ext4_journal_start(inode, 2);
ac27a0ec
DK
4988 if (IS_ERR(handle))
4989 goto out;
4990 if (current_handle &&
4991 current_handle->h_transaction != handle->h_transaction) {
4992 /* This task has a transaction open against a different fs */
4993 printk(KERN_EMERG "%s: transactions do not match!\n",
46e665e9 4994 __func__);
ac27a0ec
DK
4995 } else {
4996 jbd_debug(5, "marking dirty. outer handle=%p\n",
4997 current_handle);
617ba13b 4998 ext4_mark_inode_dirty(handle, inode);
ac27a0ec 4999 }
617ba13b 5000 ext4_journal_stop(handle);
ac27a0ec
DK
5001out:
5002 return;
5003}
5004
5005#if 0
5006/*
5007 * Bind an inode's backing buffer_head into this transaction, to prevent
5008 * it from being flushed to disk early. Unlike
617ba13b 5009 * ext4_reserve_inode_write, this leaves behind no bh reference and
ac27a0ec
DK
5010 * returns no iloc structure, so the caller needs to repeat the iloc
5011 * lookup to mark the inode dirty later.
5012 */
617ba13b 5013static int ext4_pin_inode(handle_t *handle, struct inode *inode)
ac27a0ec 5014{
617ba13b 5015 struct ext4_iloc iloc;
ac27a0ec
DK
5016
5017 int err = 0;
5018 if (handle) {
617ba13b 5019 err = ext4_get_inode_loc(inode, &iloc);
ac27a0ec
DK
5020 if (!err) {
5021 BUFFER_TRACE(iloc.bh, "get_write_access");
dab291af 5022 err = jbd2_journal_get_write_access(handle, iloc.bh);
ac27a0ec 5023 if (!err)
0390131b
FM
5024 err = ext4_handle_dirty_metadata(handle,
5025 inode,
5026 iloc.bh);
ac27a0ec
DK
5027 brelse(iloc.bh);
5028 }
5029 }
617ba13b 5030 ext4_std_error(inode->i_sb, err);
ac27a0ec
DK
5031 return err;
5032}
5033#endif
5034
617ba13b 5035int ext4_change_inode_journal_flag(struct inode *inode, int val)
ac27a0ec
DK
5036{
5037 journal_t *journal;
5038 handle_t *handle;
5039 int err;
5040
5041 /*
5042 * We have to be very careful here: changing a data block's
5043 * journaling status dynamically is dangerous. If we write a
5044 * data block to the journal, change the status and then delete
5045 * that block, we risk forgetting to revoke the old log record
5046 * from the journal and so a subsequent replay can corrupt data.
5047 * So, first we make sure that the journal is empty and that
5048 * nobody is changing anything.
5049 */
5050
617ba13b 5051 journal = EXT4_JOURNAL(inode);
0390131b
FM
5052 if (!journal)
5053 return 0;
d699594d 5054 if (is_journal_aborted(journal))
ac27a0ec
DK
5055 return -EROFS;
5056
dab291af
MC
5057 jbd2_journal_lock_updates(journal);
5058 jbd2_journal_flush(journal);
ac27a0ec
DK
5059
5060 /*
5061 * OK, there are no updates running now, and all cached data is
5062 * synced to disk. We are now in a completely consistent state
5063 * which doesn't have anything in the journal, and we know that
5064 * no filesystem updates are running, so it is safe to modify
5065 * the inode's in-core data-journaling state flag now.
5066 */
5067
5068 if (val)
617ba13b 5069 EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL;
ac27a0ec 5070 else
617ba13b
MC
5071 EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL;
5072 ext4_set_aops(inode);
ac27a0ec 5073
dab291af 5074 jbd2_journal_unlock_updates(journal);
ac27a0ec
DK
5075
5076 /* Finally we can mark the inode as dirty. */
5077
617ba13b 5078 handle = ext4_journal_start(inode, 1);
ac27a0ec
DK
5079 if (IS_ERR(handle))
5080 return PTR_ERR(handle);
5081
617ba13b 5082 err = ext4_mark_inode_dirty(handle, inode);
0390131b 5083 ext4_handle_sync(handle);
617ba13b
MC
5084 ext4_journal_stop(handle);
5085 ext4_std_error(inode->i_sb, err);
ac27a0ec
DK
5086
5087 return err;
5088}
2e9ee850
AK
5089
5090static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
5091{
5092 return !buffer_mapped(bh);
5093}
5094
5095int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page)
5096{
5097 loff_t size;
5098 unsigned long len;
5099 int ret = -EINVAL;
79f0be8d 5100 void *fsdata;
2e9ee850
AK
5101 struct file *file = vma->vm_file;
5102 struct inode *inode = file->f_path.dentry->d_inode;
5103 struct address_space *mapping = inode->i_mapping;
5104
5105 /*
5106 * Get i_alloc_sem to stop truncates messing with the inode. We cannot
5107 * get i_mutex because we are already holding mmap_sem.
5108 */
5109 down_read(&inode->i_alloc_sem);
5110 size = i_size_read(inode);
5111 if (page->mapping != mapping || size <= page_offset(page)
5112 || !PageUptodate(page)) {
5113 /* page got truncated from under us? */
5114 goto out_unlock;
5115 }
5116 ret = 0;
5117 if (PageMappedToDisk(page))
5118 goto out_unlock;
5119
5120 if (page->index == size >> PAGE_CACHE_SHIFT)
5121 len = size & ~PAGE_CACHE_MASK;
5122 else
5123 len = PAGE_CACHE_SIZE;
5124
5125 if (page_has_buffers(page)) {
5126 /* return if we have all the buffers mapped */
5127 if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
5128 ext4_bh_unmapped))
5129 goto out_unlock;
5130 }
5131 /*
5132 * OK, we need to fill the hole... Do write_begin/write_end
5133 * to do the block allocation/reservation. We are not holding
5134 * inode->i_mutex here. That allows parallel write_begin and
5135 * write_end calls; lock_page prevents this from happening
5136 * on the same page, though.
5137 */
5138 ret = mapping->a_ops->write_begin(file, mapping, page_offset(page),
79f0be8d 5139 len, AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
2e9ee850
AK
5140 if (ret < 0)
5141 goto out_unlock;
5142 ret = mapping->a_ops->write_end(file, mapping, page_offset(page),
79f0be8d 5143 len, len, page, fsdata);
2e9ee850
AK
5144 if (ret < 0)
5145 goto out_unlock;
5146 ret = 0;
5147out_unlock:
5148 up_read(&inode->i_alloc_sem);
5149 return ret;
5150}