/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002    akpm@zip.com.au
 *              Split out of fs/inode.c
 *              Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"

/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *
 * Mark an inode as dirty.  Callers should use mark_inode_dirty or
 * mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
        struct super_block *sb = inode->i_sb;

        /*
         * Don't do this for I_DIRTY_PAGES - that doesn't actually
         * dirty the inode itself
         */
        if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
                if (sb->s_op->dirty_inode)
                        sb->s_op->dirty_inode(inode);
        }

        /*
         * make sure that changes are seen by all cpus before we test i_state
         * -- mikulas
         */
        smp_mb();

        /* avoid the locking if we can */
        if ((inode->i_state & flags) == flags)
                return;

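        /* With the block_dump debug sysctl set, log which task dirtied which inode */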
        if (unlikely(block_dump)) {
                struct dentry *dentry = NULL;
                const char *name = "?";

                if (!list_empty(&inode->i_dentry)) {
                        dentry = list_entry(inode->i_dentry.next,
                                            struct dentry, d_alias);
                        if (dentry && dentry->d_name.name)
                                name = (const char *) dentry->d_name.name;
                }

                if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev"))
                        printk(KERN_DEBUG
                               "%s(%d): dirtied inode %lu (%s) on %s\n",
                               current->comm, current->pid, inode->i_ino,
                               name, inode->i_sb->s_id);
        }

        spin_lock(&inode_lock);
        if ((inode->i_state & flags) != flags) {
                const int was_dirty = inode->i_state & I_DIRTY;

                inode->i_state |= flags;

                /*
                 * If the inode is locked, just update its dirty state.
                 * The unlocker will place the inode on the appropriate
                 * superblock list, based upon its state.
                 */
                if (inode->i_state & I_LOCK)
                        goto out;

                /*
                 * Only add valid (hashed) inodes to the superblock's
                 * dirty list.  Add blockdev inodes as well.
                 */
                if (!S_ISBLK(inode->i_mode)) {
                        if (hlist_unhashed(&inode->i_hash))
                                goto out;
                }
                if (inode->i_state & (I_FREEING|I_CLEAR))
                        goto out;

                /*
                 * If the inode was already on s_dirty/s_io/s_more_io, don't
                 * reposition it (that would break s_dirty time-ordering).
                 */
                if (!was_dirty) {
                        inode->dirtied_when = jiffies;
                        list_move(&inode->i_list, &sb->s_dirty);
                }
        }
out:
        spin_unlock(&inode_lock);
}

EXPORT_SYMBOL(__mark_inode_dirty);

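/*
 * Push a dirty inode out via the filesystem's ->write_inode method.
 * Bad inodes are skipped, and filesystems without ->write_inode
 * simply report success.
 */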
static int write_inode(struct inode *inode, int sync)
{
        if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
                return inode->i_sb->s_op->write_inode(inode, sync);
        return 0;
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the s_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;

        if (!list_empty(&sb->s_dirty)) {
                struct inode *tail_inode;

                tail_inode = list_entry(sb->s_dirty.next, struct inode, i_list);
                if (!time_after_eq(inode->dirtied_when,
                                   tail_inode->dirtied_when))
                        inode->dirtied_when = jiffies;
        }
        list_move(&inode->i_list, &sb->s_dirty);
}

/*
 * requeue inode for re-scanning after sb->s_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
        list_move(&inode->i_list, &inode->i_sb->s_more_io);
}

/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 *
 * s_dirty is kept most-recently-dirtied-first, so we scan from the tail
 * and stop at the first inode that is still too young to be written.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
                                struct list_head *dispatch_queue,
                                unsigned long *older_than_this)
{
        while (!list_empty(delaying_queue)) {
                struct inode *inode = list_entry(delaying_queue->prev,
                                                 struct inode, i_list);
                if (older_than_this &&
                    time_after(inode->dirtied_when, *older_than_this))
                        break;
                list_move(&inode->i_list, dispatch_queue);
        }
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 */
static void queue_io(struct super_block *sb,
                     unsigned long *older_than_this)
{
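        /*
         * Splice previously-requeued inodes (s_more_io) onto the tail of
         * s_io, then feed in the inodes whose dirty age has expired.
         */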
        list_splice_init(&sb->s_more_io, sb->s_io.prev);
        move_expired_inodes(&sb->s_dirty, &sb->s_io, older_than_this);
}

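/*
 * Writeback against this superblock still has work queued while any of
 * its three lists (s_dirty, s_io, s_more_io) is non-empty.
 */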
int sb_has_dirty_inodes(struct super_block *sb)
{
        return !list_empty(&sb->s_dirty) ||
               !list_empty(&sb->s_io) ||
               !list_empty(&sb->s_more_io);
}
EXPORT_SYMBOL(sb_has_dirty_inodes);

/*
 * Write a single inode's dirty pages and inode data out to disk.
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 *
 * Called under inode_lock.
 */
static int
__sync_single_inode(struct inode *inode, struct writeback_control *wbc)
{
        unsigned dirty;
        struct address_space *mapping = inode->i_mapping;
        int wait = wbc->sync_mode == WB_SYNC_ALL;
        int ret;

        BUG_ON(inode->i_state & I_LOCK);

        /* Set I_LOCK, reset I_DIRTY */
        dirty = inode->i_state & I_DIRTY;
        inode->i_state |= I_LOCK;
        inode->i_state &= ~I_DIRTY;

        spin_unlock(&inode_lock);

        ret = do_writepages(mapping, wbc);

        /* Don't write the inode if only I_DIRTY_PAGES was set */
        if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
                int err = write_inode(inode, wait);
                if (ret == 0)
                        ret = err;
        }

        if (wait) {
                int err = filemap_fdatawait(mapping);
                if (ret == 0)
                        ret = err;
        }

        spin_lock(&inode_lock);
        inode->i_state &= ~I_LOCK;
        if (!(inode->i_state & I_FREEING)) {
                if (!(inode->i_state & I_DIRTY) &&
                    mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
                        /*
                         * We didn't write back all the pages.  nfs_writepages()
                         * sometimes bales out without doing anything.  Redirty
                         * the inode; Move it from s_io onto s_more_io/s_dirty.
                         */
                        /*
                         * akpm: if the caller was the kupdate function we put
                         * this inode at the head of s_dirty so it gets first
                         * consideration.  Otherwise, move it to the tail, for
                         * the reasons described there.  I'm not really sure
                         * how much sense this makes.  Presumably I had a good
                         * reason for doing it this way, and I'd rather not
                         * muck with it at present.
                         */
                        if (wbc->for_kupdate) {
                                /*
                                 * For the kupdate function we move the inode
                                 * to s_more_io so it will get more writeout as
                                 * soon as the queue becomes uncongested.
                                 */
                                inode->i_state |= I_DIRTY_PAGES;
                                requeue_io(inode);
                        } else {
                                /*
                                 * Otherwise fully redirty the inode so that
                                 * other inodes on this superblock will get some
                                 * writeout.  Otherwise heavy writing to one
                                 * file would indefinitely suspend writeout of
                                 * all the other files.
                                 */
                                inode->i_state |= I_DIRTY_PAGES;
                                redirty_tail(inode);
                        }
                } else if (inode->i_state & I_DIRTY) {
                        /*
                         * Someone redirtied the inode while we were writing
                         * back the pages.
                         */
                        redirty_tail(inode);
                } else if (atomic_read(&inode->i_count)) {
                        /*
                         * The inode is clean, in use
                         */
                        list_move(&inode->i_list, &inode_in_use);
                } else {
                        /*
                         * The inode is clean, unused
                         */
                        list_move(&inode->i_list, &inode_unused);
                }
        }
        wake_up_inode(inode);
        return ret;
}

/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has a ref on the inode (either via __iget or via syscall against an
 * fd) or the inode has I_WILL_FREE set (via generic_forget_inode).
 */
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
        wait_queue_head_t *wqh;

        if (!atomic_read(&inode->i_count))
                WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
        else
                WARN_ON(inode->i_state & I_WILL_FREE);

        if ((wbc->sync_mode != WB_SYNC_ALL) && (inode->i_state & I_LOCK)) {
                struct address_space *mapping = inode->i_mapping;
                int ret;

                /*
                 * We're skipping this inode because it's locked, and we're not
                 * doing writeback-for-data-integrity.  Move it to s_more_io so
                 * that writeback can proceed with the other inodes on s_io.
                 * We'll have another go at writing back this inode once we
                 * have completed a full scan of s_io.
                 */
                requeue_io(inode);

                /*
                 * Even if we don't actually write the inode itself here,
                 * we can at least start some of the data writeout..
                 */
                spin_unlock(&inode_lock);
                ret = do_writepages(mapping, wbc);
                spin_lock(&inode_lock);
                return ret;
        }

        /*
         * It's a data-integrity sync.  We must wait.
         */
        if (inode->i_state & I_LOCK) {
                DEFINE_WAIT_BIT(wq, &inode->i_state, __I_LOCK);

                wqh = bit_waitqueue(&inode->i_state, __I_LOCK);
                do {
                        spin_unlock(&inode_lock);
                        __wait_on_bit(wqh, &wq, inode_wait,
                                      TASK_UNINTERRUPTIBLE);
                        spin_lock(&inode_lock);
                } while (inode->i_state & I_LOCK);
        }
        return __sync_single_inode(inode, wbc);
}

/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If we're a pdflush thread, then implement pdflush collision avoidance
 * against the entire list.
 *
 * WB_SYNC_HOLD is a hack for sys_sync(): reattach the inode to sb->s_dirty so
 * that it can be located for waiting on in __writeback_single_inode().
 *
 * Called under inode_lock.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * FIXME: this linear search could get expensive with many filesystems.  But
 * how to fix?  We need to go from an address_space to all inodes which share
 * a queue with that address_space.  (Easy: have a global "dirty superblocks"
 * list).
 *
 * The inodes to be written are parked on sb->s_io.  They are moved back onto
 * sb->s_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on __wait_on_inode.
 */
static void
sync_sb_inodes(struct super_block *sb, struct writeback_control *wbc)
{
        const unsigned long start = jiffies;    /* livelock avoidance */

        if (!wbc->for_kupdate || list_empty(&sb->s_io))
                queue_io(sb, wbc->older_than_this);

        while (!list_empty(&sb->s_io)) {
                struct inode *inode = list_entry(sb->s_io.prev,
                                                 struct inode, i_list);
                struct address_space *mapping = inode->i_mapping;
                struct backing_dev_info *bdi = mapping->backing_dev_info;
                long pages_skipped;

                if (!bdi_cap_writeback_dirty(bdi)) {
                        redirty_tail(inode);
                        if (sb_is_blkdev_sb(sb)) {
                                /*
                                 * Dirty memory-backed blockdev: the ramdisk
                                 * driver does this.  Skip just this inode
                                 */
                                continue;
                        }
                        /*
                         * Dirty memory-backed inode against a filesystem other
                         * than the kernel-internal bdev filesystem.  Skip the
                         * entire superblock.
                         */
                        break;
                }

                if (wbc->nonblocking && bdi_write_congested(bdi)) {
                        wbc->encountered_congestion = 1;
                        if (!sb_is_blkdev_sb(sb))
                                break;          /* Skip a congested fs */
                        requeue_io(inode);
                        continue;               /* Skip a congested blockdev */
                }

                if (wbc->bdi && bdi != wbc->bdi) {
                        if (!sb_is_blkdev_sb(sb))
                                break;          /* fs has the wrong queue */
                        requeue_io(inode);
                        continue;               /* blockdev has wrong queue */
                }

                /* Was this inode dirtied after sync_sb_inodes was called? */
                if (time_after(inode->dirtied_when, start))
                        break;

                /* Is another pdflush already flushing this queue? */
                if (current_is_pdflush() && !writeback_acquire(bdi))
                        break;

                BUG_ON(inode->i_state & I_FREEING);
                __iget(inode);
                pages_skipped = wbc->pages_skipped;
                __writeback_single_inode(inode, wbc);
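                /*
                 * WB_SYNC_HOLD: park the inode back on s_dirty, freshly
                 * stamped, so the wait pass of sys_sync() can find it.
                 */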
                if (wbc->sync_mode == WB_SYNC_HOLD) {
                        inode->dirtied_when = jiffies;
                        list_move(&inode->i_list, &sb->s_dirty);
                }
                if (current_is_pdflush())
                        writeback_release(bdi);
                if (wbc->pages_skipped != pages_skipped) {
                        /*
                         * writeback is not making progress due to locked
                         * buffers.  Skip this inode for now.
                         */
                        redirty_tail(inode);
                }
                spin_unlock(&inode_lock);
                iput(inode);
                cond_resched();
                spin_lock(&inode_lock);
                if (wbc->nr_to_write <= 0)
                        break;
        }
        if (!list_empty(&sb->s_more_io))
                wbc->more_io = 1;
        return;         /* Leave any unwritten inodes on s_io */
}

/*
 * Start writeback of dirty pagecache data against all unlocked inodes.
 *
 * Note:
 * We don't need to grab a reference to superblock here. If it has non-empty
 * ->s_dirty it hadn't been killed yet and kill_super() won't proceed
 * past sync_inodes_sb() until the ->s_dirty/s_io/s_more_io lists are all
 * empty. Since __sync_single_inode() regains inode_lock before it finally
 * moves the inode from the superblock lists we are OK.
 *
 * If `older_than_this' is non-zero then only flush inodes which have a
 * flushtime older than *older_than_this.
 *
 * If `bdi' is non-zero then we will scan the first inode against each
 * superblock until we find the matching ones.  One group will be the dirty
 * inodes against a filesystem.  Then when we hit the dummy blockdev superblock,
 * sync_sb_inodes will seek out the blockdev which matches `bdi'.  Maybe not
 * super-efficient but we're about to do a ton of I/O...
 */
void
writeback_inodes(struct writeback_control *wbc)
{
        struct super_block *sb;

        might_sleep();
        spin_lock(&sb_lock);
restart:
        sb = sb_entry(super_blocks.prev);
        for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.prev)) {
                if (sb_has_dirty_inodes(sb)) {
                        /* we're making our own get_super here */
                        sb->s_count++;
                        spin_unlock(&sb_lock);
                        /*
                         * If we can't get the readlock, there's no sense in
                         * waiting around, most of the time the FS is going to
                         * be unmounted by the time it is released.
                         */
                        if (down_read_trylock(&sb->s_umount)) {
                                if (sb->s_root) {
                                        spin_lock(&inode_lock);
                                        sync_sb_inodes(sb, wbc);
                                        spin_unlock(&inode_lock);
                                }
                                up_read(&sb->s_umount);
                        }
                        spin_lock(&sb_lock);
                        if (__put_super_and_need_restart(sb))
                                goto restart;
                }
                if (wbc->nr_to_write <= 0)
                        break;
        }
        spin_unlock(&sb_lock);
}

/*
 * writeback and wait upon the filesystem's dirty inodes.  The caller will
 * do this in two passes - one to write, and one to wait.  WB_SYNC_HOLD is
 * used to park the written inodes on sb->s_dirty for the wait pass.
 *
 * A finite limit is set on the number of pages which will be written, to
 * prevent infinite livelock of sys_sync().
 *
 * We add in the number of potentially dirty inodes, because each inode write
 * can dirty pagecache in the underlying blockdev.
 */
void sync_inodes_sb(struct super_block *sb, int wait)
{
        struct writeback_control wbc = {
                .sync_mode      = wait ? WB_SYNC_ALL : WB_SYNC_HOLD,
                .range_start    = 0,
                .range_end      = LLONG_MAX,
        };
        unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
        unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);

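        /*
         * Budget: dirty + unstable pages are counted twice since writing
         * an inode can re-dirty pagecache in the underlying blockdev, plus
         * one write per in-use inode, plus 50% headroom.
         */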
        wbc.nr_to_write = nr_dirty + nr_unstable +
                        (inodes_stat.nr_inodes - inodes_stat.nr_unused) +
                        nr_dirty + nr_unstable;
        wbc.nr_to_write += wbc.nr_to_write / 2;         /* Bit more for luck */
        spin_lock(&inode_lock);
        sync_sb_inodes(sb, &wbc);
        spin_unlock(&inode_lock);
}

/*
 * Rather lame livelock avoidance: s_syncing marks superblocks already
 * visited in the current pass, so the restart loop in __sync_inodes()
 * does not process the same superblock twice.
 */
static void set_sb_syncing(int val)
{
        struct super_block *sb;
        spin_lock(&sb_lock);
        sb = sb_entry(super_blocks.prev);
        for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.prev)) {
                sb->s_syncing = val;
        }
        spin_unlock(&sb_lock);
}

/**
 * sync_inodes - writes all inodes to disk
 * @wait: wait for completion
 *
 * sync_inodes() goes through each super block's dirty inode list, writes the
 * inodes out, waits on the writeout and puts the inodes back on the normal
 * list.
 *
 * This is for sys_sync().  fsync_dev() uses the same algorithm.  The subtle
 * part of the sync functions is that the blockdev "superblock" is processed
 * last.  This is because the write_inode() function of a typical fs will
 * perform no I/O, but will mark buffers in the blockdev mapping as dirty.
 * What we want to do is to perform all that dirtying first, and then write
 * back all those inode blocks via the blockdev mapping in one sweep.  So the
 * additional (somewhat redundant) sync_blockdev() calls here are to make
 * sure that really happens.  Because if we call sync_inodes_sb(wait=1) with
 * outstanding dirty inodes, the writeback goes block-at-a-time within the
 * filesystem's write_inode().  This is extremely slow.
 */
static void __sync_inodes(int wait)
{
        struct super_block *sb;

        spin_lock(&sb_lock);
restart:
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (sb->s_syncing)
                        continue;
                sb->s_syncing = 1;
                sb->s_count++;
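                /* take a ref so the superblock can't be freed while sb_lock is dropped */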
                spin_unlock(&sb_lock);
                down_read(&sb->s_umount);
                if (sb->s_root) {
                        sync_inodes_sb(sb, wait);
                        sync_blockdev(sb->s_bdev);
                }
                up_read(&sb->s_umount);
                spin_lock(&sb_lock);
                if (__put_super_and_need_restart(sb))
                        goto restart;
        }
        spin_unlock(&sb_lock);
}

void sync_inodes(int wait)
{
        set_sb_syncing(0);
        __sync_inodes(0);

        if (wait) {
                set_sb_syncing(0);
                __sync_inodes(1);
        }
}

/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
        int ret;
        struct writeback_control wbc = {
                .nr_to_write = LONG_MAX,
                .sync_mode = WB_SYNC_ALL,
                .range_start = 0,
                .range_end = LLONG_MAX,
        };

        if (!mapping_cap_writeback_dirty(inode->i_mapping))
                wbc.nr_to_write = 0;

        might_sleep();
        spin_lock(&inode_lock);
        ret = __writeback_single_inode(inode, &wbc);
        spin_unlock(&inode_lock);
        if (sync)
                wait_on_inode(inode);
        return ret;
}
EXPORT_SYMBOL(write_inode_now);

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
        int ret;

        spin_lock(&inode_lock);
        ret = __writeback_single_inode(inode, wbc);
        spin_unlock(&inode_lock);
        return ret;
}
EXPORT_SYMBOL(sync_inode);

/**
 * generic_osync_inode - flush all dirty data for a given inode to disk
 * @inode: inode to write
 * @mapping: the address_space that should be flushed
 * @what: what to write and wait upon
 *
 * This can be called by file_write functions for files which have the
 * O_SYNC flag set, to flush dirty writes to disk.
 *
 * @what is a bitmask, specifying which part of the inode's data should be
 * written and waited upon.
 *
 * OSYNC_DATA:     i_mapping's dirty data
 * OSYNC_METADATA: the buffers at i_mapping->private_list
 * OSYNC_INODE:    the inode itself
 */

int generic_osync_inode(struct inode *inode, struct address_space *mapping, int what)
{
        int err = 0;
        int need_write_inode_now = 0;
        int err2;

        if (what & OSYNC_DATA)
                err = filemap_fdatawrite(mapping);
        if (what & (OSYNC_METADATA|OSYNC_DATA)) {
                err2 = sync_mapping_buffers(mapping);
                if (!err)
                        err = err2;
        }
        if (what & OSYNC_DATA) {
                err2 = filemap_fdatawait(mapping);
                if (!err)
                        err = err2;
        }

        spin_lock(&inode_lock);
        if ((inode->i_state & I_DIRTY) &&
            ((what & OSYNC_INODE) || (inode->i_state & I_DIRTY_DATASYNC)))
                need_write_inode_now = 1;
        spin_unlock(&inode_lock);

        if (need_write_inode_now) {
                err2 = write_inode_now(inode, 1);
                if (!err)
                        err = err2;
        }
        else
                wait_on_inode(inode);

        return err;
}

EXPORT_SYMBOL(generic_osync_inode);

/**
 * writeback_acquire: attempt to get exclusive writeback access to a device
 * @bdi: the device's backing_dev_info structure
 *
 * It is a waste of resources to have more than one pdflush thread blocked on
 * a single request queue.  Exclusion at the request_queue level is obtained
 * via a flag in the request_queue's backing_dev_info.state.
 *
 * Non-request_queue-backed address_spaces will share default_backing_dev_info,
 * unless they implement their own.  Which is somewhat inefficient, as this
 * may prevent concurrent writeback against multiple devices.
 */
int writeback_acquire(struct backing_dev_info *bdi)
{
        return !test_and_set_bit(BDI_pdflush, &bdi->state);
}

/**
 * writeback_in_progress: determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback in progress against a backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
        return test_bit(BDI_pdflush, &bdi->state);
}

/**
 * writeback_release: relinquish exclusive writeback access against a device.
 * @bdi: the device's backing_dev_info structure
 */
void writeback_release(struct backing_dev_info *bdi)
{
        BUG_ON(!writeback_in_progress(bdi));
        clear_bit(BDI_pdflush, &bdi->state);
}