writeback: remove the always false bdi_cap_writeback_dirty() test
1da177e4
LT
1/*
2 * fs/fs-writeback.c
3 *
4 * Copyright (C) 2002, Linus Torvalds.
5 *
6 * Contains all the functions related to writing back and waiting
7 * upon dirty inodes against superblocks, and writing back dirty
8 * pages against inodes. ie: data writeback. Writeout of the
9 * inode itself is not handled here.
10 *
e1f8e874 11 * 10Apr2002 Andrew Morton
1da177e4
LT
12 * Split out of fs/inode.c
13 * Additions for address_space-based writeback
14 */
15
16#include <linux/kernel.h>
f5ff8422 17#include <linux/module.h>
1da177e4
LT
18#include <linux/spinlock.h>
19#include <linux/sched.h>
20#include <linux/fs.h>
21#include <linux/mm.h>
03ba3782
JA
22#include <linux/kthread.h>
23#include <linux/freezer.h>
1da177e4
LT
24#include <linux/writeback.h>
25#include <linux/blkdev.h>
26#include <linux/backing-dev.h>
27#include <linux/buffer_head.h>
07f3f05c 28#include "internal.h"
1da177e4 29
66f3b8e2 30#define inode_to_bdi(inode) ((inode)->i_mapping->backing_dev_info)
f11b00f3 31
d0bceac7
JA
32/*
33 * We don't actually have pdflush, but this one is exported through /proc...
34 */
35int nr_pdflush_threads;
36
c4a77a6c
JA
37/*
38 * Passed into wb_writeback(), essentially a subset of writeback_control
39 */
40struct wb_writeback_args {
41 long nr_pages;
42 struct super_block *sb;
43 enum writeback_sync_modes sync_mode;
d3ddec76
WF
44 int for_kupdate:1;
45 int range_cyclic:1;
46 int for_background:1;
c4a77a6c
JA
47};
48
03ba3782
JA
49/*
50 * Work items for the bdi_writeback threads
f11b00f3 51 */
03ba3782 52struct bdi_work {
8010c3b6
JA
53 struct list_head list; /* pending work list */
54 struct rcu_head rcu_head; /* for RCU free/clear of work */
03ba3782 55
8010c3b6
JA
56 unsigned long seen; /* threads that have seen this work */
57 atomic_t pending; /* number of threads still to do work */
03ba3782 58
8010c3b6 59 struct wb_writeback_args args; /* writeback arguments */
03ba3782 60
8010c3b6 61 unsigned long state; /* flag bits, see WS_* */
03ba3782
JA
62};
63
64enum {
65 WS_USED_B = 0,
66 WS_ONSTACK_B,
67};
68
69#define WS_USED (1 << WS_USED_B)
70#define WS_ONSTACK (1 << WS_ONSTACK_B)
71
72static inline bool bdi_work_on_stack(struct bdi_work *work)
73{
74 return test_bit(WS_ONSTACK_B, &work->state);
75}
76
77static inline void bdi_work_init(struct bdi_work *work,
b6e51316 78 struct wb_writeback_args *args)
03ba3782
JA
79{
80 INIT_RCU_HEAD(&work->rcu_head);
b6e51316 81 work->args = *args;
03ba3782
JA
82 work->state = WS_USED;
83}
84
f11b00f3
AB
85/**
86 * writeback_in_progress - determine whether there is writeback in progress
87 * @bdi: the device's backing_dev_info structure.
88 *
03ba3782
JA
89 * Determine whether there is writeback waiting to be handled against a
90 * backing device.
f11b00f3
AB
91 */
92int writeback_in_progress(struct backing_dev_info *bdi)
93{
03ba3782 94 return !list_empty(&bdi->work_list);
f11b00f3
AB
95}
96
03ba3782 97static void bdi_work_clear(struct bdi_work *work)
f11b00f3 98{
03ba3782
JA
99 clear_bit(WS_USED_B, &work->state);
100 smp_mb__after_clear_bit();
1ef7d9aa
NP
101 /*
102 * work can have disappeared at this point. bit waitq functions
103 * should be able to tolerate this, provided bdi_sched_wait does
104 * not dereference its pointer argument.
105 */
03ba3782 106 wake_up_bit(&work->state, WS_USED_B);
f11b00f3
AB
107}
108
03ba3782 109static void bdi_work_free(struct rcu_head *head)
4195f73d 110{
03ba3782 111 struct bdi_work *work = container_of(head, struct bdi_work, rcu_head);
4195f73d 112
03ba3782
JA
113 if (!bdi_work_on_stack(work))
114 kfree(work);
115 else
116 bdi_work_clear(work);
4195f73d
NP
117}
118
03ba3782 119static void wb_work_complete(struct bdi_work *work)
1da177e4 120{
c4a77a6c 121 const enum writeback_sync_modes sync_mode = work->args.sync_mode;
77b9d059 122 int onstack = bdi_work_on_stack(work);
1da177e4
LT
123
124 /*
03ba3782
JA
125 * For allocated work, we can clear the done/seen bit right here.
126 * For on-stack work, we need to postpone both the clear and free
127 * to after the RCU grace period, since the stack could be invalidated
128 * as soon as bdi_work_clear() has done the wakeup.
1da177e4 129 */
77b9d059 130 if (!onstack)
03ba3782 131 bdi_work_clear(work);
77b9d059 132 if (sync_mode == WB_SYNC_NONE || onstack)
03ba3782
JA
133 call_rcu(&work->rcu_head, bdi_work_free);
134}
1da177e4 135
03ba3782
JA
136static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
137{
1da177e4 138 /*
03ba3782
JA
139 * The caller has retrieved the work arguments from this work,
140 * so drop our reference. If this is the last ref, delete and free it.
1da177e4 141 */
03ba3782
JA
142 if (atomic_dec_and_test(&work->pending)) {
143 struct backing_dev_info *bdi = wb->bdi;
1da177e4 144
03ba3782
JA
145 spin_lock(&bdi->wb_lock);
146 list_del_rcu(&work->list);
147 spin_unlock(&bdi->wb_lock);
1da177e4 148
03ba3782
JA
149 wb_work_complete(work);
150 }
151}
1da177e4 152
03ba3782
JA
153static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
154{
bcddc3f0
JA
155 work->seen = bdi->wb_mask;
156 BUG_ON(!work->seen);
157 atomic_set(&work->pending, bdi->wb_cnt);
158 BUG_ON(!bdi->wb_cnt);
1da177e4 159
bcddc3f0 160 /*
deed62ed
NP
161 * list_add_tail_rcu() contains the necessary barriers to
162 * make sure the above stores are seen before the item is
163 * noticed on the list
bcddc3f0 164 */
bcddc3f0
JA
165 spin_lock(&bdi->wb_lock);
166 list_add_tail_rcu(&work->list, &bdi->work_list);
167 spin_unlock(&bdi->wb_lock);
03ba3782
JA
168
169 /*
170 * If the default thread isn't there, make sure we add it. When
171 * it gets created and wakes up, we'll run this work.
172 */
173 if (unlikely(list_empty_careful(&bdi->wb_list)))
174 wake_up_process(default_backing_dev_info.wb.task);
175 else {
176 struct bdi_writeback *wb = &bdi->wb;
1da177e4 177
1ef7d9aa 178 if (wb->task)
03ba3782 179 wake_up_process(wb->task);
1da177e4 180 }
1da177e4
LT
181}
182
03ba3782
JA
183/*
184 * Used for on-stack allocated work items. The caller needs to wait until
185 * the wb threads have acked the work before it's safe to continue.
186 */
187static void bdi_wait_on_work_clear(struct bdi_work *work)
188{
189 wait_on_bit(&work->state, WS_USED_B, bdi_sched_wait,
190 TASK_UNINTERRUPTIBLE);
191}
1da177e4 192
f11fcae8 193static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
b6e51316 194 struct wb_writeback_args *args)
1da177e4 195{
03ba3782
JA
196 struct bdi_work *work;
197
bcddc3f0
JA
198 /*
199 * This is WB_SYNC_NONE writeback, so if allocation fails just
200 * wake up the thread for old dirty data writeback.
201 */
03ba3782 202 work = kmalloc(sizeof(*work), GFP_ATOMIC);
bcddc3f0 203 if (work) {
b6e51316 204 bdi_work_init(work, args);
bcddc3f0
JA
205 bdi_queue_work(bdi, work);
206 } else {
207 struct bdi_writeback *wb = &bdi->wb;
03ba3782 208
bcddc3f0
JA
209 if (wb->task)
210 wake_up_process(wb->task);
211 }
03ba3782
JA
212}
213
b6e51316
JA
214/**
215 * bdi_sync_writeback - start and wait for writeback
216 * @bdi: the backing device to write from
217 * @sb: write inodes from this super_block
218 *
219 * Description:
220 * This does WB_SYNC_ALL data integrity writeback and waits for the
221 * IO to complete. Callers must hold the sb s_umount semaphore for
222 * reading, to avoid having the super disappear before we are done.
223 */
224static void bdi_sync_writeback(struct backing_dev_info *bdi,
225 struct super_block *sb)
03ba3782 226{
b6e51316
JA
227 struct wb_writeback_args args = {
228 .sb = sb,
229 .sync_mode = WB_SYNC_ALL,
230 .nr_pages = LONG_MAX,
231 .range_cyclic = 0,
232 };
233 struct bdi_work work;
03ba3782 234
b6e51316
JA
235 bdi_work_init(&work, &args);
236 work.state |= WS_ONSTACK;
03ba3782 237
b6e51316
JA
238 bdi_queue_work(bdi, &work);
239 bdi_wait_on_work_clear(&work);
240}
241
242/**
243 * bdi_start_writeback - start writeback
244 * @bdi: the backing device to write from
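 * @sb: write inodes from this super_block (NULL writes them all)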
245 * @nr_pages: the number of pages to write
246 *
247 * Description:
248 * This does WB_SYNC_NONE opportunistic writeback. The IO is only
249 * started when this function returns; we make no guarantees on
250 * completion. The caller need not hold the sb s_umount semaphore.
251 *
252 */
a72bfd4d
JA
253void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb,
254 long nr_pages)
b6e51316
JA
255{
256 struct wb_writeback_args args = {
a72bfd4d 257 .sb = sb,
b6e51316
JA
258 .sync_mode = WB_SYNC_NONE,
259 .nr_pages = nr_pages,
260 .range_cyclic = 1,
261 };
262
d3ddec76
WF
263 /*
264 * We treat @nr_pages=0 as the special case to do background writeback,
265 * ie. to sync pages until the background dirty threshold is reached.
266 */
267 if (!nr_pages) {
268 args.nr_pages = LONG_MAX;
269 args.for_background = 1;
270 }
271
b6e51316 272 bdi_alloc_queue_work(bdi, &args);
1da177e4
LT
273}
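/*
 * Illustrative sketch (not part of this file): a caller that finds itself
 * over the background dirty threshold, such as balance_dirty_pages() in
 * mm/page-writeback.c, is expected to kick opportunistic writeback with
 * nr_pages == 0 so the flusher runs in background mode:
 *
 *	bdi_start_writeback(bdi, NULL, 0);
 *
 * The exact call site and condition may differ; this only shows how the
 * nr_pages == 0 convention above is meant to be used.
 */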
274
6610a0bc
AM
275/*
276 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
277 * furthest end of its superblock's dirty-inode list.
278 *
279 * Before stamping the inode's ->dirtied_when, we check to see whether it is
66f3b8e2 280 * already the most-recently-dirtied inode on the b_dirty list. If that is
6610a0bc
AM
281 * the case then the inode must have been redirtied while it was being written
282 * out and we don't reset its dirtied_when.
283 */
284static void redirty_tail(struct inode *inode)
285{
03ba3782 286 struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
6610a0bc 287
03ba3782 288 if (!list_empty(&wb->b_dirty)) {
66f3b8e2 289 struct inode *tail;
6610a0bc 290
03ba3782 291 tail = list_entry(wb->b_dirty.next, struct inode, i_list);
66f3b8e2 292 if (time_before(inode->dirtied_when, tail->dirtied_when))
6610a0bc
AM
293 inode->dirtied_when = jiffies;
294 }
03ba3782 295 list_move(&inode->i_list, &wb->b_dirty);
6610a0bc
AM
296}
297
c986d1e2 298/*
66f3b8e2 299 * requeue inode for re-scanning after bdi->b_io list is exhausted.
c986d1e2 300 */
0e0f4fc2 301static void requeue_io(struct inode *inode)
c986d1e2 302{
03ba3782
JA
303 struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
304
305 list_move(&inode->i_list, &wb->b_more_io);
c986d1e2
AM
306}
307
1c0eeaf5
JE
308static void inode_sync_complete(struct inode *inode)
309{
310 /*
311 * Prevent speculative execution through spin_unlock(&inode_lock);
312 */
313 smp_mb();
314 wake_up_bit(&inode->i_state, __I_SYNC);
315}
316
d2caa3c5
JL
317static bool inode_dirtied_after(struct inode *inode, unsigned long t)
318{
319 bool ret = time_after(inode->dirtied_when, t);
320#ifndef CONFIG_64BIT
321 /*
322 * For inodes being constantly redirtied, dirtied_when can get stuck.
323 * It _appears_ to be in the future, but is actually in distant past.
324 * This test is necessary to prevent such wrapped-around relative times
5b0830cb 325 * from permanently stopping the whole bdi writeback.
d2caa3c5
JL
326 */
327 ret = ret && time_before_eq(inode->dirtied_when, jiffies);
328#endif
329 return ret;
330}
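/*
 * Worked example (illustrative): on a 32-bit box jiffies wraps roughly every
 * 49.7 days at HZ=1000. If an inode was dirtied shortly before a wrap and
 * its dirtied_when value then gets stuck while jiffies moves past the wrap,
 * time_after() sees the stale stamp as being "in the future" even though it
 * is ancient. The extra time_before_eq(dirtied_when, jiffies) check above
 * rejects such values so the inode is still treated as expired.
 */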
331
2c136579
FW
332/*
333 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
334 */
335static void move_expired_inodes(struct list_head *delaying_queue,
336 struct list_head *dispatch_queue,
337 unsigned long *older_than_this)
338{
5c03449d
SL
339 LIST_HEAD(tmp);
340 struct list_head *pos, *node;
cf137307 341 struct super_block *sb = NULL;
5c03449d 342 struct inode *inode;
cf137307 343 int do_sb_sort = 0;
5c03449d 344
2c136579 345 while (!list_empty(delaying_queue)) {
5c03449d 346 inode = list_entry(delaying_queue->prev, struct inode, i_list);
2c136579 347 if (older_than_this &&
d2caa3c5 348 inode_dirtied_after(inode, *older_than_this))
2c136579 349 break;
cf137307
JA
350 if (sb && sb != inode->i_sb)
351 do_sb_sort = 1;
352 sb = inode->i_sb;
5c03449d
SL
353 list_move(&inode->i_list, &tmp);
354 }
355
cf137307
JA
356 /* just one sb in list, splice to dispatch_queue and we're done */
357 if (!do_sb_sort) {
358 list_splice(&tmp, dispatch_queue);
359 return;
360 }
361
5c03449d
SL
362 /* Move inodes from one superblock together */
363 while (!list_empty(&tmp)) {
364 inode = list_entry(tmp.prev, struct inode, i_list);
365 sb = inode->i_sb;
366 list_for_each_prev_safe(pos, node, &tmp) {
367 inode = list_entry(pos, struct inode, i_list);
368 if (inode->i_sb == sb)
369 list_move(&inode->i_list, dispatch_queue);
370 }
2c136579
FW
371 }
372}
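/*
 * Illustrative example: if the expired inodes sit on @delaying_queue
 * interleaved by superblock, say A1 B1 A2 B2, the second pass above regroups
 * them on @dispatch_queue so each superblock's inodes are adjacent (all of
 * A, then all of B). That lets writeback_inodes_wb() below pin and unpin
 * each superblock once per batch instead of toggling on every inode.
 */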
373
374/*
375 * Queue all expired dirty inodes for io, eldest first.
376 */
03ba3782 377static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
66f3b8e2 378{
03ba3782
JA
379 list_splice_init(&wb->b_more_io, wb->b_io.prev);
380 move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
66f3b8e2
JA
381}
382
03ba3782 383static int write_inode(struct inode *inode, int sync)
08d8e974 384{
03ba3782
JA
385 if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
386 return inode->i_sb->s_op->write_inode(inode, sync);
387 return 0;
08d8e974 388}
08d8e974 389
1da177e4 390/*
01c03194
CH
391 * Wait for writeback on an inode to complete.
392 */
393static void inode_wait_for_writeback(struct inode *inode)
394{
395 DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
396 wait_queue_head_t *wqh;
397
398 wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
399 do {
400 spin_unlock(&inode_lock);
401 __wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
402 spin_lock(&inode_lock);
403 } while (inode->i_state & I_SYNC);
404}
405
406/*
407 * Write out an inode's dirty pages. Called under inode_lock. Either the
408 * caller has ref on the inode (either via __iget or via syscall against an fd)
409 * or the inode has I_WILL_FREE set (via generic_forget_inode)
410 *
1da177e4
LT
411 * If `wait' is set, wait on the writeout.
412 *
413 * The whole writeout design is quite complex and fragile. We want to avoid
414 * starvation of particular inodes when others are being redirtied, prevent
415 * livelocks, etc.
416 *
417 * Called under inode_lock.
418 */
419static int
01c03194 420writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
1da177e4 421{
1da177e4 422 struct address_space *mapping = inode->i_mapping;
1da177e4 423 int wait = wbc->sync_mode == WB_SYNC_ALL;
01c03194 424 unsigned dirty;
1da177e4
LT
425 int ret;
426
01c03194
CH
427 if (!atomic_read(&inode->i_count))
428 WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
429 else
430 WARN_ON(inode->i_state & I_WILL_FREE);
431
432 if (inode->i_state & I_SYNC) {
433 /*
434 * If this inode is locked for writeback and we are not doing
66f3b8e2 435 * writeback-for-data-integrity, move it to b_more_io so that
01c03194
CH
436 * writeback can proceed with the other inodes on s_io.
437 *
438 * We'll have another go at writing back this inode when we
66f3b8e2 439 * completed a full scan of b_io.
01c03194
CH
440 */
441 if (!wait) {
442 requeue_io(inode);
443 return 0;
444 }
445
446 /*
447 * It's a data-integrity sync. We must wait.
448 */
449 inode_wait_for_writeback(inode);
450 }
451
1c0eeaf5 452 BUG_ON(inode->i_state & I_SYNC);
1da177e4 453
1c0eeaf5 454 /* Set I_SYNC, reset I_DIRTY */
1da177e4 455 dirty = inode->i_state & I_DIRTY;
1c0eeaf5 456 inode->i_state |= I_SYNC;
1da177e4
LT
457 inode->i_state &= ~I_DIRTY;
458
459 spin_unlock(&inode_lock);
460
461 ret = do_writepages(mapping, wbc);
462
463 /* Don't write the inode if only I_DIRTY_PAGES was set */
464 if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
465 int err = write_inode(inode, wait);
466 if (ret == 0)
467 ret = err;
468 }
469
470 if (wait) {
471 int err = filemap_fdatawait(mapping);
472 if (ret == 0)
473 ret = err;
474 }
475
476 spin_lock(&inode_lock);
1c0eeaf5 477 inode->i_state &= ~I_SYNC;
84a89245 478 if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
b3af9468 479 if ((inode->i_state & I_DIRTY_PAGES) && wbc->for_kupdate) {
ae1b7f7d 480 /*
b3af9468
WF
481 * More pages get dirtied by a fast dirtier.
482 */
483 goto select_queue;
484 } else if (inode->i_state & I_DIRTY) {
485 /*
486 * At least XFS will redirty the inode during the
487 * writeback (delalloc) and on io completion (isize).
ae1b7f7d
WF
488 */
489 redirty_tail(inode);
490 } else if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
1da177e4
LT
491 /*
492 * We didn't write back all the pages. nfs_writepages()
493 * sometimes bails out without doing anything. Redirty
66f3b8e2 494 * the inode; move it from b_io onto b_more_io/b_dirty.
1b43ef91
AM
495 */
496 /*
497 * akpm: if the caller was the kupdate function we put
66f3b8e2 498 * this inode at the head of b_dirty so it gets first
1b43ef91
AM
499 * consideration. Otherwise, move it to the tail, for
500 * the reasons described there. I'm not really sure
501 * how much sense this makes. Presumably I had a good
502 * reasons for doing it this way, and I'd rather not
503 * muck with it at present.
1da177e4
LT
504 */
505 if (wbc->for_kupdate) {
506 /*
2c136579 507 * For the kupdate function we move the inode
66f3b8e2 508 * to b_more_io so it will get more writeout as
2c136579 509 * soon as the queue becomes uncongested.
1da177e4
LT
510 */
511 inode->i_state |= I_DIRTY_PAGES;
b3af9468 512select_queue:
8bc3be27
FW
513 if (wbc->nr_to_write <= 0) {
514 /*
515 * slice used up: queue for next turn
516 */
517 requeue_io(inode);
518 } else {
519 /*
520 * somehow blocked: retry later
521 */
522 redirty_tail(inode);
523 }
1da177e4
LT
524 } else {
525 /*
526 * Otherwise fully redirty the inode so that
527 * other inodes on this superblock will get some
528 * writeout. Otherwise heavy writing to one
529 * file would indefinitely suspend writeout of
530 * all the other files.
531 */
532 inode->i_state |= I_DIRTY_PAGES;
1b43ef91 533 redirty_tail(inode);
1da177e4 534 }
1da177e4
LT
535 } else if (atomic_read(&inode->i_count)) {
536 /*
537 * The inode is clean, inuse
538 */
539 list_move(&inode->i_list, &inode_in_use);
540 } else {
541 /*
542 * The inode is clean, unused
543 */
544 list_move(&inode->i_list, &inode_unused);
1da177e4
LT
545 }
546 }
1c0eeaf5 547 inode_sync_complete(inode);
1da177e4
LT
548 return ret;
549}
550
9ecc2738
JA
551static void unpin_sb_for_writeback(struct super_block **psb)
552{
553 struct super_block *sb = *psb;
554
555 if (sb) {
556 up_read(&sb->s_umount);
557 put_super(sb);
558 *psb = NULL;
559 }
560}
561
03ba3782
JA
562/*
563 * For WB_SYNC_NONE writeback, the caller does not have the sb pinned
564 * before calling writeback. So make sure that we do pin it, so it doesn't
565 * go away while we are writing inodes from it.
566 *
567 * Returns 0 if the super was successfully pinned (or pinning wasn't needed),
568 * 1 if we failed.
569 */
570static int pin_sb_for_writeback(struct writeback_control *wbc,
9ecc2738 571 struct inode *inode, struct super_block **psb)
03ba3782
JA
572{
573 struct super_block *sb = inode->i_sb;
574
9ecc2738
JA
575 /*
576 * If this sb is already pinned, nothing more to do. If not and
577 * *psb is non-NULL, unpin the old one first
578 */
579 if (sb == *psb)
580 return 0;
581 else if (*psb)
582 unpin_sb_for_writeback(psb);
583
03ba3782
JA
584 /*
585 * Caller must already hold the ref for this
586 */
587 if (wbc->sync_mode == WB_SYNC_ALL) {
588 WARN_ON(!rwsem_is_locked(&sb->s_umount));
589 return 0;
590 }
591
592 spin_lock(&sb_lock);
593 sb->s_count++;
594 if (down_read_trylock(&sb->s_umount)) {
595 if (sb->s_root) {
596 spin_unlock(&sb_lock);
9ecc2738 597 goto pinned;
03ba3782
JA
598 }
599 /*
600 * umounted, drop rwsem again and fall through to failure
601 */
602 up_read(&sb->s_umount);
603 }
604
605 sb->s_count--;
606 spin_unlock(&sb_lock);
607 return 1;
9ecc2738
JA
608pinned:
609 *psb = sb;
610 return 0;
03ba3782
JA
611}
612
613static void writeback_inodes_wb(struct bdi_writeback *wb,
614 struct writeback_control *wbc)
1da177e4 615{
9ecc2738 616 struct super_block *sb = wbc->sb, *pin_sb = NULL;
1da177e4
LT
617 const unsigned long start = jiffies; /* livelock avoidance */
618
ae8547b0 619 spin_lock(&inode_lock);
1da177e4 620
03ba3782
JA
621 if (!wbc->for_kupdate || list_empty(&wb->b_io))
622 queue_io(wb, wbc->older_than_this);
66f3b8e2 623
03ba3782
JA
624 while (!list_empty(&wb->b_io)) {
625 struct inode *inode = list_entry(wb->b_io.prev,
1da177e4 626 struct inode, i_list);
1da177e4
LT
627 long pages_skipped;
628
66f3b8e2
JA
629 /*
630 * super block given and doesn't match, skip this inode
631 */
632 if (sb && sb != inode->i_sb) {
633 redirty_tail(inode);
634 continue;
635 }
636
84a89245 637 if (inode->i_state & (I_NEW | I_WILL_FREE)) {
7ef0d737
NP
638 requeue_io(inode);
639 continue;
640 }
641
03ba3782 642 if (wbc->nonblocking && bdi_write_congested(wb->bdi)) {
1da177e4 643 wbc->encountered_congestion = 1;
66f3b8e2 644 if (!is_blkdev_sb)
1da177e4 645 break; /* Skip a congested fs */
0e0f4fc2 646 requeue_io(inode);
1da177e4
LT
647 continue; /* Skip a congested blockdev */
648 }
649
d2caa3c5
JL
650 /*
651 * Was this inode dirtied after this writeback pass started?
652 * This keeps sync from extra jobs and livelock.
653 */
654 if (inode_dirtied_after(inode, start))
1da177e4
LT
655 break;
656
9ecc2738 657 if (pin_sb_for_writeback(wbc, inode, &pin_sb)) {
03ba3782
JA
658 requeue_io(inode);
659 continue;
660 }
1da177e4 661
84a89245 662 BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
1da177e4
LT
663 __iget(inode);
664 pages_skipped = wbc->pages_skipped;
01c03194 665 writeback_single_inode(inode, wbc);
1da177e4
LT
666 if (wbc->pages_skipped != pages_skipped) {
667 /*
668 * writeback is not making progress due to locked
669 * buffers. Skip this inode for now.
670 */
f57b9b7b 671 redirty_tail(inode);
1da177e4
LT
672 }
673 spin_unlock(&inode_lock);
1da177e4 674 iput(inode);
4ffc8444 675 cond_resched();
1da177e4 676 spin_lock(&inode_lock);
8bc3be27
FW
677 if (wbc->nr_to_write <= 0) {
678 wbc->more_io = 1;
1da177e4 679 break;
8bc3be27 680 }
03ba3782 681 if (!list_empty(&wb->b_more_io))
8bc3be27 682 wbc->more_io = 1;
1da177e4 683 }
38f21977 684
9ecc2738
JA
685 unpin_sb_for_writeback(&pin_sb);
686
66f3b8e2
JA
687 spin_unlock(&inode_lock);
688 /* Leave any unwritten inodes on b_io */
689}
690
03ba3782
JA
691void writeback_inodes_wbc(struct writeback_control *wbc)
692{
693 struct backing_dev_info *bdi = wbc->bdi;
694
695 writeback_inodes_wb(&bdi->wb, wbc);
696}
697
66f3b8e2 698/*
03ba3782
JA
699 * The maximum number of pages to writeout in a single bdi flush/kupdate
700 * operation. We do this so we don't hold I_SYNC against an inode for
701 * enormous amounts of time, which would block a userspace task which has
702 * been forced to throttle against that inode. Also, the code reevaluates
703 * the dirty state each time it has written this many pages.
704 */
705#define MAX_WRITEBACK_PAGES 1024
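/*
 * With the common 4 KB page size this works out to roughly 4 MB of data per
 * chunk before the loop in wb_writeback() re-checks the dirty limits and the
 * remaining nr_pages budget.
 */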
706
707static inline bool over_bground_thresh(void)
708{
709 unsigned long background_thresh, dirty_thresh;
710
711 get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
712
713 return (global_page_state(NR_FILE_DIRTY) +
714 global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
715}
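/*
 * background_thresh is computed by get_dirty_limits() from
 * vm.dirty_background_ratio (or vm.dirty_background_bytes) against the
 * amount of dirtyable memory; e.g. with the default ratio of 10% and 4 GB
 * of dirtyable memory, background writeback keeps going until fewer than
 * roughly 400 MB worth of pages remain dirty.
 */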
716
717/*
718 * Explicit flushing or periodic writeback of "old" data.
66f3b8e2 719 *
03ba3782
JA
720 * Define "old": the first time one of an inode's pages is dirtied, we mark the
721 * dirtying-time in the inode's address_space. So this periodic writeback code
722 * just walks the superblock inode list, writing back any inodes which are
723 * older than a specific point in time.
66f3b8e2 724 *
03ba3782
JA
725 * Try to run once per dirty_writeback_interval. But if a writeback event
726 * takes longer than a dirty_writeback_interval interval, then leave a
727 * one-second gap.
66f3b8e2 728 *
03ba3782
JA
729 * older_than_this takes precedence over nr_to_write. So we'll only write back
730 * all dirty pages if they are all attached to "old" mappings.
66f3b8e2 731 */
c4a77a6c
JA
732static long wb_writeback(struct bdi_writeback *wb,
733 struct wb_writeback_args *args)
66f3b8e2 734{
03ba3782
JA
735 struct writeback_control wbc = {
736 .bdi = wb->bdi,
c4a77a6c
JA
737 .sb = args->sb,
738 .sync_mode = args->sync_mode,
03ba3782 739 .older_than_this = NULL,
c4a77a6c
JA
740 .for_kupdate = args->for_kupdate,
741 .range_cyclic = args->range_cyclic,
03ba3782
JA
742 };
743 unsigned long oldest_jif;
744 long wrote = 0;
a5989bdc 745 struct inode *inode;
66f3b8e2 746
03ba3782
JA
747 if (wbc.for_kupdate) {
748 wbc.older_than_this = &oldest_jif;
749 oldest_jif = jiffies -
750 msecs_to_jiffies(dirty_expire_interval * 10);
751 }
c4a77a6c
JA
752 if (!wbc.range_cyclic) {
753 wbc.range_start = 0;
754 wbc.range_end = LLONG_MAX;
755 }
38f21977 756
03ba3782
JA
757 for (;;) {
758 /*
d3ddec76 759 * Stop writeback when nr_pages has been consumed
03ba3782 760 */
d3ddec76 761 if (args->nr_pages <= 0)
03ba3782 762 break;
66f3b8e2 763
38f21977 764 /*
d3ddec76
WF
765 * For background writeout, stop when we are below the
766 * background dirty threshold
38f21977 767 */
d3ddec76 768 if (args->for_background && !over_bground_thresh())
03ba3782 769 break;
38f21977 770
03ba3782
JA
771 wbc.more_io = 0;
772 wbc.encountered_congestion = 0;
773 wbc.nr_to_write = MAX_WRITEBACK_PAGES;
774 wbc.pages_skipped = 0;
775 writeback_inodes_wb(wb, &wbc);
c4a77a6c 776 args->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
03ba3782
JA
777 wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;
778
779 /*
71fd05a8 780 * If we consumed everything, see if we have more
03ba3782 781 */
71fd05a8
JA
782 if (wbc.nr_to_write <= 0)
783 continue;
784 /*
785 * Didn't write everything and we don't have more IO, bail
786 */
787 if (!wbc.more_io)
03ba3782 788 break;
71fd05a8
JA
789 /*
790 * Did we write something? Try for more
791 */
792 if (wbc.nr_to_write < MAX_WRITEBACK_PAGES)
793 continue;
794 /*
795 * Nothing written. Wait for some inode to
796 * become available for writeback. Otherwise
797 * we'll just busyloop.
798 */
799 spin_lock(&inode_lock);
800 if (!list_empty(&wb->b_more_io)) {
801 inode = list_entry(wb->b_more_io.prev,
802 struct inode, i_list);
803 inode_wait_for_writeback(inode);
03ba3782 804 }
71fd05a8 805 spin_unlock(&inode_lock);
03ba3782
JA
806 }
807
808 return wrote;
809}
810
811/*
812 * Return the next bdi_work struct that hasn't been processed by this
8010c3b6
JA
813 * wb thread yet. ->seen is initially set for each thread that exists
814 * for this device; when a thread first notices a piece of work it
815 * clears its bit. Depending on writeback type, the thread will notify
816 * completion on either receiving the work (WB_SYNC_NONE) or after
817 * it is done (WB_SYNC_ALL).
03ba3782
JA
818 */
819static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
820 struct bdi_writeback *wb)
821{
822 struct bdi_work *work, *ret = NULL;
823
824 rcu_read_lock();
825
826 list_for_each_entry_rcu(work, &bdi->work_list, list) {
77fad5e6 827 if (!test_bit(wb->nr, &work->seen))
03ba3782 828 continue;
77fad5e6 829 clear_bit(wb->nr, &work->seen);
03ba3782
JA
830
831 ret = work;
832 break;
833 }
834
835 rcu_read_unlock();
836 return ret;
837}
838
839static long wb_check_old_data_flush(struct bdi_writeback *wb)
840{
841 unsigned long expired;
842 long nr_pages;
843
844 expired = wb->last_old_flush +
845 msecs_to_jiffies(dirty_writeback_interval * 10);
846 if (time_before(jiffies, expired))
847 return 0;
848
849 wb->last_old_flush = jiffies;
850 nr_pages = global_page_state(NR_FILE_DIRTY) +
851 global_page_state(NR_UNSTABLE_NFS) +
852 (inodes_stat.nr_inodes - inodes_stat.nr_unused);
853
c4a77a6c
JA
854 if (nr_pages) {
855 struct wb_writeback_args args = {
856 .nr_pages = nr_pages,
857 .sync_mode = WB_SYNC_NONE,
858 .for_kupdate = 1,
859 .range_cyclic = 1,
860 };
861
862 return wb_writeback(wb, &args);
863 }
03ba3782
JA
864
865 return 0;
866}
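/*
 * Note: dirty_writeback_interval (vm.dirty_writeback_centisecs) is kept in
 * centiseconds, hence the "* 10" above when converting to milliseconds for
 * msecs_to_jiffies(). With the default of 500 this flush runs at most once
 * every five seconds.
 */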
867
868/*
869 * Retrieve work items and do the writeback they describe
870 */
871long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
872{
873 struct backing_dev_info *bdi = wb->bdi;
874 struct bdi_work *work;
c4a77a6c 875 long wrote = 0;
03ba3782
JA
876
877 while ((work = get_next_work_item(bdi, wb)) != NULL) {
c4a77a6c 878 struct wb_writeback_args args = work->args;
03ba3782
JA
879
880 /*
881 * Override sync mode, in case we must wait for completion
882 */
883 if (force_wait)
c4a77a6c 884 work->args.sync_mode = args.sync_mode = WB_SYNC_ALL;
03ba3782
JA
885
886 /*
887 * If this isn't a data integrity operation, just notify
888 * that we have seen this work and we are now starting it.
889 */
c4a77a6c 890 if (args.sync_mode == WB_SYNC_NONE)
03ba3782
JA
891 wb_clear_pending(wb, work);
892
c4a77a6c 893 wrote += wb_writeback(wb, &args);
03ba3782
JA
894
895 /*
896 * This is a data integrity writeback, so only do the
897 * notification when we have completed the work.
898 */
c4a77a6c 899 if (args.sync_mode == WB_SYNC_ALL)
03ba3782
JA
900 wb_clear_pending(wb, work);
901 }
902
903 /*
904 * Check for periodic writeback, kupdated() style
905 */
906 wrote += wb_check_old_data_flush(wb);
907
908 return wrote;
909}
910
911/*
912 * Handle writeback of dirty data for the device backed by this bdi. Also
913 * wakes up periodically and does kupdated style flushing.
914 */
915int bdi_writeback_task(struct bdi_writeback *wb)
916{
917 unsigned long last_active = jiffies;
918 unsigned long wait_jiffies = -1UL;
919 long pages_written;
920
921 while (!kthread_should_stop()) {
922 pages_written = wb_do_writeback(wb, 0);
923
924 if (pages_written)
925 last_active = jiffies;
926 else if (wait_jiffies != -1UL) {
927 unsigned long max_idle;
928
38f21977 929 /*
03ba3782
JA
930 * Longest period of inactivity that we tolerate. If we
931 * see dirty data again later, the task will get
932 * recreated automatically.
38f21977 933 */
03ba3782
JA
934 max_idle = max(5UL * 60 * HZ, wait_jiffies);
935 if (time_after(jiffies, max_idle + last_active))
936 break;
937 }
938
939 wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
49db0414 940 schedule_timeout_interruptible(wait_jiffies);
03ba3782
JA
941 try_to_freeze();
942 }
943
944 return 0;
945}
946
947/*
b6e51316
JA
948 * Schedule writeback for all backing devices. This does WB_SYNC_NONE
949 * writeback; for integrity writeback see bdi_sync_writeback().
03ba3782 950 */
b6e51316 951static void bdi_writeback_all(struct super_block *sb, long nr_pages)
03ba3782 952{
b6e51316
JA
953 struct wb_writeback_args args = {
954 .sb = sb,
955 .nr_pages = nr_pages,
956 .sync_mode = WB_SYNC_NONE,
957 };
03ba3782 958 struct backing_dev_info *bdi;
03ba3782 959
cfc4ba53 960 rcu_read_lock();
03ba3782 961
cfc4ba53 962 list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
03ba3782
JA
963 if (!bdi_has_dirty_io(bdi))
964 continue;
38f21977 965
b6e51316 966 bdi_alloc_queue_work(bdi, &args);
03ba3782
JA
967 }
968
cfc4ba53 969 rcu_read_unlock();
1da177e4
LT
970}
971
972/*
03ba3782
JA
973 * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back
974 * the whole world.
975 */
976void wakeup_flusher_threads(long nr_pages)
977{
03ba3782
JA
978 if (nr_pages == 0)
979 nr_pages = global_page_state(NR_FILE_DIRTY) +
980 global_page_state(NR_UNSTABLE_NFS);
b6e51316 981 bdi_writeback_all(NULL, nr_pages);
03ba3782
JA
982}
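/*
 * Illustrative note: sys_sync() is a typical nr_pages == 0 caller, asking
 * every backing device to flush all of its dirty data; this only queues
 * WB_SYNC_NONE work and returns without waiting for it.
 */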
983
984static noinline void block_dump___mark_inode_dirty(struct inode *inode)
985{
986 if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
987 struct dentry *dentry;
988 const char *name = "?";
989
990 dentry = d_find_alias(inode);
991 if (dentry) {
992 spin_lock(&dentry->d_lock);
993 name = (const char *) dentry->d_name.name;
994 }
995 printk(KERN_DEBUG
996 "%s(%d): dirtied inode %lu (%s) on %s\n",
997 current->comm, task_pid_nr(current), inode->i_ino,
998 name, inode->i_sb->s_id);
999 if (dentry) {
1000 spin_unlock(&dentry->d_lock);
1001 dput(dentry);
1002 }
1003 }
1004}
1005
1006/**
1007 * __mark_inode_dirty - internal function
1008 * @inode: inode to mark
1009 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
1010 * Mark an inode as dirty. Callers should use mark_inode_dirty or
1011 * mark_inode_dirty_sync.
1da177e4 1012 *
03ba3782
JA
1013 * Put the inode on the super block's dirty list.
1014 *
1015 * CAREFUL! We mark it dirty unconditionally, but move it onto the
1016 * dirty list only if it is hashed or if it refers to a blockdev.
1017 * If it was not hashed, it will never be added to the dirty list
1018 * even if it is later hashed, as it will have been marked dirty already.
1019 *
1020 * In short, make sure you hash any inodes _before_ you start marking
1021 * them dirty.
1da177e4 1022 *
03ba3782
JA
1023 * This function *must* be atomic for the I_DIRTY_PAGES case -
1024 * set_page_dirty() is called under spinlock in several places.
1da177e4 1025 *
03ba3782
JA
1026 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
1027 * the block-special inode (/dev/hda1) itself. And the ->dirtied_when field of
1028 * the kernel-internal blockdev inode represents the dirtying time of the
1029 * blockdev's pages. This is why for I_DIRTY_PAGES we always use
1030 * page->mapping->host, so the page-dirtying time is recorded in the internal
1031 * blockdev inode.
1da177e4 1032 */
03ba3782 1033void __mark_inode_dirty(struct inode *inode, int flags)
1da177e4 1034{
03ba3782 1035 struct super_block *sb = inode->i_sb;
1da177e4 1036
03ba3782
JA
1037 /*
1038 * Don't do this for I_DIRTY_PAGES - that doesn't actually
1039 * dirty the inode itself
1040 */
1041 if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
1042 if (sb->s_op->dirty_inode)
1043 sb->s_op->dirty_inode(inode);
1044 }
1045
1046 /*
1047 * make sure that changes are seen by all cpus before we test i_state
1048 * -- mikulas
1049 */
1050 smp_mb();
1051
1052 /* avoid the locking if we can */
1053 if ((inode->i_state & flags) == flags)
1054 return;
1055
1056 if (unlikely(block_dump))
1057 block_dump___mark_inode_dirty(inode);
1058
1059 spin_lock(&inode_lock);
1060 if ((inode->i_state & flags) != flags) {
1061 const int was_dirty = inode->i_state & I_DIRTY;
1062
1063 inode->i_state |= flags;
1064
1065 /*
1066 * If the inode is being synced, just update its dirty state.
1067 * The unlocker will place the inode on the appropriate
1068 * superblock list, based upon its state.
1069 */
1070 if (inode->i_state & I_SYNC)
1071 goto out;
1072
1073 /*
1074 * Only add valid (hashed) inodes to the superblock's
1075 * dirty list. Add blockdev inodes as well.
1076 */
1077 if (!S_ISBLK(inode->i_mode)) {
1078 if (hlist_unhashed(&inode->i_hash))
1079 goto out;
1080 }
1081 if (inode->i_state & (I_FREEING|I_CLEAR))
1082 goto out;
1083
1084 /*
1085 * If the inode was already on b_dirty/b_io/b_more_io, don't
1086 * reposition it (that would break b_dirty time-ordering).
1087 */
1088 if (!was_dirty) {
1089 struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
500b067c
JA
1090 struct backing_dev_info *bdi = wb->bdi;
1091
1092 if (bdi_cap_writeback_dirty(bdi) &&
1093 !test_bit(BDI_registered, &bdi->state)) {
1094 WARN_ON(1);
1095 printk(KERN_ERR "bdi-%s not registered\n",
1096 bdi->name);
1097 }
03ba3782
JA
1098
1099 inode->dirtied_when = jiffies;
1100 list_move(&inode->i_list, &wb->b_dirty);
1da177e4 1101 }
1da177e4 1102 }
03ba3782
JA
1103out:
1104 spin_unlock(&inode_lock);
1105}
1106EXPORT_SYMBOL(__mark_inode_dirty);
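/*
 * For reference (defined in include/linux/fs.h, shown here only as a
 * sketch): the wrappers that callers normally use boil down to
 *
 *	static inline void mark_inode_dirty(struct inode *inode)
 *	{
 *		__mark_inode_dirty(inode, I_DIRTY);
 *	}
 *
 *	static inline void mark_inode_dirty_sync(struct inode *inode)
 *	{
 *		__mark_inode_dirty(inode, I_DIRTY_SYNC);
 *	}
 */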
1107
1108/*
1109 * Write out a superblock's list of dirty inodes. A wait will be performed
1110 * upon no inodes, all inodes or the final one, depending upon sync_mode.
1111 *
1112 * If older_than_this is non-NULL, then only write out inodes which
1113 * had their first dirtying at a time earlier than *older_than_this.
1114 *
03ba3782
JA
1115 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
1116 * This function assumes that the blockdev superblock's inodes are backed by
1117 * a variety of queues, so all inodes are searched. For other superblocks,
1118 * assume that all inodes are backed by the same queue.
1119 *
1120 * The inodes to be written are parked on bdi->b_io. They are moved back onto
1121 * bdi->b_dirty as they are selected for writing. This way, none can be missed
1122 * on the writer throttling path, and we get decent balancing between many
1123 * throttled threads: we don't want them all piling up on inode_sync_wait.
1124 */
b6e51316 1125static void wait_sb_inodes(struct super_block *sb)
03ba3782
JA
1126{
1127 struct inode *inode, *old_inode = NULL;
1128
1129 /*
1130 * We need to be protected against the filesystem going from
1131 * r/o to r/w or vice versa.
1132 */
b6e51316 1133 WARN_ON(!rwsem_is_locked(&sb->s_umount));
03ba3782
JA
1134
1135 spin_lock(&inode_lock);
1136
1137 /*
1138 * Data integrity sync. Must wait for all pages under writeback,
1139 * because there may have been pages dirtied before our sync
1140 * call, but which had writeout started before we write it out.
1141 * In which case, the inode may not be on the dirty list, but
1142 * we still have to wait for that writeout.
1143 */
b6e51316 1144 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
03ba3782
JA
1145 struct address_space *mapping;
1146
1147 if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
1148 continue;
1149 mapping = inode->i_mapping;
1150 if (mapping->nrpages == 0)
1151 continue;
1152 __iget(inode);
1153 spin_unlock(&inode_lock);
1154 /*
1155 * We hold a reference to 'inode' so it couldn't have
1156 * been removed from s_inodes list while we dropped the
1157 * inode_lock. We cannot iput the inode now as we can
1158 * be holding the last reference and we cannot iput it
1159 * under inode_lock. So we keep the reference and iput
1160 * it later.
1161 */
1162 iput(old_inode);
1163 old_inode = inode;
1164
1165 filemap_fdatawait(mapping);
1166
1167 cond_resched();
1168
1169 spin_lock(&inode_lock);
1170 }
1171 spin_unlock(&inode_lock);
1172 iput(old_inode);
1da177e4
LT
1173}
1174
d8a8559c
JA
1175/**
1176 * writeback_inodes_sb - writeback dirty inodes from given super_block
1177 * @sb: the superblock
1da177e4 1178 *
d8a8559c
JA
1179 * Start writeback on some inodes on this super_block. No guarantees are made
1180 * on how many (if any) will be written, and this function does not wait
1181 * for IO completion of submitted IO.
1da177e4 1183 */
b6e51316 1184void writeback_inodes_sb(struct super_block *sb)
1da177e4 1185{
d8a8559c
JA
1186 unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
1187 unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
1188 long nr_to_write;
1da177e4 1189
d8a8559c 1190 nr_to_write = nr_dirty + nr_unstable +
38f21977 1191 (inodes_stat.nr_inodes - inodes_stat.nr_unused);
38f21977 1192
a72bfd4d 1193 bdi_start_writeback(sb->s_bdi, sb, nr_to_write);
d8a8559c
JA
1194}
1195EXPORT_SYMBOL(writeback_inodes_sb);
1196
1197/**
1198 * sync_inodes_sb - sync sb inode pages
1199 * @sb: the superblock
1200 *
1201 * This function writes and waits on any dirty inode belonging to this
1202 * super_block.
1203 */
b6e51316 1204void sync_inodes_sb(struct super_block *sb)
d8a8559c 1205{
b6e51316
JA
1206 bdi_sync_writeback(sb->s_bdi, sb);
1207 wait_sb_inodes(sb);
1da177e4 1208}
d8a8559c 1209EXPORT_SYMBOL(sync_inodes_sb);
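/*
 * Illustrative sketch (lives in fs/sync.c, shown here only to place the two
 * exports above): the sync(2) path uses writeback_inodes_sb() for the
 * non-waiting pass and sync_inodes_sb() for the waiting pass, roughly:
 *
 *	static int __sync_filesystem(struct super_block *sb, int wait)
 *	{
 *		if (wait)
 *			sync_inodes_sb(sb);
 *		else
 *			writeback_inodes_sb(sb);
 *
 *		if (sb->s_op->sync_fs)
 *			sb->s_op->sync_fs(sb, wait);
 *		return __sync_blockdev(sb->s_bdev, wait);
 *	}
 */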
1da177e4 1210
1da177e4 1211/**
7f04c26d
AA
1212 * write_inode_now - write an inode to disk
1213 * @inode: inode to write to disk
1214 * @sync: whether the write should be synchronous or not
1215 *
1216 * This function commits an inode to disk immediately if it is dirty. This is
1217 * primarily needed by knfsd.
1da177e4 1218 *
7f04c26d 1219 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
1da177e4 1220 */
1da177e4
LT
1221int write_inode_now(struct inode *inode, int sync)
1222{
1223 int ret;
1224 struct writeback_control wbc = {
1225 .nr_to_write = LONG_MAX,
18914b18 1226 .sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
111ebb6e
OH
1227 .range_start = 0,
1228 .range_end = LLONG_MAX,
1da177e4
LT
1229 };
1230
1231 if (!mapping_cap_writeback_dirty(inode->i_mapping))
49364ce2 1232 wbc.nr_to_write = 0;
1da177e4
LT
1233
1234 might_sleep();
1235 spin_lock(&inode_lock);
01c03194 1236 ret = writeback_single_inode(inode, &wbc);
1da177e4
LT
1237 spin_unlock(&inode_lock);
1238 if (sync)
1c0eeaf5 1239 inode_sync_wait(inode);
1da177e4
LT
1240 return ret;
1241}
1242EXPORT_SYMBOL(write_inode_now);
1243
1244/**
1245 * sync_inode - write an inode and its pages to disk.
1246 * @inode: the inode to sync
1247 * @wbc: controls the writeback mode
1248 *
1249 * sync_inode() will write an inode and its pages to disk. It will also
1250 * correctly update the inode on its superblock's dirty inode lists and will
1251 * update inode->i_state.
1252 *
1253 * The caller must have a ref on the inode.
1254 */
1255int sync_inode(struct inode *inode, struct writeback_control *wbc)
1256{
1257 int ret;
1258
1259 spin_lock(&inode_lock);
01c03194 1260 ret = writeback_single_inode(inode, wbc);
1da177e4
LT
1261 spin_unlock(&inode_lock);
1262 return ret;
1263}
1264EXPORT_SYMBOL(sync_inode);