/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes. ie: data writeback. Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	akpm@zip.com.au
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"

/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 * Mark an inode as dirty. Callers should use mark_inode_dirty or
 * mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump)) {
		struct dentry *dentry = NULL;
		const char *name = "?";

		if (!list_empty(&inode->i_dentry)) {
			dentry = list_entry(inode->i_dentry.next,
					    struct dentry, d_alias);
			if (dentry && dentry->d_name.name)
				name = (const char *) dentry->d_name.name;
		}

		if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev"))
			printk(KERN_DEBUG
			       "%s(%d): dirtied inode %lu (%s) on %s\n",
			       current->comm, current->pid, inode->i_ino,
			       name, inode->i_sb->s_id);
	}

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is locked, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_LOCK)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (hlist_unhashed(&inode->i_hash))
				goto out;
		}
		if (inode->i_state & (I_FREEING|I_CLEAR))
			goto out;

		/*
		 * If the inode was already on s_dirty or s_io, don't
		 * reposition it (that would break s_dirty time-ordering).
		 */
		if (!was_dirty) {
			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &sb->s_dirty);
		}
	}
out:
	spin_unlock(&inode_lock);
}

EXPORT_SYMBOL(__mark_inode_dirty);
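
/*
 * Illustrative sketch (not part of the original file): filesystems do not
 * normally call __mark_inode_dirty() directly; they use the wrappers from
 * <linux/fs.h>.  The helper below is hypothetical and only shows the
 * intended calling convention after updating inode metadata.
 */
#if 0
static void example_update_mtime(struct inode *inode)
{
	inode->i_mtime = CURRENT_TIME_SEC;
	/* Timestamp-only change: I_DIRTY_SYNC is enough */
	mark_inode_dirty_sync(inode);	/* __mark_inode_dirty(inode, I_DIRTY_SYNC) */
}
#endif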

static int write_inode(struct inode *inode, int sync)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, sync);
	return 0;
}

/*
 * Write a single inode's dirty pages and inode data out to disk.
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 *
 * Called under inode_lock.
 */
static int
__sync_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	unsigned dirty;
	struct address_space *mapping = inode->i_mapping;
	struct super_block *sb = inode->i_sb;
	int wait = wbc->sync_mode == WB_SYNC_ALL;
	int ret;

	BUG_ON(inode->i_state & I_LOCK);

	/* Set I_LOCK, reset I_DIRTY */
	dirty = inode->i_state & I_DIRTY;
	inode->i_state |= I_LOCK;
	inode->i_state &= ~I_DIRTY;

	spin_unlock(&inode_lock);

	ret = do_writepages(mapping, wbc);

	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wait);
		if (ret == 0)
			ret = err;
	}

	if (wait) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_lock);
	inode->i_state &= ~I_LOCK;
	if (!(inode->i_state & I_FREEING)) {
		if (!(inode->i_state & I_DIRTY) &&
		    mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bails out without doing anything.  Redirty
			 * the inode.  It is still on sb->s_io.
			 */
			if (wbc->for_kupdate) {
				/*
				 * For the kupdate function we leave the inode
				 * at the head of sb->s_dirty so it will get more
				 * writeout as soon as the queue becomes
				 * uncongested.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				list_move_tail(&inode->i_list, &sb->s_dirty);
			} else {
				/*
				 * Otherwise fully redirty the inode so that
				 * other inodes on this superblock will get some
				 * writeout.  Otherwise heavy writing to one
				 * file would indefinitely suspend writeout of
				 * all the other files.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				inode->dirtied_when = jiffies;
				list_move(&inode->i_list, &sb->s_dirty);
			}
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * Someone redirtied the inode while we were writing back
			 * the pages.
			 */
			list_move(&inode->i_list, &sb->s_dirty);
		} else if (atomic_read(&inode->i_count)) {
			/*
			 * The inode is clean, inuse
			 */
			list_move(&inode->i_list, &inode_in_use);
		} else {
			/*
			 * The inode is clean, unused
			 */
			list_move(&inode->i_list, &inode_unused);
		}
	}
	wake_up_inode(inode);
	return ret;
}

/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has a ref on the inode (either via __iget or via syscall against an fd)
 * or the inode has I_WILL_FREE set (via generic_forget_inode)
 */
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	wait_queue_head_t *wqh;

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if ((wbc->sync_mode != WB_SYNC_ALL) && (inode->i_state & I_LOCK)) {
		struct address_space *mapping = inode->i_mapping;
		int ret;

		list_move(&inode->i_list, &inode->i_sb->s_dirty);

		/*
		 * Even if we don't actually write the inode itself here,
		 * we can at least start some of the data writeout..
		 */
		spin_unlock(&inode_lock);
		ret = do_writepages(mapping, wbc);
		spin_lock(&inode_lock);
		return ret;
	}

	/*
	 * It's a data-integrity sync.  We must wait.
	 */
	if (inode->i_state & I_LOCK) {
		DEFINE_WAIT_BIT(wq, &inode->i_state, __I_LOCK);

		wqh = bit_waitqueue(&inode->i_state, __I_LOCK);
		do {
			spin_unlock(&inode_lock);
			__wait_on_bit(wqh, &wq, inode_wait,
				      TASK_UNINTERRUPTIBLE);
			spin_lock(&inode_lock);
		} while (inode->i_state & I_LOCK);
	}
	return __sync_single_inode(inode, wbc);
}

/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If we're a pdflush thread, then implement pdflush collision avoidance
 * against the entire list.
 *
 * WB_SYNC_HOLD is a hack for sys_sync(): reattach the inode to sb->s_dirty so
 * that it can be located for waiting on in __writeback_single_inode().
 *
 * Called under inode_lock.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * FIXME: this linear search could get expensive with many filesystems.  But
 * how to fix?  We need to go from an address_space to all inodes which share
 * a queue with that address_space.  (Easy: have a global "dirty superblocks"
 * list).
 *
 * The inodes to be written are parked on sb->s_io.  They are moved back onto
 * sb->s_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on __wait_on_inode.
 */
static void
sync_sb_inodes(struct super_block *sb, struct writeback_control *wbc)
{
	const unsigned long start = jiffies;	/* livelock avoidance */

	if (!wbc->for_kupdate || list_empty(&sb->s_io))
		list_splice_init(&sb->s_dirty, &sb->s_io);

	while (!list_empty(&sb->s_io)) {
		struct inode *inode = list_entry(sb->s_io.prev,
						struct inode, i_list);
		struct address_space *mapping = inode->i_mapping;
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		long pages_skipped;

		if (!bdi_cap_writeback_dirty(bdi)) {
			list_move(&inode->i_list, &sb->s_dirty);
			if (sb_is_blkdev_sb(sb)) {
				/*
				 * Dirty memory-backed blockdev: the ramdisk
				 * driver does this.  Skip just this inode
				 */
				continue;
			}
			/*
			 * Dirty memory-backed inode against a filesystem other
			 * than the kernel-internal bdev filesystem.  Skip the
			 * entire superblock.
			 */
			break;
		}

		if (wbc->nonblocking && bdi_write_congested(bdi)) {
			wbc->encountered_congestion = 1;
			if (!sb_is_blkdev_sb(sb))
				break;		/* Skip a congested fs */
			list_move(&inode->i_list, &sb->s_dirty);
			continue;		/* Skip a congested blockdev */
		}

		if (wbc->bdi && bdi != wbc->bdi) {
			if (!sb_is_blkdev_sb(sb))
				break;		/* fs has the wrong queue */
			list_move(&inode->i_list, &sb->s_dirty);
			continue;		/* blockdev has wrong queue */
		}

		/* Was this inode dirtied after sync_sb_inodes was called? */
		if (time_after(inode->dirtied_when, start))
			break;

		/* Was this inode dirtied too recently? */
		if (wbc->older_than_this && time_after(inode->dirtied_when,
						*wbc->older_than_this))
			break;

		/* Is another pdflush already flushing this queue? */
		if (current_is_pdflush() && !writeback_acquire(bdi))
			break;

		BUG_ON(inode->i_state & I_FREEING);
		__iget(inode);
		pages_skipped = wbc->pages_skipped;
		__writeback_single_inode(inode, wbc);
		if (wbc->sync_mode == WB_SYNC_HOLD) {
			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &sb->s_dirty);
		}
		if (current_is_pdflush())
			writeback_release(bdi);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			list_move(&inode->i_list, &sb->s_dirty);
		}
		spin_unlock(&inode_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0)
			break;
	}
	return;		/* Leave any unwritten inodes on s_io */
}

/*
 * Start writeback of dirty pagecache data against all unlocked inodes.
 *
 * Note:
 * We don't need to grab a reference to superblock here. If it has non-empty
 * ->s_dirty it hasn't been killed yet and kill_super() won't proceed
 * past sync_inodes_sb() until both the ->s_dirty and ->s_io lists are
 * empty. Since __sync_single_inode() regains inode_lock before it finally moves
 * inode from superblock lists we are OK.
 *
 * If `older_than_this' is non-zero then only flush inodes which have a
 * flushtime older than *older_than_this.
 *
 * If `bdi' is non-zero then we will scan the first inode against each
 * superblock until we find the matching ones.  One group will be the dirty
 * inodes against a filesystem.  Then when we hit the dummy blockdev superblock,
 * sync_sb_inodes will seek out the blockdev which matches `bdi'.  Maybe not
 * super-efficient but we're about to do a ton of I/O...
 */
void
writeback_inodes(struct writeback_control *wbc)
{
	struct super_block *sb;

	might_sleep();
	spin_lock(&sb_lock);
restart:
	sb = sb_entry(super_blocks.prev);
	for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.prev)) {
		if (!list_empty(&sb->s_dirty) || !list_empty(&sb->s_io)) {
			/* we're making our own get_super here */
			sb->s_count++;
			spin_unlock(&sb_lock);
			/*
			 * If we can't get the readlock, there's no sense in
			 * waiting around, most of the time the FS is going to
			 * be unmounted by the time it is released.
			 */
			if (down_read_trylock(&sb->s_umount)) {
				if (sb->s_root) {
					spin_lock(&inode_lock);
					sync_sb_inodes(sb, wbc);
					spin_unlock(&inode_lock);
				}
				up_read(&sb->s_umount);
			}
			spin_lock(&sb_lock);
			if (__put_super_and_need_restart(sb))
				goto restart;
		}
		if (wbc->nr_to_write <= 0)
			break;
	}
	spin_unlock(&sb_lock);
}
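
/*
 * Illustrative sketch (not part of the original file): roughly how a pdflush
 * worker such as background_writeout() in mm/page-writeback.c drives
 * writeback_inodes().  The function is hypothetical and simplified (the real
 * caller also consults the dirty thresholds); MAX_WRITEBACK_PAGES is local
 * to mm/page-writeback.c, so treat its use here as an assumption.
 */
#if 0
static void example_flush_some_pages(long nr_pages)
{
	struct writeback_control wbc = {
		.bdi		 = NULL,	/* all queues */
		.sync_mode	 = WB_SYNC_NONE,
		.older_than_this = NULL,
		.nr_to_write	 = 0,
		.nonblocking	 = 1,
	};

	while (nr_pages > 0) {
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		writeback_inodes(&wbc);
		nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
		if (wbc.nr_to_write > 0)
			break;	/* ran out of dirty inodes */
	}
}
#endif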

/*
 * writeback and wait upon the filesystem's dirty inodes.  The caller will
 * do this in two passes - one to write, and one to wait.  WB_SYNC_HOLD is
 * used to park the written inodes on sb->s_dirty for the wait pass.
 *
 * A finite limit is set on the number of pages which will be written, to
 * prevent infinite livelock of sys_sync().
 *
 * We add in the number of potentially dirty inodes, because each inode write
 * can dirty pagecache in the underlying blockdev.
 */
void sync_inodes_sb(struct super_block *sb, int wait)
{
	struct writeback_control wbc = {
		.sync_mode	= wait ? WB_SYNC_ALL : WB_SYNC_HOLD,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};
	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);

	wbc.nr_to_write = nr_dirty + nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused) +
			nr_dirty + nr_unstable;
	wbc.nr_to_write += wbc.nr_to_write / 2;		/* Bit more for luck */
	spin_lock(&inode_lock);
	sync_sb_inodes(sb, &wbc);
	spin_unlock(&inode_lock);
}
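
/*
 * Illustrative sketch (not part of the original file): the two-pass pattern
 * the comment above describes - one pass to write, one to wait - roughly as
 * the fsync_super() path in fs/super.c uses it.  The helper name is
 * hypothetical.
 */
#if 0
static void example_sync_fs_inodes(struct super_block *sb)
{
	sync_inodes_sb(sb, 0);	/* pass 1: WB_SYNC_HOLD, start writeout */
	sync_inodes_sb(sb, 1);	/* pass 2: WB_SYNC_ALL, wait on the writeout */
}
#endif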

/*
 * Rather lame livelock avoidance.
 */
static void set_sb_syncing(int val)
{
	struct super_block *sb;
	spin_lock(&sb_lock);
	sb = sb_entry(super_blocks.prev);
	for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.prev)) {
		sb->s_syncing = val;
	}
	spin_unlock(&sb_lock);
}

/**
 * sync_inodes - writes all inodes to disk
 * @wait: wait for completion
 *
 * sync_inodes() goes through each super block's dirty inode list, writes the
 * inodes out, waits on the writeout and puts the inodes back on the normal
 * list.
 *
 * This is for sys_sync().  fsync_dev() uses the same algorithm.  The subtle
 * part of the sync functions is that the blockdev "superblock" is processed
 * last.  This is because the write_inode() function of a typical fs will
 * perform no I/O, but will mark buffers in the blockdev mapping as dirty.
 * What we want to do is to perform all that dirtying first, and then write
 * back all those inode blocks via the blockdev mapping in one sweep.  So the
 * additional (somewhat redundant) sync_blockdev() calls here are to make
 * sure that really happens.  Because if we call sync_inodes_sb(wait=1) with
 * outstanding dirty inodes, the writeback goes block-at-a-time within the
 * filesystem's write_inode().  This is extremely slow.
 */
static void __sync_inodes(int wait)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
restart:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_syncing)
			continue;
		sb->s_syncing = 1;
		sb->s_count++;
		spin_unlock(&sb_lock);
		down_read(&sb->s_umount);
		if (sb->s_root) {
			sync_inodes_sb(sb, wait);
			sync_blockdev(sb->s_bdev);
		}
		up_read(&sb->s_umount);
		spin_lock(&sb_lock);
		if (__put_super_and_need_restart(sb))
			goto restart;
	}
	spin_unlock(&sb_lock);
}

void sync_inodes(int wait)
{
	set_sb_syncing(0);
	__sync_inodes(0);

	if (wait) {
		set_sb_syncing(0);
		__sync_inodes(1);
	}
}
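
/*
 * Illustrative sketch (not part of the original file): roughly how the
 * sys_sync() path in fs/buffer.c drives sync_inodes(), reproduced from
 * memory and simplified - treat the exact call sequence as an assumption.
 */
#if 0
static void example_do_sync(int wait)
{
	wakeup_pdflush(0);	/* start writeout of all dirty pages */
	sync_inodes(0);		/* pass 1: no livelock-prone waiting */
	sync_supers();		/* write the super blocks */
	sync_filesystems(0);	/* start syncing the filesystems */
	sync_filesystems(wait);
	sync_inodes(wait);	/* mappings, inodes and blockdevs, again */
}
#endif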

/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = WB_SYNC_ALL,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&inode_lock);
	ret = __writeback_single_inode(inode, &wbc);
	spin_unlock(&inode_lock);
	if (sync)
		wait_on_inode(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);
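
/*
 * Illustrative sketch (not part of the original file): typical use by a
 * caller that must have an inode safely on disk before replying, as knfsd
 * does.  The wrapper name is hypothetical.
 */
#if 0
static int example_commit_inode(struct inode *inode)
{
	/* sync=1: returns only after the pages and inode are written and waited upon */
	return write_inode_now(inode, 1);
}
#endif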

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	spin_lock(&inode_lock);
	ret = __writeback_single_inode(inode, wbc);
	spin_unlock(&inode_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);
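
/*
 * Illustrative sketch (not part of the original file): how a filesystem can
 * drive sync_inode() with its own writeback_control, modelled on ext2's
 * ext2_sync_inode().  Treat the exact field choices as an assumption.
 */
#if 0
static int example_sync_one_inode(struct inode *inode)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,	/* metadata only; data was already written */
	};
	return sync_inode(inode, &wbc);
}
#endif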

/**
 * generic_osync_inode - flush all dirty data for a given inode to disk
 * @inode: inode to write
 * @mapping: the address_space that should be flushed
 * @what: what to write and wait upon
 *
 * This can be called by file_write functions for files which have the
 * O_SYNC flag set, to flush dirty writes to disk.
 *
 * @what is a bitmask, specifying which part of the inode's data should be
 * written and waited upon.
 *
 * OSYNC_DATA:     i_mapping's dirty data
 * OSYNC_METADATA: the buffers at i_mapping->private_list
 * OSYNC_INODE:    the inode itself
 */

int generic_osync_inode(struct inode *inode, struct address_space *mapping, int what)
{
	int err = 0;
	int need_write_inode_now = 0;
	int err2;

	if (what & OSYNC_DATA)
		err = filemap_fdatawrite(mapping);
	if (what & (OSYNC_METADATA|OSYNC_DATA)) {
		err2 = sync_mapping_buffers(mapping);
		if (!err)
			err = err2;
	}
	if (what & OSYNC_DATA) {
		err2 = filemap_fdatawait(mapping);
		if (!err)
			err = err2;
	}

	spin_lock(&inode_lock);
	if ((inode->i_state & I_DIRTY) &&
	    ((what & OSYNC_INODE) || (inode->i_state & I_DIRTY_DATASYNC)))
		need_write_inode_now = 1;
	spin_unlock(&inode_lock);

	if (need_write_inode_now) {
		err2 = write_inode_now(inode, 1);
		if (!err)
			err = err2;
	} else
		wait_on_inode(inode);

	return err;
}

EXPORT_SYMBOL(generic_osync_inode);
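
/*
 * Illustrative sketch (not part of the original file): the sort of call an
 * O_SYNC write path makes after copying data, in the style of older
 * generic write paths.  The surrounding function is hypothetical and
 * simplified.
 */
#if 0
static ssize_t example_osync_after_write(struct file *file, ssize_t written)
{
	struct inode *inode = file->f_mapping->host;
	int err;

	if (written > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		err = generic_osync_inode(inode, file->f_mapping,
					  OSYNC_METADATA | OSYNC_DATA);
		if (err)
			written = err;
	}
	return written;
}
#endif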

/**
 * writeback_acquire - attempt to get exclusive writeback access to a device
 * @bdi: the device's backing_dev_info structure
 *
 * It is a waste of resources to have more than one pdflush thread blocked on
 * a single request queue.  Exclusion at the request_queue level is obtained
 * via a flag in the request_queue's backing_dev_info.state.
 *
 * Non-request_queue-backed address_spaces will share default_backing_dev_info,
 * unless they implement their own.  Which is somewhat inefficient, as this
 * may prevent concurrent writeback against multiple devices.
 */
int writeback_acquire(struct backing_dev_info *bdi)
{
	return !test_and_set_bit(BDI_pdflush, &bdi->state);
}

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback in progress against a backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_pdflush, &bdi->state);
}

/**
 * writeback_release - relinquish exclusive writeback access against a device.
 * @bdi: the device's backing_dev_info structure
 */
void writeback_release(struct backing_dev_info *bdi)
{
	BUG_ON(!writeback_in_progress(bdi));
	clear_bit(BDI_pdflush, &bdi->state);
}
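
/*
 * Illustrative sketch (not part of the original file): the acquire/release
 * pattern that sync_sb_inodes() above uses around per-queue writeback,
 * shown in isolation.  The function name is hypothetical.
 */
#if 0
static void example_exclusive_writeback(struct backing_dev_info *bdi,
					struct writeback_control *wbc)
{
	if (!writeback_acquire(bdi))
		return;		/* another pdflush thread owns this queue */
	writeback_inodes(wbc);
	writeback_release(bdi);
}
#endif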