/*
 * mm/page-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002	akpm@zip.com.au
 *		Initial version
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>

/*
 * The maximum number of pages to write out in a single bdflush/kupdate
 * operation.  We do this so we don't hold I_LOCK against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty limits each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES	1024
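/*
 * Illustrative note (not in the original source): with a typical 4 KiB page
 * size, MAX_WRITEBACK_PAGES corresponds to roughly 4 MiB of data per
 * writeback pass before the dirty limits are re-evaluated.
 */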

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

static int dirty_exceeded __cacheline_aligned_in_smp;	/* Dirty mem may be over limit */

/*
 * When balance_dirty_pages decides that the caller needs to perform some
 * non-background writeback, this is how many pages it will attempt to write.
 * It should be somewhat larger than ratelimit_pages to ensure that reasonably
 * large amounts of I/O are submitted.
 */
static inline long sync_writeback_pages(void)
{
	return ratelimit_pages + ratelimit_pages / 2;
}
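/*
 * Illustrative example (not in the original source): if writeback_set_ratelimit()
 * below has capped ratelimit_pages at 1024 pages (4 MiB with 4 KiB pages), an
 * over-limit writer entering balance_dirty_pages() will attempt to write
 * 1024 + 1024/2 = 1536 pages, i.e. about 6 MiB, per pass.
 */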

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via pdflush) at this percentage
 */
int dirty_background_ratio = 10;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 40;

/*
 * The interval between `kupdate'-style writebacks, in jiffies
 */
int dirty_writeback_interval = 5 * HZ;

/*
 * The longest number of jiffies for which data is allowed to remain dirty
 */
int dirty_expire_interval = 30 * HZ;

/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 * a full sync is triggered after this time elapses without any disk activity.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */

static void background_writeout(unsigned long _min_pages);

/*
 * Work out the current dirty-memory clamping and background writeout
 * thresholds.
 *
 * The main aim here is to lower them aggressively if there is a lot of mapped
 * memory around, to avoid stressing page reclaim with lots of unreclaimable
 * pages.  It is better to clamp down on writers than to start swapping and
 * performing lots of scanning.
 *
 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 *
 * We don't permit the clamping level to fall below 5% - that is getting rather
 * excessive.
 *
 * We make sure that the background writeout level is below the adjusted
 * clamping level.
 */
static void
get_dirty_limits(long *pbackground, long *pdirty,
		 struct address_space *mapping)
{
	int background_ratio;		/* Percentages */
	int dirty_ratio;
	int unmapped_ratio;
	long background;
	long dirty;
	unsigned long available_memory = vm_total_pages;
	struct task_struct *tsk;

#ifdef CONFIG_HIGHMEM
	/*
	 * We always exclude high memory from our count.
	 */
	available_memory -= totalhigh_pages;
#endif

	unmapped_ratio = 100 - ((global_page_state(NR_FILE_MAPPED) +
				global_page_state(NR_ANON_PAGES)) * 100) /
					vm_total_pages;

	dirty_ratio = vm_dirty_ratio;
	if (dirty_ratio > unmapped_ratio / 2)
		dirty_ratio = unmapped_ratio / 2;

	if (dirty_ratio < 5)
		dirty_ratio = 5;

	background_ratio = dirty_background_ratio;
	if (background_ratio >= dirty_ratio)
		background_ratio = dirty_ratio / 2;

	background = (background_ratio * available_memory) / 100;
	dirty = (dirty_ratio * available_memory) / 100;
	tsk = current;
	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
		background += background / 4;
		dirty += dirty / 4;
	}
	*pbackground = background;
	*pdirty = dirty;
}
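/*
 * Illustrative example (not in the original source): on a machine with no
 * highmem, 1,000,000 total pages and 40% of memory mapped, unmapped_ratio is
 * 60, so vm_dirty_ratio (default 40) is clamped to 30 while
 * dirty_background_ratio stays at its default of 10.  The resulting limits
 * are roughly a 300,000-page dirty threshold and a 100,000-page background
 * threshold.
 */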

/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 * If we're over `background_thresh' then pdflush is woken to perform some
 * writeout.
 */
static void balance_dirty_pages(struct address_space *mapping)
{
	long nr_reclaimable;
	long background_thresh;
	long dirty_thresh;
	unsigned long pages_written = 0;
	unsigned long write_chunk = sync_writeback_pages();

	struct backing_dev_info *bdi = mapping->backing_dev_info;

	for (;;) {
		struct writeback_control wbc = {
			.bdi		= bdi,
			.sync_mode	= WB_SYNC_NONE,
			.older_than_this = NULL,
			.nr_to_write	= write_chunk,
			.range_cyclic	= 1,
		};

		get_dirty_limits(&background_thresh, &dirty_thresh, mapping);
		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
					global_page_state(NR_UNSTABLE_NFS);
		if (nr_reclaimable + global_page_state(NR_WRITEBACK) <=
			dirty_thresh)
				break;

		if (!dirty_exceeded)
			dirty_exceeded = 1;

		/* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
		 * Unstable writes are a feature of certain networked
		 * filesystems (i.e. NFS) in which data may have been
		 * written to the server's write cache, but has not yet
		 * been flushed to permanent storage.
		 */
		if (nr_reclaimable) {
			writeback_inodes(&wbc);
			get_dirty_limits(&background_thresh,
					&dirty_thresh, mapping);
			nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
					global_page_state(NR_UNSTABLE_NFS);
			if (nr_reclaimable +
				global_page_state(NR_WRITEBACK)
					<= dirty_thresh)
						break;
			pages_written += write_chunk - wbc.nr_to_write;
			if (pages_written >= write_chunk)
				break;		/* We've done our duty */
		}
		congestion_wait(WRITE, HZ/10);
	}

	if (nr_reclaimable + global_page_state(NR_WRITEBACK)
		<= dirty_thresh && dirty_exceeded)
			dirty_exceeded = 0;

	if (writeback_in_progress(bdi))
		return;		/* pdflush is already working this queue */

	/*
	 * In laptop mode, we wait until hitting the higher threshold before
	 * starting background writeout, and then write out all the way down
	 * to the lower threshold.  So slow writers cause minimal disk activity.
	 *
	 * In normal mode, we start background writeout at the lower
	 * background_thresh, to keep the amount of dirty memory low.
	 */
	if ((laptop_mode && pages_written) ||
	     (!laptop_mode && (nr_reclaimable > background_thresh)))
		pdflush_operation(background_writeout, 0);
}

void set_page_dirty_balance(struct page *page)
{
	if (set_page_dirty(page)) {
		struct address_space *mapping = page_mapping(page);

		if (mapping)
			balance_dirty_pages_ratelimited(mapping);
	}
}

/**
 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
 * @mapping: address_space which was dirtied
 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_writeback_state is expensive, so try to avoid
 * calling it too often (ratelimiting).  But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
					unsigned long nr_pages_dirtied)
{
	static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
	unsigned long ratelimit;
	unsigned long *p;

	ratelimit = ratelimit_pages;
	if (dirty_exceeded)
		ratelimit = 8;

	/*
	 * Check the rate limiting. Also, we do not want to throttle real-time
	 * tasks in balance_dirty_pages(). Period.
	 */
	preempt_disable();
	p = &__get_cpu_var(ratelimits);
	*p += nr_pages_dirtied;
	if (unlikely(*p >= ratelimit)) {
		*p = 0;
		preempt_enable();
		balance_dirty_pages(mapping);
		return;
	}
	preempt_enable();
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
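/*
 * Usage sketch (not part of the original file): write paths normally call the
 * balance_dirty_pages_ratelimited(mapping) wrapper from <linux/writeback.h>,
 * which passes nr_pages_dirtied == 1, once per newly dirtied page, e.g.:
 *
 *	set_page_dirty(page);
 *	unlock_page(page);
 *	balance_dirty_pages_ratelimited(mapping);
 */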

void throttle_vm_writeout(void)
{
	long background_thresh;
	long dirty_thresh;

	for ( ; ; ) {
		get_dirty_limits(&background_thresh, &dirty_thresh, NULL);

		/*
		 * Boost the allowable dirty threshold a bit for page
		 * allocators so they don't get DoS'ed by heavy writers
		 */
		dirty_thresh += dirty_thresh / 10;	/* wheeee... */

		if (global_page_state(NR_UNSTABLE_NFS) +
			global_page_state(NR_WRITEBACK) <= dirty_thresh)
				break;
		congestion_wait(WRITE, HZ/10);
	}
}

/*
 * writeback at least _min_pages, and keep writing until the amount of dirty
 * memory is less than the background threshold, or until we're all clean.
 */
static void background_writeout(unsigned long _min_pages)
{
	long min_pages = _min_pages;
	struct writeback_control wbc = {
		.bdi		= NULL,
		.sync_mode	= WB_SYNC_NONE,
		.older_than_this = NULL,
		.nr_to_write	= 0,
		.nonblocking	= 1,
		.range_cyclic	= 1,
	};

	for ( ; ; ) {
		long background_thresh;
		long dirty_thresh;

		get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
		if (global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS) < background_thresh
				&& min_pages <= 0)
			break;
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		wbc.pages_skipped = 0;
		writeback_inodes(&wbc);
		min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
		if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
			/* Wrote less than expected */
			congestion_wait(WRITE, HZ/10);
			if (!wbc.encountered_congestion)
				break;
		}
	}
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.  Returns 0 if a pdflush thread was dispatched.  Returns
 * -1 if all pdflush threads were busy.
 */
int wakeup_pdflush(long nr_pages)
{
	if (nr_pages == 0)
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	return pdflush_operation(background_writeout, nr_pages);
}

static void wb_timer_fn(unsigned long unused);
static void laptop_timer_fn(unsigned long unused);

static DEFINE_TIMER(wb_timer, wb_timer_fn, 0, 0);
static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);

/*
 * Periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval, then leave a one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static void wb_kupdate(unsigned long arg)
{
	unsigned long oldest_jif;
	unsigned long start_jif;
	unsigned long next_jif;
	long nr_to_write;
	struct writeback_control wbc = {
		.bdi		= NULL,
		.sync_mode	= WB_SYNC_NONE,
		.older_than_this = &oldest_jif,
		.nr_to_write	= 0,
		.nonblocking	= 1,
		.for_kupdate	= 1,
		.range_cyclic	= 1,
	};

	sync_supers();

	oldest_jif = jiffies - dirty_expire_interval;
	start_jif = jiffies;
	next_jif = start_jif + dirty_writeback_interval;
	nr_to_write = global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS) +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
	while (nr_to_write > 0) {
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		writeback_inodes(&wbc);
		if (wbc.nr_to_write > 0) {
			if (wbc.encountered_congestion)
				congestion_wait(WRITE, HZ/10);
			else
				break;	/* All the old data is written */
		}
		nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
	}
	if (time_before(next_jif, jiffies + HZ))
		next_jif = jiffies + HZ;
	if (dirty_writeback_interval)
		mod_timer(&wb_timer, next_jif);
}
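/*
 * Illustrative timing (not in the original source): with the defaults above
 * (dirty_writeback_interval = 5 * HZ, dirty_expire_interval = 30 * HZ),
 * wb_kupdate runs about every 5 seconds and writes back inodes dirtied more
 * than 30 seconds ago, so a dirty page normally waits at most ~35 seconds
 * before the periodic writeback picks it up.
 */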

/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_userhz_jiffies(table, write, file, buffer, length, ppos);
	if (dirty_writeback_interval) {
		mod_timer(&wb_timer,
			jiffies + dirty_writeback_interval);
	} else {
		del_timer(&wb_timer);
	}
	return 0;
}
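/*
 * Illustrative note (not in the original source): proc_dointvec_userhz_jiffies
 * converts between the centisecond value seen in /proc and jiffies, so writing
 * 500 to /proc/sys/vm/dirty_writeback_centisecs stores 5 * HZ in
 * dirty_writeback_interval and re-arms the timer 5 seconds out.
 */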

static void wb_timer_fn(unsigned long unused)
{
	if (pdflush_operation(wb_kupdate, 0) < 0)
		mod_timer(&wb_timer, jiffies + HZ); /* delay 1 second */
}

static void laptop_flush(unsigned long unused)
{
	sys_sync();
}

static void laptop_timer_fn(unsigned long unused)
{
	pdflush_operation(laptop_flush, 0);
}

/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now.  If the flush is already scheduled
 * then push it back - the user is still using the disk.
 */
void laptop_io_completion(void)
{
	mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode);
}
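/*
 * Illustrative example (not in the original source): if laptop_mode holds the
 * equivalent of 5 seconds in jiffies, every completed disk I/O pushes the
 * full-sync timer 5 seconds into the future, so the sync only fires once the
 * disk has been idle for that long.
 */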

/*
 * We're in laptop mode and we've just synced.  The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Nothing needs to be written back anymore, so we unschedule the writeback.
 */
void laptop_sync_completion(void)
{
	del_timer(&laptop_mode_wb_timer);
}

/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will call the (expensive)
 * get_writeback_state too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds before writeback cuts in.
 *
 * But the limit should not be set too high.  Because it also controls the
 * amount of memory which the balance_dirty_pages() caller has to write back.
 * If this is too large then the caller will block on the IO queue all the
 * time.  So limit it to four megabytes - the balance_dirty_pages() caller
 * will write six megabyte chunks, max.
 */

void writeback_set_ratelimit(void)
{
	ratelimit_pages = vm_total_pages / (num_online_cpus() * 32);
	if (ratelimit_pages < 16)
		ratelimit_pages = 16;
	if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
		ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
}
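/*
 * Illustrative example (not in the original source): on a 4-CPU machine with
 * 1 GiB of RAM (262,144 pages of 4 KiB), the raw value is
 * 262144 / (4 * 32) = 2048 pages; since 2048 * 4 KiB = 8 MiB exceeds the
 * four-megabyte cap, ratelimit_pages is clamped to 1024, and
 * sync_writeback_pages() will then write 1536-page (6 MiB) chunks.
 */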

static int __cpuinit
ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
{
	writeback_set_ratelimit();
	return 0;
}

static struct notifier_block __cpuinitdata ratelimit_nb = {
	.notifier_call	= ratelimit_handler,
	.next		= NULL,
};

/*
 * Called early on to tune the page writeback dirty limits.
 *
 * We used to scale dirty pages according to how total memory
 * related to pages that could be allocated for buffers (by
 * comparing nr_free_buffer_pages() to vm_total_pages).
 *
 * However, that was when we used "dirty_ratio" to scale with
 * all memory, and we don't do that any more.  "dirty_ratio"
 * is now applied to total non-HIGHPAGE memory (by subtracting
 * totalhigh_pages from vm_total_pages), and as such we can't
 * get into the old insane situation any more where we had
 * large amounts of dirty pages compared to a small amount of
 * non-HIGHMEM memory.
 *
 * But we might still want to scale the dirty_ratio by how
 * much memory the box has.
 */
void __init page_writeback_init(void)
{
	mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
	writeback_set_ratelimit();
	register_cpu_notifier(&ratelimit_nb);
}

/**
 * generic_writepages - walk the list of dirty pages of the given
 *                      address space and writepage() all of them.
 *
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 *
 * If a page is already under I/O, generic_writepages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 *
 * Derived from mpage_writepages() - if you fix this you should check that
 * also!
 */
int generic_writepages(struct address_space *mapping,
		       struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	int ret = 0;
	int done = 0;
	int (*writepage)(struct page *page, struct writeback_control *wbc);
	struct pagevec pvec;
	int nr_pages;
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	int scanned = 0;
	int range_whole = 0;

	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		return 0;
	}

	writepage = mapping->a_ops->writepage;

	/* deal with chardevs and other special files */
	if (!writepage)
		return 0;

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		scanned = 1;
	}
retry:
	while (!done && (index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_DIRTY,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
		unsigned i;

		scanned = 1;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */
			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				continue;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = 1;
				unlock_page(page);
				continue;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
			    !clear_page_dirty_for_io(page)) {
				unlock_page(page);
				continue;
			}

			ret = (*writepage)(page, wbc);
			if (ret) {
				if (ret == -ENOSPC)
					set_bit(AS_ENOSPC, &mapping->flags);
				else
					set_bit(AS_EIO, &mapping->flags);
			}

			if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE))
				unlock_page(page);
			if (ret || (--(wbc->nr_to_write) <= 0))
				done = 1;
			if (wbc->nonblocking && bdi_write_congested(bdi)) {
				wbc->encountered_congestion = 1;
				done = 1;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = 1;
		index = 0;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;
	return ret;
}

EXPORT_SYMBOL(generic_writepages);

int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	int ret;

	if (wbc->nr_to_write <= 0)
		return 0;
	wbc->for_writepages = 1;
	if (mapping->a_ops->writepages)
		ret = mapping->a_ops->writepages(mapping, wbc);
	else
		ret = generic_writepages(mapping, wbc);
	wbc->for_writepages = 0;
	return ret;
}
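/*
 * Sketch of callers (not part of the original file): do_writepages() is the
 * common entry point both for the pdflush paths in this file (reached via
 * writeback_inodes() and the per-inode writeback code in fs/fs-writeback.c)
 * and for data-sync paths such as filemap_fdatawrite(), which build a
 * writeback_control and hand it to the mapping here.
 */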

/**
 * write_one_page - write out a single page and optionally wait on I/O
 *
 * @page: the page to write
 * @wait: if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
int write_one_page(struct page *page, int wait)
{
	struct address_space *mapping = page->mapping;
	int ret = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	BUG_ON(!PageLocked(page));

	if (wait)
		wait_on_page_writeback(page);

	if (clear_page_dirty_for_io(page)) {
		page_cache_get(page);
		ret = mapping->a_ops->writepage(page, &wbc);
		if (ret == 0 && wait) {
			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		page_cache_release(page);
	} else {
		unlock_page(page);
	}
	return ret;
}
EXPORT_SYMBOL(write_one_page);

/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * But zap_pte_range() does not lock the page, however in that case the
 * mapping is pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
	if (!TestSetPageDirty(page)) {
		struct address_space *mapping = page_mapping(page);
		struct address_space *mapping2;

		if (!mapping)
			return 1;

		write_lock_irq(&mapping->tree_lock);
		mapping2 = page_mapping(page);
		if (mapping2) { /* Race with truncate? */
			BUG_ON(mapping2 != mapping);
			if (mapping_cap_account_dirty(mapping)) {
				__inc_zone_page_state(page, NR_FILE_DIRTY);
				task_io_account_write(PAGE_CACHE_SIZE);
			}
			radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
		}
		write_unlock_irq(&mapping->tree_lock);
		if (mapping->host) {
			/* !PageAnon && !swapper_space */
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
		}
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);

/*
 * When a writepage implementation decides that it doesn't want to write this
 * page for some reason, it should redirty the locked page via
 * redirty_page_for_writepage() and it should then unlock the page and return 0
 */
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
	wbc->pages_skipped++;
	return __set_page_dirty_nobuffers(page);
}
EXPORT_SYMBOL(redirty_page_for_writepage);

/*
 * If the mapping doesn't provide a set_page_dirty a_op, then
 * just fall through and assume that it wants buffer_heads.
 */
int fastcall set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (likely(mapping)) {
		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
#ifdef CONFIG_BLOCK
		if (!spd)
			spd = __set_page_dirty_buffers;
#endif
		return (*spd)(page);
	}
	if (!PageDirty(page)) {
		if (!TestSetPageDirty(page))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(set_page_dirty);

/*
 * set_page_dirty() is racy if the caller has no reference against
 * page->mapping->host, and if the page is unlocked.  This is because another
 * CPU could truncate the page off the mapping and then free the mapping.
 *
 * Usually, the page _is_ locked, or the caller is a user-space process which
 * holds a reference on the inode by having an open file.
 *
 * In other cases, the page should be locked before running set_page_dirty().
 */
int set_page_dirty_lock(struct page *page)
{
	int ret;

	lock_page_nosync(page);
	ret = set_page_dirty(page);
	unlock_page(page);
	return ret;
}
EXPORT_SYMBOL(set_page_dirty_lock);

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 *
 * This is for preparing to put the page under writeout.  We leave the page
 * tagged as dirty in the radix tree so that a concurrent write-for-sync
 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
 * implementation will run either set_page_writeback() or set_page_dirty(),
 * at which stage we bring the page's dirty flag and radix-tree dirty tag
 * back into sync.
 *
 * This incoherency between the page's dirty flag and radix-tree tag is
 * unfortunate, but it only exists while the page is locked.
 */
int clear_page_dirty_for_io(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (mapping && mapping_cap_account_dirty(mapping)) {
		/*
		 * Yes, Virginia, this is indeed insane.
		 *
		 * We use this sequence to make sure that
		 *  (a) we account for dirty stats properly
		 *  (b) we tell the low-level filesystem to
		 *      mark the whole page dirty if it was
		 *      dirty in a pagetable. Only to then
		 *  (c) clean the page again and return 1 to
		 *      cause the writeback.
		 *
		 * This way we avoid all nasty races with the
		 * dirty bit in multiple places and clearing
		 * them concurrently from different threads.
		 *
		 * Note! Normally the "set_page_dirty(page)"
		 * has no effect on the actual dirty bit - since
		 * that will already usually be set. But we
		 * need the side effects, and it can help us
		 * avoid races.
		 *
		 * We basically use the page "master dirty bit"
		 * as a serialization point for all the different
		 * threads doing their things.
		 *
		 * FIXME! We still have a race here: if somebody
		 * adds the page back to the page tables in
		 * between the "page_mkclean()" and the "TestClearPageDirty()",
		 * we might have it mapped without the dirty bit set.
		 */
		if (page_mkclean(page))
			set_page_dirty(page);
		if (TestClearPageDirty(page)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			return 1;
		}
		return 0;
	}
	return TestClearPageDirty(page);
}
EXPORT_SYMBOL(clear_page_dirty_for_io);

int test_clear_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		unsigned long flags;

		write_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestClearPageWriteback(page);
		if (ret)
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
		write_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestClearPageWriteback(page);
	}
	return ret;
}

int test_set_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		unsigned long flags;

		write_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestSetPageWriteback(page);
		if (!ret)
			radix_tree_tag_set(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
		if (!PageDirty(page))
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
		write_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestSetPageWriteback(page);
	}
	return ret;
}
EXPORT_SYMBOL(test_set_page_writeback);

/*
 * Return true if any of the pages in the mapping are marked with the
 * passed tag.
 */
int mapping_tagged(struct address_space *mapping, int tag)
{
	unsigned long flags;
	int ret;

	read_lock_irqsave(&mapping->tree_lock, flags);
	ret = radix_tree_tagged(&mapping->page_tree, tag);
	read_unlock_irqrestore(&mapping->tree_lock, flags);
	return ret;
}
EXPORT_SYMBOL(mapping_tagged);