/*
 * mm/page-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002    akpm@zip.com.au
 *              Initial version
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>

/*
 * The maximum number of pages to writeout in a single bdflush/kupdate
 * operation.  We do this so we don't hold I_LOCK against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty limits each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES     1024

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

static long total_pages;        /* The total number of pages in the machine. */
static int dirty_exceeded __cacheline_aligned_in_smp;   /* Dirty mem may be over limit */

/*
 * When balance_dirty_pages decides that the caller needs to perform some
 * non-background writeback, this is how many pages it will attempt to write.
 * It should be somewhat larger than RATELIMIT_PAGES to ensure that reasonably
 * large amounts of I/O are submitted.
 */
static inline long sync_writeback_pages(void)
{
        return ratelimit_pages + ratelimit_pages / 2;
}

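/*
 * Worked example (illustrative, assuming 4KB pages): on machines large
 * enough that set_ratelimit() below clamps ratelimit_pages to its 4MB cap
 * (1024 pages), sync_writeback_pages() returns 1024 + 512 = 1536 pages,
 * i.e. the "six megabyte chunks, max" mentioned in the set_ratelimit()
 * comment.
 */
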
/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via pdflush) at this percentage
 */
int dirty_background_ratio = 10;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 40;

/*
 * The interval between `kupdate'-style writebacks, in jiffies
 */
int dirty_writeback_interval = 5 * HZ;

/*
 * The longest number of jiffies for which data is allowed to remain dirty
 */
int dirty_expire_interval = 30 * HZ;

/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 * a full sync is triggered after this time elapses without any disk activity.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */

static void background_writeout(unsigned long _min_pages);

/*
 * Work out the current dirty-memory clamping and background writeout
 * thresholds.
 *
 * The main aim here is to lower them aggressively if there is a lot of mapped
 * memory around, to avoid stressing page reclaim with lots of unreclaimable
 * pages.  It is better to clamp down on writers than to start swapping and
 * performing lots of scanning.
 *
 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 *
 * We don't permit the clamping level to fall below 5% - that is getting rather
 * excessive.
 *
 * We make sure that the background writeout level is below the adjusted
 * clamping level.
 */
static void
get_dirty_limits(long *pbackground, long *pdirty,
                                        struct address_space *mapping)
{
        int background_ratio;           /* Percentages */
        int dirty_ratio;
        int unmapped_ratio;
        long background;
        long dirty;
        unsigned long available_memory = total_pages;
        struct task_struct *tsk;

#ifdef CONFIG_HIGHMEM
        /*
         * If this mapping can only allocate from low memory,
         * we exclude high memory from our count.
         */
        if (mapping && !(mapping_gfp_mask(mapping) & __GFP_HIGHMEM))
                available_memory -= totalhigh_pages;
#endif

        unmapped_ratio = 100 - ((global_page_state(NR_FILE_MAPPED) +
                                global_page_state(NR_ANON_PAGES)) * 100) /
                                        total_pages;

        dirty_ratio = vm_dirty_ratio;
        if (dirty_ratio > unmapped_ratio / 2)
                dirty_ratio = unmapped_ratio / 2;

        if (dirty_ratio < 5)
                dirty_ratio = 5;

        background_ratio = dirty_background_ratio;
        if (background_ratio >= dirty_ratio)
                background_ratio = dirty_ratio / 2;

        background = (background_ratio * available_memory) / 100;
        dirty = (dirty_ratio * available_memory) / 100;
        tsk = current;
        if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
                background += background / 4;
                dirty += dirty / 4;
        }
        *pbackground = background;
        *pdirty = dirty;
}

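/*
 * Worked example (illustrative figures): with vm_dirty_ratio = 40 and 70%
 * of memory mapped, unmapped_ratio is 30, so dirty_ratio is clamped to
 * 30/2 = 15.  dirty_background_ratio (10) is already below that and is
 * left alone.  The returned thresholds are then 15% and 10% of
 * available_memory respectively.
 */
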
/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 * If we're over `background_thresh' then pdflush is woken to perform some
 * writeout.
 */
static void balance_dirty_pages(struct address_space *mapping)
{
        long nr_reclaimable;
        long background_thresh;
        long dirty_thresh;
        unsigned long pages_written = 0;
        unsigned long write_chunk = sync_writeback_pages();

        struct backing_dev_info *bdi = mapping->backing_dev_info;

        for (;;) {
                struct writeback_control wbc = {
                        .bdi = bdi,
                        .sync_mode = WB_SYNC_NONE,
                        .older_than_this = NULL,
                        .nr_to_write = write_chunk,
                        .range_cyclic = 1,
                };

                get_dirty_limits(&background_thresh, &dirty_thresh, mapping);
                nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
                                        global_page_state(NR_UNSTABLE_NFS);
                if (nr_reclaimable + global_page_state(NR_WRITEBACK) <=
                        dirty_thresh)
                                break;

                if (!dirty_exceeded)
                        dirty_exceeded = 1;

                /* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
                 * Unstable writes are a feature of certain networked
                 * filesystems (e.g. NFS) in which data may have been
                 * written to the server's write cache, but has not yet
                 * been flushed to permanent storage.
                 */
                if (nr_reclaimable) {
                        writeback_inodes(&wbc);
                        get_dirty_limits(&background_thresh,
                                        &dirty_thresh, mapping);
                        nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
                                        global_page_state(NR_UNSTABLE_NFS);
                        if (nr_reclaimable +
                                global_page_state(NR_WRITEBACK)
                                        <= dirty_thresh)
                                                break;
                        pages_written += write_chunk - wbc.nr_to_write;
                        if (pages_written >= write_chunk)
                                break;          /* We've done our duty */
                }
                blk_congestion_wait(WRITE, HZ/10);
        }

        if (nr_reclaimable + global_page_state(NR_WRITEBACK)
                <= dirty_thresh && dirty_exceeded)
                        dirty_exceeded = 0;

        if (writeback_in_progress(bdi))
                return;         /* pdflush is already working this queue */

        /*
         * In laptop mode, we wait until hitting the higher threshold before
         * starting background writeout, and then write out all the way down
         * to the lower threshold.  So slow writers cause minimal disk activity.
         *
         * In normal mode, we start background writeout at the lower
         * background_thresh, to keep the amount of dirty memory low.
         */
        if ((laptop_mode && pages_written) ||
             (!laptop_mode && (nr_reclaimable > background_thresh)))
                pdflush_operation(background_writeout, 0);
}

void set_page_dirty_balance(struct page *page)
{
        if (set_page_dirty(page)) {
                struct address_space *mapping = page_mapping(page);

                if (mapping)
                        balance_dirty_pages_ratelimited(mapping);
        }
}

/**
 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
 * @mapping: address_space which was dirtied
 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_writeback_state is expensive, so try to avoid
 * calling it too often (ratelimiting).  But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
                                        unsigned long nr_pages_dirtied)
{
        static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
        unsigned long ratelimit;
        unsigned long *p;

        ratelimit = ratelimit_pages;
        if (dirty_exceeded)
                ratelimit = 8;

        /*
         * Check the rate limiting. Also, we do not want to throttle real-time
         * tasks in balance_dirty_pages(). Period.
         */
        preempt_disable();
        p = &__get_cpu_var(ratelimits);
        *p += nr_pages_dirtied;
        if (unlikely(*p >= ratelimit)) {
                *p = 0;
                preempt_enable();
                balance_dirty_pages(mapping);
                return;
        }
        preempt_enable();
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
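
/*
 * Typical call path (sketch; assumes the single-page wrapper declared in
 * <linux/writeback.h>): a filesystem write path that has just dirtied one
 * page calls balance_dirty_pages_ratelimited(mapping), which is equivalent
 * to balance_dirty_pages_ratelimited_nr(mapping, 1) - exactly what
 * set_page_dirty_balance() above does.
 */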

void throttle_vm_writeout(void)
{
        long background_thresh;
        long dirty_thresh;

        for ( ; ; ) {
                get_dirty_limits(&background_thresh, &dirty_thresh, NULL);

                /*
                 * Boost the allowable dirty threshold a bit for page
                 * allocators so they don't get DoS'ed by heavy writers
                 */
                dirty_thresh += dirty_thresh / 10;      /* wheeee... */

                if (global_page_state(NR_UNSTABLE_NFS) +
                        global_page_state(NR_WRITEBACK) <= dirty_thresh)
                                break;
                blk_congestion_wait(WRITE, HZ/10);
        }
}

/*
 * Write back at least _min_pages, and keep writing until the amount of dirty
 * memory is less than the background threshold, or until we're all clean.
 */
static void background_writeout(unsigned long _min_pages)
{
        long min_pages = _min_pages;
        struct writeback_control wbc = {
                .bdi = NULL,
                .sync_mode = WB_SYNC_NONE,
                .older_than_this = NULL,
                .nr_to_write = 0,
                .nonblocking = 1,
                .range_cyclic = 1,
        };

        for ( ; ; ) {
                long background_thresh;
                long dirty_thresh;

                get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
                if (global_page_state(NR_FILE_DIRTY) +
                        global_page_state(NR_UNSTABLE_NFS) < background_thresh
                                && min_pages <= 0)
                        break;
                wbc.encountered_congestion = 0;
                wbc.nr_to_write = MAX_WRITEBACK_PAGES;
                wbc.pages_skipped = 0;
                writeback_inodes(&wbc);
                min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
                if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
                        /* Wrote less than expected */
                        blk_congestion_wait(WRITE, HZ/10);
                        if (!wbc.encountered_congestion)
                                break;
                }
        }
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.  Returns 0 if a pdflush thread was dispatched.  Returns
 * -1 if all pdflush threads were busy.
 */
int wakeup_pdflush(long nr_pages)
{
        if (nr_pages == 0)
                nr_pages = global_page_state(NR_FILE_DIRTY) +
                                global_page_state(NR_UNSTABLE_NFS);
        return pdflush_operation(background_writeout, nr_pages);
}

static void wb_timer_fn(unsigned long unused);
static void laptop_timer_fn(unsigned long unused);

static DEFINE_TIMER(wb_timer, wb_timer_fn, 0, 0);
static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);

/*
 * Periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than one dirty_writeback_interval, then leave a one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static void wb_kupdate(unsigned long arg)
{
        unsigned long oldest_jif;
        unsigned long start_jif;
        unsigned long next_jif;
        long nr_to_write;
        struct writeback_control wbc = {
                .bdi = NULL,
                .sync_mode = WB_SYNC_NONE,
                .older_than_this = &oldest_jif,
                .nr_to_write = 0,
                .nonblocking = 1,
                .for_kupdate = 1,
                .range_cyclic = 1,
        };

        sync_supers();

        oldest_jif = jiffies - dirty_expire_interval;
        start_jif = jiffies;
        next_jif = start_jif + dirty_writeback_interval;
        nr_to_write = global_page_state(NR_FILE_DIRTY) +
                        global_page_state(NR_UNSTABLE_NFS) +
                        (inodes_stat.nr_inodes - inodes_stat.nr_unused);
        while (nr_to_write > 0) {
                wbc.encountered_congestion = 0;
                wbc.nr_to_write = MAX_WRITEBACK_PAGES;
                writeback_inodes(&wbc);
                if (wbc.nr_to_write > 0) {
                        if (wbc.encountered_congestion)
                                blk_congestion_wait(WRITE, HZ/10);
                        else
                                break;  /* All the old data is written */
                }
                nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
        }
        if (time_before(next_jif, jiffies + HZ))
                next_jif = jiffies + HZ;
        if (dirty_writeback_interval)
                mod_timer(&wb_timer, next_jif);
}

/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
        struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
        proc_dointvec_userhz_jiffies(table, write, file, buffer, length, ppos);
        if (dirty_writeback_interval) {
                mod_timer(&wb_timer,
                        jiffies + dirty_writeback_interval);
        } else {
                del_timer(&wb_timer);
        }
        return 0;
}
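
/*
 * Note: the sysctl value is expressed in centisecs; proc_dointvec_userhz_jiffies()
 * converts it to jiffies on write.  For example, writing 500 to
 * /proc/sys/vm/dirty_writeback_centisecs sets dirty_writeback_interval to
 * 5*HZ and re-arms wb_timer five seconds from now.
 */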

static void wb_timer_fn(unsigned long unused)
{
        if (pdflush_operation(wb_kupdate, 0) < 0)
                mod_timer(&wb_timer, jiffies + HZ); /* delay 1 second */
}

static void laptop_flush(unsigned long unused)
{
        sys_sync();
}

static void laptop_timer_fn(unsigned long unused)
{
        pdflush_operation(laptop_flush, 0);
}

/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now.  If the flush is already scheduled
 * then push it back - the user is still using the disk.
 */
void laptop_io_completion(void)
{
        mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode);
}

/*
 * We're in laptop mode and we've just synced.  The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Nothing needs to be written back anymore, so we unschedule the writeback.
 */
void laptop_sync_completion(void)
{
        del_timer(&laptop_mode_wb_timer);
}

/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will call the (expensive)
 * get_writeback_state too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds before writeback cuts in.
 *
 * But the limit should not be set too high, because it also controls the
 * amount of memory which the balance_dirty_pages() caller has to write back.
 * If this is too large then the caller will block on the IO queue all the
 * time.  So limit it to four megabytes - the balance_dirty_pages() caller
 * will write six megabyte chunks, max.
 */

static void set_ratelimit(void)
{
        ratelimit_pages = total_pages / (num_online_cpus() * 32);
        if (ratelimit_pages < 16)
                ratelimit_pages = 16;
        if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
                ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
}

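/*
 * Worked example (illustrative figures, assuming 4KB pages): on a 2-CPU
 * machine with 512MB (131072 pages), total_pages / (2 * 32) = 2048 pages
 * = 8MB, which exceeds the 4MB cap, so ratelimit_pages becomes 1024.
 * Each CPU may then dirty at most 1024 pages between checks, i.e. at most
 * 2 * 1024 / 131072 ~ 1.6% of memory past the threshold, within the
 * 3% (1/32) bound described above.
 */
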
static int __cpuinit
ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
{
        set_ratelimit();
        return 0;
}

static struct notifier_block __cpuinitdata ratelimit_nb = {
        .notifier_call = ratelimit_handler,
        .next = NULL,
};

/*
 * If the machine has a large highmem:lowmem ratio then scale back the default
 * dirty memory thresholds: allowing too much dirty highmem pins an excessive
 * number of buffer_heads.
 */
void __init page_writeback_init(void)
{
        long buffer_pages = nr_free_buffer_pages();
        long correction;

        total_pages = nr_free_pagecache_pages();

        correction = (100 * 4 * buffer_pages) / total_pages;

        if (correction < 100) {
                dirty_background_ratio *= correction;
                dirty_background_ratio /= 100;
                vm_dirty_ratio *= correction;
                vm_dirty_ratio /= 100;

                if (dirty_background_ratio <= 0)
                        dirty_background_ratio = 1;
                if (vm_dirty_ratio <= 0)
                        vm_dirty_ratio = 1;
        }
        mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
        set_ratelimit();
        register_cpu_notifier(&ratelimit_nb);
}

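/*
 * Worked example (illustrative figures): if only 1/8 of the pages usable
 * for pagecache are lowmem buffer pages, correction = 100 * 4 / 8 = 50,
 * so the default 10%/40% ratios are scaled back to 5%/20%.  With 1/4 or
 * more of pagecache memory in lowmem, correction >= 100 and the defaults
 * are left untouched.
 */
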
int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
        int ret;

        if (wbc->nr_to_write <= 0)
                return 0;
        wbc->for_writepages = 1;
        if (mapping->a_ops->writepages)
                ret = mapping->a_ops->writepages(mapping, wbc);
        else
                ret = generic_writepages(mapping, wbc);
        wbc->for_writepages = 0;
        return ret;
}

/**
 * write_one_page - write out a single page and optionally wait on I/O
 *
 * @page: the page to write
 * @wait: if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
int write_one_page(struct page *page, int wait)
{
        struct address_space *mapping = page->mapping;
        int ret = 0;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = 1,
        };

        BUG_ON(!PageLocked(page));

        if (wait)
                wait_on_page_writeback(page);

        if (clear_page_dirty_for_io(page)) {
                page_cache_get(page);
                ret = mapping->a_ops->writepage(page, &wbc);
                if (ret == 0 && wait) {
                        wait_on_page_writeback(page);
                        if (PageError(page))
                                ret = -EIO;
                }
                page_cache_release(page);
        } else {
                unlock_page(page);
        }
        return ret;
}
EXPORT_SYMBOL(write_one_page);
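
/*
 * Typical usage (sketch): the caller locks the page first, e.g.
 *
 *      lock_page(page);
 *      err = write_one_page(page, 1);
 *
 * and the page comes back unlocked whether or not the write succeeded.
 */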

/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * But zap_pte_range() does not lock the page; in that case the mapping is
 * pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
        if (!TestSetPageDirty(page)) {
                struct address_space *mapping = page_mapping(page);
                struct address_space *mapping2;

                if (mapping) {
                        write_lock_irq(&mapping->tree_lock);
                        mapping2 = page_mapping(page);
                        if (mapping2) { /* Race with truncate? */
                                BUG_ON(mapping2 != mapping);
                                if (mapping_cap_account_dirty(mapping))
                                        __inc_zone_page_state(page,
                                                        NR_FILE_DIRTY);
                                radix_tree_tag_set(&mapping->page_tree,
                                        page_index(page), PAGECACHE_TAG_DIRTY);
                        }
                        write_unlock_irq(&mapping->tree_lock);
                        if (mapping->host) {
                                /* !PageAnon && !swapper_space */
                                __mark_inode_dirty(mapping->host,
                                                        I_DIRTY_PAGES);
                        }
                }
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);

/*
 * When a writepage implementation decides that it doesn't want to write this
 * page for some reason, it should redirty the locked page via
 * redirty_page_for_writepage() and it should then unlock the page and return 0.
 */
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
        wbc->pages_skipped++;
        return __set_page_dirty_nobuffers(page);
}
EXPORT_SYMBOL(redirty_page_for_writepage);

/*
 * If the mapping doesn't provide a set_page_dirty a_op, then
 * just fall through and assume that it wants buffer_heads.
 */
int fastcall set_page_dirty(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        if (likely(mapping)) {
                int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
                if (spd)
                        return (*spd)(page);
                return __set_page_dirty_buffers(page);
        }
        if (!PageDirty(page)) {
                if (!TestSetPageDirty(page))
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(set_page_dirty);

/*
 * set_page_dirty() is racy if the caller has no reference against
 * page->mapping->host, and if the page is unlocked.  This is because another
 * CPU could truncate the page off the mapping and then free the mapping.
 *
 * Usually, the page _is_ locked, or the caller is a user-space process which
 * holds a reference on the inode by having an open file.
 *
 * In other cases, the page should be locked before running set_page_dirty().
 */
int set_page_dirty_lock(struct page *page)
{
        int ret;

        lock_page(page);
        ret = set_page_dirty(page);
        unlock_page(page);
        return ret;
}
EXPORT_SYMBOL(set_page_dirty_lock);

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 */
int test_clear_page_dirty(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        unsigned long flags;

        if (mapping) {
                write_lock_irqsave(&mapping->tree_lock, flags);
                if (TestClearPageDirty(page)) {
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_DIRTY);
                        write_unlock_irqrestore(&mapping->tree_lock, flags);
                        /*
                         * We can continue to use `mapping' here because the
                         * page is locked, which pins the address_space
                         */
                        if (mapping_cap_account_dirty(mapping)) {
                                page_mkclean(page);
                                dec_zone_page_state(page, NR_FILE_DIRTY);
                        }
                        return 1;
                }
                write_unlock_irqrestore(&mapping->tree_lock, flags);
                return 0;
        }
        return TestClearPageDirty(page);
}
EXPORT_SYMBOL(test_clear_page_dirty);

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 *
 * This is for preparing to put the page under writeout.  We leave the page
 * tagged as dirty in the radix tree so that a concurrent write-for-sync
 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
 * implementation will run either set_page_writeback() or set_page_dirty(),
 * at which stage we bring the page's dirty flag and radix-tree dirty tag
 * back into sync.
 *
 * This incoherency between the page's dirty flag and radix-tree tag is
 * unfortunate, but it only exists while the page is locked.
 */
int clear_page_dirty_for_io(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        if (mapping) {
                if (TestClearPageDirty(page)) {
                        if (mapping_cap_account_dirty(mapping)) {
                                page_mkclean(page);
                                dec_zone_page_state(page, NR_FILE_DIRTY);
                        }
                        return 1;
                }
                return 0;
        }
        return TestClearPageDirty(page);
}
EXPORT_SYMBOL(clear_page_dirty_for_io);

int test_clear_page_writeback(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        int ret;

        if (mapping) {
                unsigned long flags;

                write_lock_irqsave(&mapping->tree_lock, flags);
                ret = TestClearPageWriteback(page);
                if (ret)
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_WRITEBACK);
                write_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestClearPageWriteback(page);
        }
        return ret;
}

int test_set_page_writeback(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        int ret;

        if (mapping) {
                unsigned long flags;

                write_lock_irqsave(&mapping->tree_lock, flags);
                ret = TestSetPageWriteback(page);
                if (!ret)
                        radix_tree_tag_set(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_WRITEBACK);
                if (!PageDirty(page))
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_DIRTY);
                write_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestSetPageWriteback(page);
        }
        return ret;
}
EXPORT_SYMBOL(test_set_page_writeback);

/*
 * Wakes up tasks that are being throttled due to writeback congestion
 */
void writeback_congestion_end(void)
{
        blk_congestion_end(WRITE);
}
EXPORT_SYMBOL(writeback_congestion_end);

/*
 * Return true if any of the pages in the mapping are marked with the
 * passed tag.
 */
int mapping_tagged(struct address_space *mapping, int tag)
{
        unsigned long flags;
        int ret;

        read_lock_irqsave(&mapping->tree_lock, flags);
        ret = radix_tree_tagged(&mapping->page_tree, tag);
        read_unlock_irqrestore(&mapping->tree_lock, flags);
        return ret;
}
EXPORT_SYMBOL(mapping_tagged);