1/*
2 * linux/mm/vmscan.c
3 *
4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
5 *
6 * Swap reorganised 29.12.95, Stephen Tweedie.
7 * kswapd added: 7.1.96 sct
8 * Removed kswapd_ctl limits, and swap out as many pages as needed
9 * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
10 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
11 * Multiqueue VM started 5.8.00, Rik van Riel.
12 */
13
14#include <linux/mm.h>
15#include <linux/module.h>
16#include <linux/slab.h>
17#include <linux/kernel_stat.h>
18#include <linux/swap.h>
19#include <linux/pagemap.h>
20#include <linux/init.h>
21#include <linux/highmem.h>
22#include <linux/vmstat.h>
23#include <linux/file.h>
24#include <linux/writeback.h>
25#include <linux/blkdev.h>
26#include <linux/buffer_head.h> /* for try_to_release_page(),
27 buffer_heads_over_limit */
28#include <linux/mm_inline.h>
29#include <linux/pagevec.h>
30#include <linux/backing-dev.h>
31#include <linux/rmap.h>
32#include <linux/topology.h>
33#include <linux/cpu.h>
34#include <linux/cpuset.h>
35#include <linux/notifier.h>
36#include <linux/rwsem.h>
37#include <linux/delay.h>
38#include <linux/kthread.h>
39#include <linux/freezer.h>
40#include <linux/memcontrol.h>
41#include <linux/delayacct.h>
42#include <linux/sysctl.h>
43
44#include <asm/tlbflush.h>
45#include <asm/div64.h>
46
47#include <linux/swapops.h>
48
49#include "internal.h"
50
51struct scan_control {
52 /* Incremented by the number of inactive pages that were scanned */
53 unsigned long nr_scanned;
54
55 /* Number of pages freed so far during a call to shrink_zones() */
56 unsigned long nr_reclaimed;
57
58 /* How many pages shrink_list() should reclaim */
59 unsigned long nr_to_reclaim;
60
61 unsigned long hibernation_mode;
62
63 /* This context's GFP mask */
64 gfp_t gfp_mask;
65
66 int may_writepage;
67
68 /* Can mapped pages be reclaimed? */
69 int may_unmap;
70
71 /* Can pages be swapped as part of reclaim? */
72 int may_swap;
73
74 int swappiness;
75
76 int all_unreclaimable;
77
78 int order;
79
80 /* Which cgroup do we reclaim from */
81 struct mem_cgroup *mem_cgroup;
82
83 /*
84 * Nodemask of nodes allowed by the caller. If NULL, all nodes
85 * are scanned.
86 */
87 nodemask_t *nodemask;
88
89 /* Pluggable isolate pages callback */
90 unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
91 unsigned long *scanned, int order, int mode,
92 struct zone *z, struct mem_cgroup *mem_cont,
93 int active, int file);
94};
95
96#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
97
98#ifdef ARCH_HAS_PREFETCH
99#define prefetch_prev_lru_page(_page, _base, _field) \
100 do { \
101 if ((_page)->lru.prev != _base) { \
102 struct page *prev; \
103 \
104 prev = lru_to_page(&(_page->lru)); \
105 prefetch(&prev->_field); \
106 } \
107 } while (0)
108#else
109#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
110#endif
111
112#ifdef ARCH_HAS_PREFETCHW
113#define prefetchw_prev_lru_page(_page, _base, _field) \
114 do { \
115 if ((_page)->lru.prev != _base) { \
116 struct page *prev; \
117 \
118 prev = lru_to_page(&(_page->lru)); \
119 prefetchw(&prev->_field); \
120 } \
121 } while (0)
122#else
123#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
124#endif
125
126/*
127 * From 0 .. 100. Higher means more swappy.
128 */
129int vm_swappiness = 60;
bd1e22b8 130long vm_total_pages; /* The total number of pages which the VM controls */
1da177e4
LT
131
132static LIST_HEAD(shrinker_list);
133static DECLARE_RWSEM(shrinker_rwsem);
134
00f0b825 135#ifdef CONFIG_CGROUP_MEM_RES_CTLR
e72e2bd6 136#define scanning_global_lru(sc) (!(sc)->mem_cgroup)
91a45470 137#else
e72e2bd6 138#define scanning_global_lru(sc) (1)
91a45470
KH
139#endif
140
6e901571
KM
141static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
142 struct scan_control *sc)
143{
e72e2bd6 144 if (!scanning_global_lru(sc))
3e2f41f1
KM
145 return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone);
146
6e901571
KM
147 return &zone->reclaim_stat;
148}
149
0b217676
VL
150static unsigned long zone_nr_lru_pages(struct zone *zone,
151 struct scan_control *sc, enum lru_list lru)
c9f299d9 152{
e72e2bd6 153 if (!scanning_global_lru(sc))
a3d8e054
KM
154 return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru);
155
c9f299d9
KM
156 return zone_page_state(zone, NR_LRU_BASE + lru);
157}
158
159
1da177e4
LT
160/*
161 * Add a shrinker callback to be called from the vm
162 */
8e1f936b 163void register_shrinker(struct shrinker *shrinker)
1da177e4 164{
8e1f936b
RR
165 shrinker->nr = 0;
166 down_write(&shrinker_rwsem);
167 list_add_tail(&shrinker->list, &shrinker_list);
168 up_write(&shrinker_rwsem);
1da177e4 169}
8e1f936b 170EXPORT_SYMBOL(register_shrinker);
1da177e4
LT
171
172/*
173 * Remove one
174 */
8e1f936b 175void unregister_shrinker(struct shrinker *shrinker)
1da177e4
LT
176{
177 down_write(&shrinker_rwsem);
178 list_del(&shrinker->list);
179 up_write(&shrinker_rwsem);
1da177e4 180}
8e1f936b 181EXPORT_SYMBOL(unregister_shrinker);
1da177e4
LT
182
183#define SHRINK_BATCH 128
184/*
185 * Call the shrink functions to age shrinkable caches
186 *
187 * Here we assume it costs one seek to replace a lru page and that it also
188 * takes a seek to recreate a cache object. With this in mind we age equal
189 * percentages of the lru and ageable caches. This should balance the seeks
190 * generated by these structures.
191 *
192 * If the vm encountered mapped pages on the LRU it increases the pressure on
193 * slab to avoid swapping.
194 *
195 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
196 *
197 * `lru_pages' represents the number of on-LRU pages in all the zones which
198 * are eligible for the caller's allocation attempt. It is used for balancing
199 * slab reclaim versus page reclaim.
b15e0905
AM
200 *
201 * Returns the number of slab objects which we shrunk.
1da177e4 202 */
69e05944
AM
203unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
204 unsigned long lru_pages)
1da177e4
LT
205{
206 struct shrinker *shrinker;
69e05944 207 unsigned long ret = 0;
1da177e4
LT
208
209 if (scanned == 0)
210 scanned = SWAP_CLUSTER_MAX;
211
212 if (!down_read_trylock(&shrinker_rwsem))
b15e0905 213 return 1; /* Assume we'll be able to shrink next time */
1da177e4
LT
214
215 list_for_each_entry(shrinker, &shrinker_list, list) {
216 unsigned long long delta;
217 unsigned long total_scan;
8e1f936b 218 unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);
1da177e4
LT
219
220 delta = (4 * scanned) / shrinker->seeks;
ea164d73 221 delta *= max_pass;
1da177e4
LT
222 do_div(delta, lru_pages + 1);
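		/*
		 * Worked example (illustrative only): with seeks == 2 (the
		 * common default), scanned == 1024, max_pass == 10000 and
		 * lru_pages == 100000, this gives
		 * delta == (4 * 1024 / 2) * 10000 / 100001 == 204 objects
		 * queued for scanning on this shrinker.
		 */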
223 shrinker->nr += delta;
ea164d73 224 if (shrinker->nr < 0) {
88c3bd70
DR
225 printk(KERN_ERR "shrink_slab: %pF negative objects to "
226 "delete nr=%ld\n",
227 shrinker->shrink, shrinker->nr);
ea164d73
AA
228 shrinker->nr = max_pass;
229 }
230
231 /*
232 * Avoid risking looping forever due to too large a nr value:
233 * never try to free more than twice the estimated number of
234 * freeable entries.
235 */
236 if (shrinker->nr > max_pass * 2)
237 shrinker->nr = max_pass * 2;
1da177e4
LT
238
239 total_scan = shrinker->nr;
240 shrinker->nr = 0;
241
242 while (total_scan >= SHRINK_BATCH) {
243 long this_scan = SHRINK_BATCH;
244 int shrink_ret;
b15e0905 245 int nr_before;
1da177e4 246
8e1f936b
RR
247 nr_before = (*shrinker->shrink)(0, gfp_mask);
248 shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
1da177e4
LT
249 if (shrink_ret == -1)
250 break;
b15e0905
AM
251 if (shrink_ret < nr_before)
252 ret += nr_before - shrink_ret;
f8891e5e 253 count_vm_events(SLABS_SCANNED, this_scan);
1da177e4
LT
254 total_scan -= this_scan;
255
256 cond_resched();
257 }
258
259 shrinker->nr += total_scan;
260 }
261 up_read(&shrinker_rwsem);
b15e0905 262 return ret;
1da177e4
LT
263}
264
265/* Called without lock on whether page is mapped, so answer is unstable */
266static inline int page_mapping_inuse(struct page *page)
267{
268 struct address_space *mapping;
269
270 /* Page is in somebody's page tables. */
271 if (page_mapped(page))
272 return 1;
273
274 /* Be more reluctant to reclaim swapcache than pagecache */
275 if (PageSwapCache(page))
276 return 1;
277
278 mapping = page_mapping(page);
279 if (!mapping)
280 return 0;
281
282 /* File is mmap'd by somebody? */
283 return mapping_mapped(mapping);
284}
285
286static inline int is_page_cache_freeable(struct page *page)
287{
ceddc3a5
JW
288 /*
289 * A freeable page cache page is referenced only by the caller
290 * that isolated the page, the page cache radix tree and
291 * optional buffer heads at page->private.
292 */
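	/*
	 * For example, a clean page cache page with no buffer heads is held
	 * only by the isolating caller and the radix tree, so page_count()
	 * is 2 and page_has_private() is 0 here.
	 */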
edcf4748 293 return page_count(page) - page_has_private(page) == 2;
1da177e4
LT
294}
295
296static int may_write_to_queue(struct backing_dev_info *bdi)
297{
930d9152 298 if (current->flags & PF_SWAPWRITE)
1da177e4
LT
299 return 1;
300 if (!bdi_write_congested(bdi))
301 return 1;
302 if (bdi == current->backing_dev_info)
303 return 1;
304 return 0;
305}
306
307/*
308 * We detected a synchronous write error writing a page out. Probably
309 * -ENOSPC. We need to propagate that into the address_space for a subsequent
310 * fsync(), msync() or close().
311 *
312 * The tricky part is that after writepage we cannot touch the mapping: nothing
313 * prevents it from being freed up. But we have a ref on the page and once
314 * that page is locked, the mapping is pinned.
315 *
316 * We're allowed to run sleeping lock_page() here because we know the caller has
317 * __GFP_FS.
318 */
319static void handle_write_error(struct address_space *mapping,
320 struct page *page, int error)
321{
322 lock_page(page);
3e9f45bd
GC
323 if (page_mapping(page) == mapping)
324 mapping_set_error(mapping, error);
1da177e4
LT
325 unlock_page(page);
326}
327
c661b078
AW
328/* Request for sync pageout. */
329enum pageout_io {
330 PAGEOUT_IO_ASYNC,
331 PAGEOUT_IO_SYNC,
332};
333
04e62a29
CL
334/* possible outcome of pageout() */
335typedef enum {
336 /* failed to write page out, page is locked */
337 PAGE_KEEP,
338 /* move page to the active list, page is locked */
339 PAGE_ACTIVATE,
340 /* page has been sent to the disk successfully, page is unlocked */
341 PAGE_SUCCESS,
342 /* page is clean and locked */
343 PAGE_CLEAN,
344} pageout_t;
345
1da177e4 346/*
1742f19f
AM
347 * pageout is called by shrink_page_list() for each dirty page.
348 * Calls ->writepage().
1da177e4 349 */
c661b078
AW
350static pageout_t pageout(struct page *page, struct address_space *mapping,
351 enum pageout_io sync_writeback)
1da177e4
LT
352{
353 /*
354 * If the page is dirty, only perform writeback if that write
355 * will be non-blocking, to prevent this allocation from being
356 * stalled by pagecache activity. But note that there may be
357 * stalls if we need to run get_block(). We could test
358 * PagePrivate for that.
359 *
6aceb53b 360 * If this process is currently in __generic_file_aio_write() against
1da177e4
LT
361 * this page's queue, we can perform writeback even if that
362 * will block.
363 *
364 * If the page is swapcache, write it back even if that would
365 * block, for some throttling. This happens by accident, because
366 * swap_backing_dev_info is bust: it doesn't reflect the
367 * congestion state of the swapdevs. Easy to fix, if needed.
1da177e4
LT
368 */
369 if (!is_page_cache_freeable(page))
370 return PAGE_KEEP;
371 if (!mapping) {
372 /*
373 * Some data journaling orphaned pages can have
374 * page->mapping == NULL while being dirty with clean buffers.
375 */
266cf658 376 if (page_has_private(page)) {
1da177e4
LT
377 if (try_to_free_buffers(page)) {
378 ClearPageDirty(page);
d40cee24 379 printk("%s: orphaned page\n", __func__);
1da177e4
LT
380 return PAGE_CLEAN;
381 }
382 }
383 return PAGE_KEEP;
384 }
385 if (mapping->a_ops->writepage == NULL)
386 return PAGE_ACTIVATE;
387 if (!may_write_to_queue(mapping->backing_dev_info))
388 return PAGE_KEEP;
389
390 if (clear_page_dirty_for_io(page)) {
391 int res;
392 struct writeback_control wbc = {
393 .sync_mode = WB_SYNC_NONE,
394 .nr_to_write = SWAP_CLUSTER_MAX,
111ebb6e
OH
395 .range_start = 0,
396 .range_end = LLONG_MAX,
1da177e4
LT
397 .nonblocking = 1,
398 .for_reclaim = 1,
399 };
400
401 SetPageReclaim(page);
402 res = mapping->a_ops->writepage(page, &wbc);
403 if (res < 0)
404 handle_write_error(mapping, page, res);
994fc28c 405 if (res == AOP_WRITEPAGE_ACTIVATE) {
1da177e4
LT
406 ClearPageReclaim(page);
407 return PAGE_ACTIVATE;
408 }
c661b078
AW
409
410 /*
411 * Wait on writeback if requested to. This happens when
412 * direct reclaiming a large contiguous area and the
413 * first attempt to free a range of pages fails.
414 */
415 if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC)
416 wait_on_page_writeback(page);
417
1da177e4
LT
418 if (!PageWriteback(page)) {
419 /* synchronous write or broken a_ops? */
420 ClearPageReclaim(page);
421 }
e129b5c2 422 inc_zone_page_state(page, NR_VMSCAN_WRITE);
1da177e4
LT
423 return PAGE_SUCCESS;
424 }
425
426 return PAGE_CLEAN;
427}
428
a649fd92 429/*
e286781d
NP
430 * Same as remove_mapping, but if the page is removed from the mapping, it
431 * gets returned with a refcount of 0.
a649fd92 432 */
e286781d 433static int __remove_mapping(struct address_space *mapping, struct page *page)
49d2e9cc 434{
28e4d965
NP
435 BUG_ON(!PageLocked(page));
436 BUG_ON(mapping != page_mapping(page));
49d2e9cc 437
19fd6231 438 spin_lock_irq(&mapping->tree_lock);
49d2e9cc 439 /*
0fd0e6b0
NP
440 * The non-racy check for a busy page.
441 *
442 * Must be careful with the order of the tests. When someone has
443 * a ref to the page, it may be possible that they dirty it then
444 * drop the reference. So if PageDirty is tested before page_count
445 * here, then the following race may occur:
446 *
447 * get_user_pages(&page);
448 * [user mapping goes away]
449 * write_to(page);
450 * !PageDirty(page) [good]
451 * SetPageDirty(page);
452 * put_page(page);
453 * !page_count(page) [good, discard it]
454 *
455 * [oops, our write_to data is lost]
456 *
457 * Reversing the order of the tests ensures such a situation cannot
458 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
459 * load is not satisfied before that of page->_count.
460 *
461 * Note that if SetPageDirty is always performed via set_page_dirty,
462 * and thus under tree_lock, then this ordering is not required.
49d2e9cc 463 */
e286781d 464 if (!page_freeze_refs(page, 2))
49d2e9cc 465 goto cannot_free;
e286781d
NP
466 /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
467 if (unlikely(PageDirty(page))) {
468 page_unfreeze_refs(page, 2);
49d2e9cc 469 goto cannot_free;
e286781d 470 }
49d2e9cc
CL
471
472 if (PageSwapCache(page)) {
473 swp_entry_t swap = { .val = page_private(page) };
474 __delete_from_swap_cache(page);
19fd6231 475 spin_unlock_irq(&mapping->tree_lock);
cb4b86ba 476 swapcache_free(swap, page);
e286781d
NP
477 } else {
478 __remove_from_page_cache(page);
19fd6231 479 spin_unlock_irq(&mapping->tree_lock);
e767e056 480 mem_cgroup_uncharge_cache_page(page);
49d2e9cc
CL
481 }
482
49d2e9cc
CL
483 return 1;
484
485cannot_free:
19fd6231 486 spin_unlock_irq(&mapping->tree_lock);
49d2e9cc
CL
487 return 0;
488}
489
e286781d
NP
490/*
491 * Attempt to detach a locked page from its ->mapping. If it is dirty or if
492 * someone else has a ref on the page, abort and return 0. If it was
493 * successfully detached, return 1. Assumes the caller has a single ref on
494 * this page.
495 */
496int remove_mapping(struct address_space *mapping, struct page *page)
497{
498 if (__remove_mapping(mapping, page)) {
499 /*
500 * Unfreezing the refcount with 1 rather than 2 effectively
501 * drops the pagecache ref for us without requiring another
502 * atomic operation.
503 */
504 page_unfreeze_refs(page, 1);
505 return 1;
506 }
507 return 0;
508}
509
894bc310
LS
510/**
511 * putback_lru_page - put previously isolated page onto appropriate LRU list
512 * @page: page to be put back to appropriate lru list
513 *
514 * Add previously isolated @page to appropriate LRU list.
515 * Page may still be unevictable for other reasons.
516 *
517 * lru_lock must not be held, interrupts must be enabled.
518 */
894bc310
LS
519void putback_lru_page(struct page *page)
520{
521 int lru;
522 int active = !!TestClearPageActive(page);
bbfd28ee 523 int was_unevictable = PageUnevictable(page);
894bc310
LS
524
525 VM_BUG_ON(PageLRU(page));
526
527redo:
528 ClearPageUnevictable(page);
529
530 if (page_evictable(page, NULL)) {
531 /*
532 * For evictable pages, we can use the cache.
533 * In event of a race, worst case is we end up with an
534 * unevictable page on [in]active list.
535 * We know how to handle that.
536 */
401a8e1c 537 lru = active + page_lru_base_type(page);
894bc310
LS
538 lru_cache_add_lru(page, lru);
539 } else {
540 /*
541 * Put unevictable pages directly on zone's unevictable
542 * list.
543 */
544 lru = LRU_UNEVICTABLE;
545 add_page_to_unevictable_list(page);
6a7b9548
JW
546 /*
547 * When racing with an mlock clearing (page is
548 * unlocked), make sure that if the other thread does
549 * not observe our setting of PG_lru and fails
550 * isolation, we see PG_mlocked cleared below and move
551 * the page back to the evictable list.
552 *
553 * The other side is TestClearPageMlocked().
554 */
555 smp_mb();
894bc310 556 }
894bc310
LS
557
558 /*
559 * page's status can change while we move it among lru. If an evictable
560 * page is on the unevictable list, it will never be freed. To avoid that,
561 * check again after we have added it to the list.
562 */
563 if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
564 if (!isolate_lru_page(page)) {
565 put_page(page);
566 goto redo;
567 }
568 /* This means someone else dropped this page from the LRU.
569 * So, it will be freed or put back on the LRU again. There is
570 * nothing to do here.
571 */
572 }
573
bbfd28ee
LS
574 if (was_unevictable && lru != LRU_UNEVICTABLE)
575 count_vm_event(UNEVICTABLE_PGRESCUED);
576 else if (!was_unevictable && lru == LRU_UNEVICTABLE)
577 count_vm_event(UNEVICTABLE_PGCULLED);
578
894bc310
LS
579 put_page(page); /* drop ref from isolate */
580}
581
1da177e4 582/*
1742f19f 583 * shrink_page_list() returns the number of reclaimed pages
1da177e4 584 */
1742f19f 585static unsigned long shrink_page_list(struct list_head *page_list,
c661b078
AW
586 struct scan_control *sc,
587 enum pageout_io sync_writeback)
1da177e4
LT
588{
589 LIST_HEAD(ret_pages);
590 struct pagevec freed_pvec;
591 int pgactivate = 0;
05ff5137 592 unsigned long nr_reclaimed = 0;
6fe6b7e3 593 unsigned long vm_flags;
1da177e4
LT
594
595 cond_resched();
596
597 pagevec_init(&freed_pvec, 1);
598 while (!list_empty(page_list)) {
599 struct address_space *mapping;
600 struct page *page;
601 int may_enter_fs;
602 int referenced;
603
604 cond_resched();
605
606 page = lru_to_page(page_list);
607 list_del(&page->lru);
608
529ae9aa 609 if (!trylock_page(page))
1da177e4
LT
610 goto keep;
611
725d704e 612 VM_BUG_ON(PageActive(page));
1da177e4
LT
613
614 sc->nr_scanned++;
80e43426 615
b291f000
NP
616 if (unlikely(!page_evictable(page, NULL)))
617 goto cull_mlocked;
894bc310 618
a6dc60f8 619 if (!sc->may_unmap && page_mapped(page))
80e43426
CL
620 goto keep_locked;
621
1da177e4
LT
622 /* Double the slab pressure for mapped and swapcache pages */
623 if (page_mapped(page) || PageSwapCache(page))
624 sc->nr_scanned++;
625
c661b078
AW
626 may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
627 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
628
629 if (PageWriteback(page)) {
630 /*
631 * Synchronous reclaim is performed in two passes,
632 * first an asynchronous pass over the list to
633 * start parallel writeback, and a second synchronous
634 * pass to wait for the IO to complete. Wait here
635 * for any page for which writeback has already
636 * started.
637 */
638 if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
639 wait_on_page_writeback(page);
4dd4b920 640 else
c661b078
AW
641 goto keep_locked;
642 }
1da177e4 643
6fe6b7e3
WF
644 referenced = page_referenced(page, 1,
645 sc->mem_cgroup, &vm_flags);
03ef83af
MK
646 /*
647 * In active use or really unfreeable? Activate it.
648 * If a page which has PG_mlocked lost the isolation race,
649 * try_to_unmap moves it to the unevictable list
650 */
5ad333eb 651 if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
03ef83af
MK
652 referenced && page_mapping_inuse(page)
653 && !(vm_flags & VM_LOCKED))
1da177e4
LT
654 goto activate_locked;
655
1da177e4
LT
656 /*
657 * Anonymous process memory has backing store?
658 * Try to allocate it some swap space here.
659 */
b291f000 660 if (PageAnon(page) && !PageSwapCache(page)) {
63eb6b93
HD
661 if (!(sc->gfp_mask & __GFP_IO))
662 goto keep_locked;
ac47b003 663 if (!add_to_swap(page))
1da177e4 664 goto activate_locked;
63eb6b93 665 may_enter_fs = 1;
b291f000 666 }
1da177e4
LT
667
668 mapping = page_mapping(page);
1da177e4
LT
669
670 /*
671 * The page is mapped into the page tables of one or more
672 * processes. Try to unmap it here.
673 */
674 if (page_mapped(page) && mapping) {
14fa31b8 675 switch (try_to_unmap(page, TTU_UNMAP)) {
1da177e4
LT
676 case SWAP_FAIL:
677 goto activate_locked;
678 case SWAP_AGAIN:
679 goto keep_locked;
b291f000
NP
680 case SWAP_MLOCK:
681 goto cull_mlocked;
1da177e4
LT
682 case SWAP_SUCCESS:
683 ; /* try to free the page below */
684 }
685 }
686
687 if (PageDirty(page)) {
5ad333eb 688 if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
1da177e4 689 goto keep_locked;
4dd4b920 690 if (!may_enter_fs)
1da177e4 691 goto keep_locked;
52a8363e 692 if (!sc->may_writepage)
1da177e4
LT
693 goto keep_locked;
694
695 /* Page is dirty, try to write it out here */
c661b078 696 switch (pageout(page, mapping, sync_writeback)) {
1da177e4
LT
697 case PAGE_KEEP:
698 goto keep_locked;
699 case PAGE_ACTIVATE:
700 goto activate_locked;
701 case PAGE_SUCCESS:
4dd4b920 702 if (PageWriteback(page) || PageDirty(page))
1da177e4
LT
703 goto keep;
704 /*
705 * A synchronous write - probably a ramdisk. Go
706 * ahead and try to reclaim the page.
707 */
529ae9aa 708 if (!trylock_page(page))
1da177e4
LT
709 goto keep;
710 if (PageDirty(page) || PageWriteback(page))
711 goto keep_locked;
712 mapping = page_mapping(page);
713 case PAGE_CLEAN:
714 ; /* try to free the page below */
715 }
716 }
717
718 /*
719 * If the page has buffers, try to free the buffer mappings
720 * associated with this page. If we succeed we try to free
721 * the page as well.
722 *
723 * We do this even if the page is PageDirty().
724 * try_to_release_page() does not perform I/O, but it is
725 * possible for a page to have PageDirty set, but it is actually
726 * clean (all its buffers are clean). This happens if the
727 * buffers were written out directly, with submit_bh(). ext3
894bc310 728 * will do this, as well as the blockdev mapping.
1da177e4
LT
729 * try_to_release_page() will discover that cleanness and will
730 * drop the buffers and mark the page clean - it can be freed.
731 *
732 * Rarely, pages can have buffers and no ->mapping. These are
733 * the pages which were not successfully invalidated in
734 * truncate_complete_page(). We try to drop those buffers here
735 * and if that worked, and the page is no longer mapped into
736 * process address space (page_count == 1) it can be freed.
737 * Otherwise, leave the page on the LRU so it is swappable.
738 */
266cf658 739 if (page_has_private(page)) {
1da177e4
LT
740 if (!try_to_release_page(page, sc->gfp_mask))
741 goto activate_locked;
e286781d
NP
742 if (!mapping && page_count(page) == 1) {
743 unlock_page(page);
744 if (put_page_testzero(page))
745 goto free_it;
746 else {
747 /*
748 * rare race with speculative reference.
749 * the speculative reference will free
750 * this page shortly, so we may
751 * increment nr_reclaimed here (and
752 * leave it off the LRU).
753 */
754 nr_reclaimed++;
755 continue;
756 }
757 }
1da177e4
LT
758 }
759
e286781d 760 if (!mapping || !__remove_mapping(mapping, page))
49d2e9cc 761 goto keep_locked;
1da177e4 762
a978d6f5
NP
763 /*
764 * At this point, we have no other references and there is
765 * no way to pick any more up (removed from LRU, removed
766 * from pagecache). Can use non-atomic bitops now (and
767 * we obviously don't have to worry about waking up a process
768 * waiting on the page lock, because there are no references.
769 */
770 __clear_page_locked(page);
e286781d 771free_it:
05ff5137 772 nr_reclaimed++;
e286781d
NP
773 if (!pagevec_add(&freed_pvec, page)) {
774 __pagevec_free(&freed_pvec);
775 pagevec_reinit(&freed_pvec);
776 }
1da177e4
LT
777 continue;
778
b291f000 779cull_mlocked:
63d6c5ad
HD
780 if (PageSwapCache(page))
781 try_to_free_swap(page);
b291f000
NP
782 unlock_page(page);
783 putback_lru_page(page);
784 continue;
785
1da177e4 786activate_locked:
68a22394
RR
787 /* Not a candidate for swapping, so reclaim swap space. */
788 if (PageSwapCache(page) && vm_swap_full())
a2c43eed 789 try_to_free_swap(page);
894bc310 790 VM_BUG_ON(PageActive(page));
1da177e4
LT
791 SetPageActive(page);
792 pgactivate++;
793keep_locked:
794 unlock_page(page);
795keep:
796 list_add(&page->lru, &ret_pages);
b291f000 797 VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
1da177e4
LT
798 }
799 list_splice(&ret_pages, page_list);
800 if (pagevec_count(&freed_pvec))
e286781d 801 __pagevec_free(&freed_pvec);
f8891e5e 802 count_vm_events(PGACTIVATE, pgactivate);
05ff5137 803 return nr_reclaimed;
1da177e4
LT
804}
805
5ad333eb
AW
806/* LRU Isolation modes. */
807#define ISOLATE_INACTIVE 0 /* Isolate inactive pages. */
808#define ISOLATE_ACTIVE 1 /* Isolate active pages. */
809#define ISOLATE_BOTH 2 /* Isolate both active and inactive pages. */
810
811/*
812 * Attempt to remove the specified page from its LRU. Only take this page
813 * if it is of the appropriate PageActive status. Pages which are being
814 * freed elsewhere are also ignored.
815 *
816 * page: page to consider
817 * mode: one of the LRU isolation modes defined above
818 *
819 * returns 0 on success, -ve errno on failure.
820 */
4f98a2fe 821int __isolate_lru_page(struct page *page, int mode, int file)
5ad333eb
AW
822{
823 int ret = -EINVAL;
824
825 /* Only take pages on the LRU. */
826 if (!PageLRU(page))
827 return ret;
828
829 /*
830 * When checking the active state, we need to be sure we are
831 * dealing with comparable boolean values. Take the logical not
832 * of each.
833 */
834 if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
835 return ret;
836
6c0b1351 837 if (mode != ISOLATE_BOTH && page_is_file_cache(page) != file)
4f98a2fe
RR
838 return ret;
839
894bc310
LS
840 /*
841 * When this function is being called for lumpy reclaim, we
842 * initially look into all LRU pages, active, inactive and
843 * unevictable; only give shrink_page_list evictable pages.
844 */
845 if (PageUnevictable(page))
846 return ret;
847
5ad333eb 848 ret = -EBUSY;
08e552c6 849
5ad333eb
AW
850 if (likely(get_page_unless_zero(page))) {
851 /*
852 * Be careful not to clear PageLRU until after we're
853 * sure the page is not being freed elsewhere -- the
854 * page release code relies on it.
855 */
856 ClearPageLRU(page);
857 ret = 0;
858 }
859
860 return ret;
861}
862
1da177e4
LT
863/*
864 * zone->lru_lock is heavily contended. Some of the functions that
865 * shrink the lists perform better by taking out a batch of pages
866 * and working on them outside the LRU lock.
867 *
868 * For pagecache intensive workloads, this function is the hottest
869 * spot in the kernel (apart from copy_*_user functions).
870 *
871 * Appropriate locks must be held before calling this function.
872 *
873 * @nr_to_scan: The number of pages to look through on the list.
874 * @src: The LRU list to pull pages off.
875 * @dst: The temp list to put pages on to.
876 * @scanned: The number of pages that were scanned.
5ad333eb
AW
877 * @order: The caller's attempted allocation order
878 * @mode: One of the LRU isolation modes
4f98a2fe 879 * @file: True [1] if isolating file [!anon] pages
1da177e4
LT
880 *
881 * returns how many pages were moved onto *@dst.
882 */
69e05944
AM
883static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
884 struct list_head *src, struct list_head *dst,
4f98a2fe 885 unsigned long *scanned, int order, int mode, int file)
1da177e4 886{
69e05944 887 unsigned long nr_taken = 0;
c9b02d97 888 unsigned long scan;
1da177e4 889
c9b02d97 890 for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
5ad333eb
AW
891 struct page *page;
892 unsigned long pfn;
893 unsigned long end_pfn;
894 unsigned long page_pfn;
895 int zone_id;
896
1da177e4
LT
897 page = lru_to_page(src);
898 prefetchw_prev_lru_page(page, src, flags);
899
725d704e 900 VM_BUG_ON(!PageLRU(page));
8d438f96 901
4f98a2fe 902 switch (__isolate_lru_page(page, mode, file)) {
5ad333eb
AW
903 case 0:
904 list_move(&page->lru, dst);
2ffebca6 905 mem_cgroup_del_lru(page);
7c8ee9a8 906 nr_taken++;
5ad333eb
AW
907 break;
908
909 case -EBUSY:
910 /* else it is being freed elsewhere */
911 list_move(&page->lru, src);
2ffebca6 912 mem_cgroup_rotate_lru_list(page, page_lru(page));
5ad333eb 913 continue;
46453a6e 914
5ad333eb
AW
915 default:
916 BUG();
917 }
918
919 if (!order)
920 continue;
921
922 /*
923 * Attempt to take all pages in the order aligned region
924 * surrounding the tag page. Only take those pages of
925 * the same active state as that tag page. We may safely
926 * round the target page pfn down to the requested order
927 * as the mem_map is guaranteed valid out to MAX_ORDER;
928 * if that page is in a different zone we will detect
929 * it from its zone id and abort this block scan.
930 */
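		/*
		 * For example, with order == 2 a tag page at pfn 1022 gives
		 * pfn == 1020 and end_pfn == 1024, so pfns 1020-1023 are
		 * scanned below.
		 */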
931 zone_id = page_zone_id(page);
932 page_pfn = page_to_pfn(page);
933 pfn = page_pfn & ~((1 << order) - 1);
934 end_pfn = pfn + (1 << order);
935 for (; pfn < end_pfn; pfn++) {
936 struct page *cursor_page;
937
938 /* The target page is in the block, ignore it. */
939 if (unlikely(pfn == page_pfn))
940 continue;
941
942 /* Avoid holes within the zone. */
943 if (unlikely(!pfn_valid_within(pfn)))
944 break;
945
946 cursor_page = pfn_to_page(pfn);
4f98a2fe 947
5ad333eb
AW
948 /* Check that we have not crossed a zone boundary. */
949 if (unlikely(page_zone_id(cursor_page) != zone_id))
950 continue;
de2e7567
MK
951
952 /*
953 * If we don't have enough swap space, reclaiming
954 * anon pages which don't already have a swap slot is
955 * pointless.
956 */
957 if (nr_swap_pages <= 0 && PageAnon(cursor_page) &&
958 !PageSwapCache(cursor_page))
959 continue;
960
ee993b13 961 if (__isolate_lru_page(cursor_page, mode, file) == 0) {
5ad333eb 962 list_move(&cursor_page->lru, dst);
cb4cbcf6 963 mem_cgroup_del_lru(cursor_page);
5ad333eb
AW
964 nr_taken++;
965 scan++;
5ad333eb
AW
966 }
967 }
1da177e4
LT
968 }
969
970 *scanned = scan;
971 return nr_taken;
972}
973
66e1707b
BS
974static unsigned long isolate_pages_global(unsigned long nr,
975 struct list_head *dst,
976 unsigned long *scanned, int order,
977 int mode, struct zone *z,
978 struct mem_cgroup *mem_cont,
4f98a2fe 979 int active, int file)
66e1707b 980{
4f98a2fe 981 int lru = LRU_BASE;
66e1707b 982 if (active)
4f98a2fe
RR
983 lru += LRU_ACTIVE;
984 if (file)
985 lru += LRU_FILE;
986 return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order,
b7c46d15 987 mode, file);
66e1707b
BS
988}
989
5ad333eb
AW
990/*
991 * clear_active_flags() is a helper for shrink_active_list(), clearing
992 * any active bits from the pages in the list.
993 */
4f98a2fe
RR
994static unsigned long clear_active_flags(struct list_head *page_list,
995 unsigned int *count)
5ad333eb
AW
996{
997 int nr_active = 0;
4f98a2fe 998 int lru;
5ad333eb
AW
999 struct page *page;
1000
4f98a2fe 1001 list_for_each_entry(page, page_list, lru) {
401a8e1c 1002 lru = page_lru_base_type(page);
5ad333eb 1003 if (PageActive(page)) {
4f98a2fe 1004 lru += LRU_ACTIVE;
5ad333eb
AW
1005 ClearPageActive(page);
1006 nr_active++;
1007 }
4f98a2fe
RR
1008 count[lru]++;
1009 }
5ad333eb
AW
1010
1011 return nr_active;
1012}
1013
62695a84
NP
1014/**
1015 * isolate_lru_page - tries to isolate a page from its LRU list
1016 * @page: page to isolate from its LRU list
1017 *
1018 * Isolates a @page from an LRU list, clears PageLRU and adjusts the
1019 * vmstat statistic corresponding to whatever LRU list the page was on.
1020 *
1021 * Returns 0 if the page was removed from an LRU list.
1022 * Returns -EBUSY if the page was not on an LRU list.
1023 *
1024 * The returned page will have PageLRU() cleared. If it was found on
894bc310
LS
1025 * the active list, it will have PageActive set. If it was found on
1026 * the unevictable list, it will have the PageUnevictable bit set. That flag
1027 * may need to be cleared by the caller before letting the page go.
62695a84
NP
1028 *
1029 * The vmstat statistic corresponding to the list on which the page was
1030 * found will be decremented.
1031 *
1032 * Restrictions:
1033 * (1) Must be called with an elevated refcount on the page. This is a
1034 * fundamental difference from isolate_lru_pages (which is called
1035 * without a stable reference).
1036 * (2) the lru_lock must not be held.
1037 * (3) interrupts must be enabled.
1038 */
1039int isolate_lru_page(struct page *page)
1040{
1041 int ret = -EBUSY;
1042
1043 if (PageLRU(page)) {
1044 struct zone *zone = page_zone(page);
1045
1046 spin_lock_irq(&zone->lru_lock);
1047 if (PageLRU(page) && get_page_unless_zero(page)) {
894bc310 1048 int lru = page_lru(page);
62695a84
NP
1049 ret = 0;
1050 ClearPageLRU(page);
4f98a2fe 1051
4f98a2fe 1052 del_page_from_lru_list(zone, page, lru);
62695a84
NP
1053 }
1054 spin_unlock_irq(&zone->lru_lock);
1055 }
1056 return ret;
1057}
1058
35cd7815
RR
1059/*
1060 * Are there way too many processes in the direct reclaim path already?
1061 */
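/*
 * Concretely, kswapd and memcg reclaim are never throttled here; a direct
 * reclaimer is asked to back off once more pages of the given type sit
 * isolated off the LRU than remain on the corresponding inactive list.
 */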
1062static int too_many_isolated(struct zone *zone, int file,
1063 struct scan_control *sc)
1064{
1065 unsigned long inactive, isolated;
1066
1067 if (current_is_kswapd())
1068 return 0;
1069
1070 if (!scanning_global_lru(sc))
1071 return 0;
1072
1073 if (file) {
1074 inactive = zone_page_state(zone, NR_INACTIVE_FILE);
1075 isolated = zone_page_state(zone, NR_ISOLATED_FILE);
1076 } else {
1077 inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1078 isolated = zone_page_state(zone, NR_ISOLATED_ANON);
1079 }
1080
1081 return isolated > inactive;
1082}
1083
1da177e4 1084/*
1742f19f
AM
1085 * shrink_inactive_list() is a helper for shrink_zone(). It returns the number
1086 * of reclaimed pages
1da177e4 1087 */
1742f19f 1088static unsigned long shrink_inactive_list(unsigned long max_scan,
33c120ed
RR
1089 struct zone *zone, struct scan_control *sc,
1090 int priority, int file)
1da177e4
LT
1091{
1092 LIST_HEAD(page_list);
1093 struct pagevec pvec;
69e05944 1094 unsigned long nr_scanned = 0;
05ff5137 1095 unsigned long nr_reclaimed = 0;
6e901571 1096 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
78dc583d
KM
1097 int lumpy_reclaim = 0;
1098
35cd7815 1099 while (unlikely(too_many_isolated(zone, file, sc))) {
58355c78 1100 congestion_wait(BLK_RW_ASYNC, HZ/10);
35cd7815
RR
1101
1102 /* We are about to die and free our memory. Return now. */
1103 if (fatal_signal_pending(current))
1104 return SWAP_CLUSTER_MAX;
1105 }
1106
78dc583d
KM
1107 /*
1108 * If we need a large contiguous chunk of memory, or have
1109 * trouble getting a small set of contiguous pages, we
1110 * will reclaim both active and inactive pages.
1111 *
1112 * We use the same threshold as pageout congestion_wait below.
1113 */
1114 if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
1115 lumpy_reclaim = 1;
1116 else if (sc->order && priority < DEF_PRIORITY - 2)
1117 lumpy_reclaim = 1;
1da177e4
LT
1118
1119 pagevec_init(&pvec, 1);
1120
1121 lru_add_drain();
1122 spin_lock_irq(&zone->lru_lock);
69e05944 1123 do {
1da177e4 1124 struct page *page;
69e05944
AM
1125 unsigned long nr_taken;
1126 unsigned long nr_scan;
1127 unsigned long nr_freed;
5ad333eb 1128 unsigned long nr_active;
4f98a2fe 1129 unsigned int count[NR_LRU_LISTS] = { 0, };
78dc583d 1130 int mode = lumpy_reclaim ? ISOLATE_BOTH : ISOLATE_INACTIVE;
a731286d
KM
1131 unsigned long nr_anon;
1132 unsigned long nr_file;
1da177e4 1133
ece74b2e 1134 nr_taken = sc->isolate_pages(SWAP_CLUSTER_MAX,
4f98a2fe
RR
1135 &page_list, &nr_scan, sc->order, mode,
1136 zone, sc->mem_cgroup, 0, file);
b35ea17b
KM
1137
1138 if (scanning_global_lru(sc)) {
1139 zone->pages_scanned += nr_scan;
1140 if (current_is_kswapd())
1141 __count_zone_vm_events(PGSCAN_KSWAPD, zone,
1142 nr_scan);
1143 else
1144 __count_zone_vm_events(PGSCAN_DIRECT, zone,
1145 nr_scan);
1146 }
1147
1148 if (nr_taken == 0)
1149 goto done;
1150
4f98a2fe 1151 nr_active = clear_active_flags(&page_list, count);
e9187bdc 1152 __count_vm_events(PGDEACTIVATE, nr_active);
5ad333eb 1153
4f98a2fe
RR
1154 __mod_zone_page_state(zone, NR_ACTIVE_FILE,
1155 -count[LRU_ACTIVE_FILE]);
1156 __mod_zone_page_state(zone, NR_INACTIVE_FILE,
1157 -count[LRU_INACTIVE_FILE]);
1158 __mod_zone_page_state(zone, NR_ACTIVE_ANON,
1159 -count[LRU_ACTIVE_ANON]);
1160 __mod_zone_page_state(zone, NR_INACTIVE_ANON,
1161 -count[LRU_INACTIVE_ANON]);
1162
a731286d
KM
1163 nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
1164 nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
1165 __mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon);
1166 __mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file);
3e2f41f1
KM
1167
1168 reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON];
1169 reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON];
1170 reclaim_stat->recent_scanned[1] += count[LRU_INACTIVE_FILE];
1171 reclaim_stat->recent_scanned[1] += count[LRU_ACTIVE_FILE];
1172
1da177e4
LT
1173 spin_unlock_irq(&zone->lru_lock);
1174
69e05944 1175 nr_scanned += nr_scan;
c661b078
AW
1176 nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
1177
1178 /*
1179 * If we are direct reclaiming for contiguous pages and we do
1180 * not reclaim everything in the list, try again and wait
1181 * for IO to complete. This will stall high-order allocations
1182 * but that should be acceptable to the caller
1183 */
1184 if (nr_freed < nr_taken && !current_is_kswapd() &&
78dc583d 1185 lumpy_reclaim) {
8aa7e847 1186 congestion_wait(BLK_RW_ASYNC, HZ/10);
c661b078
AW
1187
1188 /*
1189 * The attempt at page out may have made some
1190 * of the pages active, mark them inactive again.
1191 */
4f98a2fe 1192 nr_active = clear_active_flags(&page_list, count);
c661b078
AW
1193 count_vm_events(PGDEACTIVATE, nr_active);
1194
1195 nr_freed += shrink_page_list(&page_list, sc,
1196 PAGEOUT_IO_SYNC);
1197 }
1198
05ff5137 1199 nr_reclaimed += nr_freed;
b35ea17b 1200
a74609fa 1201 local_irq_disable();
b35ea17b 1202 if (current_is_kswapd())
f8891e5e 1203 __count_vm_events(KSWAPD_STEAL, nr_freed);
918d3f90 1204 __count_zone_vm_events(PGSTEAL, zone, nr_freed);
a74609fa
NP
1205
1206 spin_lock(&zone->lru_lock);
1da177e4
LT
1207 /*
1208 * Put back any unfreeable pages.
1209 */
1210 while (!list_empty(&page_list)) {
894bc310 1211 int lru;
1da177e4 1212 page = lru_to_page(&page_list);
725d704e 1213 VM_BUG_ON(PageLRU(page));
1da177e4 1214 list_del(&page->lru);
894bc310
LS
1215 if (unlikely(!page_evictable(page, NULL))) {
1216 spin_unlock_irq(&zone->lru_lock);
1217 putback_lru_page(page);
1218 spin_lock_irq(&zone->lru_lock);
1219 continue;
1220 }
1221 SetPageLRU(page);
1222 lru = page_lru(page);
1223 add_page_to_lru_list(zone, page, lru);
74a1c48f 1224 if (is_active_lru(lru)) {
b7c46d15 1225 int file = is_file_lru(lru);
6e901571 1226 reclaim_stat->recent_rotated[file]++;
4f98a2fe 1227 }
1da177e4
LT
1228 if (!pagevec_add(&pvec, page)) {
1229 spin_unlock_irq(&zone->lru_lock);
1230 __pagevec_release(&pvec);
1231 spin_lock_irq(&zone->lru_lock);
1232 }
1233 }
a731286d
KM
1234 __mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
1235 __mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
1236
69e05944 1237 } while (nr_scanned < max_scan);
b35ea17b 1238
1da177e4 1239done:
b35ea17b 1240 spin_unlock_irq(&zone->lru_lock);
1da177e4 1241 pagevec_release(&pvec);
05ff5137 1242 return nr_reclaimed;
1da177e4
LT
1243}
1244
3bb1a852
MB
1245/*
1246 * We are about to scan this zone at a certain priority level. If that priority
1247 * level is smaller (ie: more urgent) than the previous priority, then note
1248 * that priority level within the zone. This is done so that when the next
1249 * process comes in to scan this zone, it will immediately start out at this
1250 * priority level rather than having to build up its own scanning priority.
1251 * Here, this priority affects only the reclaim-mapped threshold.
1252 */
1253static inline void note_zone_scanning_priority(struct zone *zone, int priority)
1254{
1255 if (priority < zone->prev_priority)
1256 zone->prev_priority = priority;
1257}
1258
1da177e4
LT
1259/*
1260 * This moves pages from the active list to the inactive list.
1261 *
1262 * We move them the other way if the page is referenced by one or more
1263 * processes, from rmap.
1264 *
1265 * If the pages are mostly unmapped, the processing is fast and it is
1266 * appropriate to hold zone->lru_lock across the whole operation. But if
1267 * the pages are mapped, the processing is slow (page_referenced()) so we
1268 * should drop zone->lru_lock around each page. It's impossible to balance
1269 * this, so instead we remove the pages from the LRU while processing them.
1270 * It is safe to rely on PG_active against the non-LRU pages in here because
1271 * nobody will play with that bit on a non-LRU page.
1272 *
1273 * The downside is that we have to touch page->_count against each page.
1274 * But we had to alter page->flags anyway.
1275 */
1cfb419b 1276
3eb4140f
WF
1277static void move_active_pages_to_lru(struct zone *zone,
1278 struct list_head *list,
1279 enum lru_list lru)
1280{
1281 unsigned long pgmoved = 0;
1282 struct pagevec pvec;
1283 struct page *page;
1284
1285 pagevec_init(&pvec, 1);
1286
1287 while (!list_empty(list)) {
1288 page = lru_to_page(list);
3eb4140f
WF
1289
1290 VM_BUG_ON(PageLRU(page));
1291 SetPageLRU(page);
1292
3eb4140f
WF
1293 list_move(&page->lru, &zone->lru[lru].list);
1294 mem_cgroup_add_lru_list(page, lru);
1295 pgmoved++;
1296
1297 if (!pagevec_add(&pvec, page) || list_empty(list)) {
1298 spin_unlock_irq(&zone->lru_lock);
1299 if (buffer_heads_over_limit)
1300 pagevec_strip(&pvec);
1301 __pagevec_release(&pvec);
1302 spin_lock_irq(&zone->lru_lock);
1303 }
1304 }
1305 __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
1306 if (!is_active_lru(lru))
1307 __count_vm_events(PGDEACTIVATE, pgmoved);
1308}
1cfb419b 1309
1742f19f 1310static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
4f98a2fe 1311 struct scan_control *sc, int priority, int file)
1da177e4 1312{
44c241f1 1313 unsigned long nr_taken;
69e05944 1314 unsigned long pgscanned;
6fe6b7e3 1315 unsigned long vm_flags;
1da177e4 1316 LIST_HEAD(l_hold); /* The pages which were snipped off */
8cab4754 1317 LIST_HEAD(l_active);
b69408e8 1318 LIST_HEAD(l_inactive);
1da177e4 1319 struct page *page;
6e901571 1320 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
44c241f1 1321 unsigned long nr_rotated = 0;
1da177e4
LT
1322
1323 lru_add_drain();
1324 spin_lock_irq(&zone->lru_lock);
44c241f1 1325 nr_taken = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
66e1707b 1326 ISOLATE_ACTIVE, zone,
4f98a2fe 1327 sc->mem_cgroup, 1, file);
1cfb419b
KH
1328 /*
1329 * zone->pages_scanned is used to detect the zone's oom state;
1330 * mem_cgroup remembers nr_scan by itself.
1331 */
e72e2bd6 1332 if (scanning_global_lru(sc)) {
1cfb419b 1333 zone->pages_scanned += pgscanned;
4f98a2fe 1334 }
b7c46d15 1335 reclaim_stat->recent_scanned[file] += nr_taken;
1cfb419b 1336
3eb4140f 1337 __count_zone_vm_events(PGREFILL, zone, pgscanned);
4f98a2fe 1338 if (file)
44c241f1 1339 __mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken);
4f98a2fe 1340 else
44c241f1 1341 __mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken);
a731286d 1342 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
1da177e4
LT
1343 spin_unlock_irq(&zone->lru_lock);
1344
1da177e4
LT
1345 while (!list_empty(&l_hold)) {
1346 cond_resched();
1347 page = lru_to_page(&l_hold);
1348 list_del(&page->lru);
7e9cd484 1349
894bc310
LS
1350 if (unlikely(!page_evictable(page, NULL))) {
1351 putback_lru_page(page);
1352 continue;
1353 }
1354
7e9cd484
RR
1355 /* page_referenced clears PageReferenced */
1356 if (page_mapping_inuse(page) &&
8cab4754 1357 page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
44c241f1 1358 nr_rotated++;
8cab4754
WF
1359 /*
1360 * Identify referenced, file-backed active pages and
1361 * give them one more trip around the active list, so
1362 * that executable code gets a better chance to stay in
1363 * memory under moderate memory pressure. Anon pages
1364 * are not likely to be evicted by use-once streaming
1365 * IO, plus JVM can create lots of anon VM_EXEC pages,
1366 * so we ignore them here.
1367 */
41e20983 1368 if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
8cab4754
WF
1369 list_add(&page->lru, &l_active);
1370 continue;
1371 }
1372 }
7e9cd484 1373
5205e56e 1374 ClearPageActive(page); /* we are de-activating */
1da177e4
LT
1375 list_add(&page->lru, &l_inactive);
1376 }
1377
b555749a 1378 /*
8cab4754 1379 * Move pages back to the lru list.
b555749a 1380 */
2a1dc509 1381 spin_lock_irq(&zone->lru_lock);
556adecb 1382 /*
8cab4754
WF
1383 * Count referenced pages from currently used mappings as rotated,
1384 * even though only some of them are actually re-activated. This
1385 * helps balance scan pressure between file and anonymous pages in
1386 * get_scan_ratio.
7e9cd484 1387 */
b7c46d15 1388 reclaim_stat->recent_rotated[file] += nr_rotated;
556adecb 1389
3eb4140f
WF
1390 move_active_pages_to_lru(zone, &l_active,
1391 LRU_ACTIVE + file * LRU_FILE);
1392 move_active_pages_to_lru(zone, &l_inactive,
1393 LRU_BASE + file * LRU_FILE);
a731286d 1394 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
f8891e5e 1395 spin_unlock_irq(&zone->lru_lock);
1da177e4
LT
1396}
1397
14797e23 1398static int inactive_anon_is_low_global(struct zone *zone)
f89eb90e
KM
1399{
1400 unsigned long active, inactive;
1401
1402 active = zone_page_state(zone, NR_ACTIVE_ANON);
1403 inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1404
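	/*
	 * For example, with zone->inactive_ratio == 3 this reports "low"
	 * once the inactive anon list drops below a third of the active
	 * anon list.
	 */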
1405 if (inactive * zone->inactive_ratio < active)
1406 return 1;
1407
1408 return 0;
1409}
1410
14797e23
KM
1411/**
1412 * inactive_anon_is_low - check if anonymous pages need to be deactivated
1413 * @zone: zone to check
1414 * @sc: scan control of this context
1415 *
1416 * Returns true if the zone does not have enough inactive anon pages,
1417 * meaning some active anon pages need to be deactivated.
1418 */
1419static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
1420{
1421 int low;
1422
e72e2bd6 1423 if (scanning_global_lru(sc))
14797e23
KM
1424 low = inactive_anon_is_low_global(zone);
1425 else
c772be93 1426 low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup);
14797e23
KM
1427 return low;
1428}
1429
56e49d21
RR
1430static int inactive_file_is_low_global(struct zone *zone)
1431{
1432 unsigned long active, inactive;
1433
1434 active = zone_page_state(zone, NR_ACTIVE_FILE);
1435 inactive = zone_page_state(zone, NR_INACTIVE_FILE);
1436
1437 return (active > inactive);
1438}
1439
1440/**
1441 * inactive_file_is_low - check if file pages need to be deactivated
1442 * @zone: zone to check
1443 * @sc: scan control of this context
1444 *
1445 * When the system is doing streaming IO, memory pressure here
1446 * ensures that active file pages get deactivated, until more
1447 * than half of the file pages are on the inactive list.
1448 *
1449 * Once we get to that situation, protect the system's working
1450 * set from being evicted by disabling active file page aging.
1451 *
1452 * This uses a different ratio than the anonymous pages, because
1453 * the page cache uses a use-once replacement algorithm.
1454 */
1455static int inactive_file_is_low(struct zone *zone, struct scan_control *sc)
1456{
1457 int low;
1458
1459 if (scanning_global_lru(sc))
1460 low = inactive_file_is_low_global(zone);
1461 else
1462 low = mem_cgroup_inactive_file_is_low(sc->mem_cgroup);
1463 return low;
1464}
1465
4f98a2fe 1466static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
b69408e8
CL
1467 struct zone *zone, struct scan_control *sc, int priority)
1468{
4f98a2fe
RR
1469 int file = is_file_lru(lru);
1470
56e49d21 1471 if (lru == LRU_ACTIVE_FILE && inactive_file_is_low(zone, sc)) {
556adecb
RR
1472 shrink_active_list(nr_to_scan, zone, sc, priority, file);
1473 return 0;
1474 }
1475
14797e23 1476 if (lru == LRU_ACTIVE_ANON && inactive_anon_is_low(zone, sc)) {
4f98a2fe 1477 shrink_active_list(nr_to_scan, zone, sc, priority, file);
b69408e8
CL
1478 return 0;
1479 }
33c120ed 1480 return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
4f98a2fe
RR
1481}
1482
1483/*
1484 * Determine how aggressively the anon and file LRU lists should be
1485 * scanned. The relative value of each set of LRU lists is determined
1486 * by looking at the fraction of the pages scanned we did rotate back
1487 * onto the active list instead of evict.
1488 *
1489 * percent[0] specifies how much pressure to put on ram/swap backed
1490 * memory, while percent[1] determines pressure on the file LRUs.
1491 */
1492static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
1493 unsigned long *percent)
1494{
1495 unsigned long anon, file, free;
1496 unsigned long anon_prio, file_prio;
1497 unsigned long ap, fp;
6e901571 1498 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
4f98a2fe 1499
0b217676
VL
1500 anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
1501 zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
1502 file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
1503 zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
b962716b 1504
e72e2bd6 1505 if (scanning_global_lru(sc)) {
eeee9a8c
KM
1506 free = zone_page_state(zone, NR_FREE_PAGES);
1507 /* If we have very few page cache pages,
1508 force-scan anon pages. */
41858966 1509 if (unlikely(file + free <= high_wmark_pages(zone))) {
eeee9a8c
KM
1510 percent[0] = 100;
1511 percent[1] = 0;
1512 return;
1513 }
4f98a2fe
RR
1514 }
1515
1516 /*
1517 * OK, so we have swap space and a fair amount of page cache
1518 * pages. We use the recently rotated / recently scanned
1519 * ratios to determine how valuable each cache is.
1520 *
1521 * Because workloads change over time (and to avoid overflow)
1522 * we keep these statistics as a floating average, which ends
1523 * up weighing recent references more than old ones.
1524 *
1525 * anon in [0], file in [1]
1526 */
6e901571 1527 if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
4f98a2fe 1528 spin_lock_irq(&zone->lru_lock);
6e901571
KM
1529 reclaim_stat->recent_scanned[0] /= 2;
1530 reclaim_stat->recent_rotated[0] /= 2;
4f98a2fe
RR
1531 spin_unlock_irq(&zone->lru_lock);
1532 }
1533
6e901571 1534 if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
4f98a2fe 1535 spin_lock_irq(&zone->lru_lock);
6e901571
KM
1536 reclaim_stat->recent_scanned[1] /= 2;
1537 reclaim_stat->recent_rotated[1] /= 2;
4f98a2fe
RR
1538 spin_unlock_irq(&zone->lru_lock);
1539 }
1540
1541 /*
1542 * With swappiness at 100, anonymous and file have the same priority.
1543 * This scanning priority is essentially the inverse of IO cost.
1544 */
1545 anon_prio = sc->swappiness;
1546 file_prio = 200 - sc->swappiness;
1547
1548 /*
00d8089c
RR
1549 * The amount of pressure on anon vs file pages is inversely
1550 * proportional to the fraction of recently scanned pages on
1551 * each list that were recently referenced and in active use.
4f98a2fe 1552 */
6e901571
KM
1553 ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
1554 ap /= reclaim_stat->recent_rotated[0] + 1;
4f98a2fe 1555
6e901571
KM
1556 fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
1557 fp /= reclaim_stat->recent_rotated[1] + 1;
4f98a2fe
RR
1558
1559 /* Normalize to percentages */
1560 percent[0] = 100 * ap / (ap + fp + 1);
1561 percent[1] = 100 - percent[0];
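	/*
	 * Worked example (illustrative only): with swappiness == 60
	 * (anon_prio 60, file_prio 140), recent_scanned == 1000 on both
	 * lists, recent_rotated[0] == 100 and recent_rotated[1] == 500,
	 * we get ap == 604 and fp == 281, so percent[0] == 68 and
	 * percent[1] == 32.
	 */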
b69408e8
CL
1562}
1563
6e08a369
WF
1564/*
1565 * Smallish @nr_to_scan's are deposited in @nr_saved_scan,
1566 * until we have collected @swap_cluster_max pages to scan.
1567 */
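/*
 * For example, assuming SWAP_CLUSTER_MAX is 32, four successive calls with
 * nr_to_scan == 10 return 0, 0, 0 and then 40, after which the saved count
 * is reset to zero.
 */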
1568static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
ece74b2e 1569 unsigned long *nr_saved_scan)
6e08a369
WF
1570{
1571 unsigned long nr;
1572
1573 *nr_saved_scan += nr_to_scan;
1574 nr = *nr_saved_scan;
1575
ece74b2e 1576 if (nr >= SWAP_CLUSTER_MAX)
6e08a369
WF
1577 *nr_saved_scan = 0;
1578 else
1579 nr = 0;
1580
1581 return nr;
1582}
4f98a2fe 1583
1da177e4
LT
1584/*
1585 * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
1586 */
a79311c1 1587static void shrink_zone(int priority, struct zone *zone,
05ff5137 1588 struct scan_control *sc)
1da177e4 1589{
b69408e8 1590 unsigned long nr[NR_LRU_LISTS];
8695949a 1591 unsigned long nr_to_scan;
4f98a2fe 1592 unsigned long percent[2]; /* anon @ 0; file @ 1 */
b69408e8 1593 enum lru_list l;
01dbe5c9 1594 unsigned long nr_reclaimed = sc->nr_reclaimed;
22fba335 1595 unsigned long nr_to_reclaim = sc->nr_to_reclaim;
f8629631 1596 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
9198e96c 1597 int noswap = 0;
1da177e4 1598
9198e96c
DN
1599 /* If we have no swap space, do not bother scanning anon pages. */
1600 if (!sc->may_swap || (nr_swap_pages <= 0)) {
1601 noswap = 1;
1602 percent[0] = 0;
1603 percent[1] = 100;
1604 } else
1605 get_scan_ratio(zone, sc, percent);
4f98a2fe 1606
894bc310 1607 for_each_evictable_lru(l) {
9439c1c9 1608 int file = is_file_lru(l);
8713e012 1609 unsigned long scan;
e0f79b8f 1610
0b217676 1611 scan = zone_nr_lru_pages(zone, sc, l);
9198e96c 1612 if (priority || noswap) {
9439c1c9
KM
1613 scan >>= priority;
1614 scan = (scan * percent[file]) / 100;
1615 }
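		/*
		 * For example, at priority 12 with 4194304 pages on this list
		 * and percent[file] == 68, scan becomes
		 * (4194304 >> 12) * 68 / 100 == 696 pages for this round.
		 */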
f8629631 1616 nr[l] = nr_scan_try_batch(scan,
ece74b2e 1617 &reclaim_stat->nr_saved_scan[l]);
1cfb419b 1618 }
1da177e4 1619
556adecb
RR
1620 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
1621 nr[LRU_INACTIVE_FILE]) {
894bc310 1622 for_each_evictable_lru(l) {
b69408e8 1623 if (nr[l]) {
ece74b2e
KM
1624 nr_to_scan = min_t(unsigned long,
1625 nr[l], SWAP_CLUSTER_MAX);
b69408e8 1626 nr[l] -= nr_to_scan;
1da177e4 1627
01dbe5c9
KM
1628 nr_reclaimed += shrink_list(l, nr_to_scan,
1629 zone, sc, priority);
b69408e8 1630 }
1da177e4 1631 }
a79311c1
RR
1632 /*
1633 * On large memory systems, scan >> priority can become
1634 * really large. This is fine for the starting priority;
1635 * we want to put equal scanning pressure on each zone.
1636 * However, if the VM has a harder time of freeing pages,
1637 * with multiple processes reclaiming pages, the total
1638 * freeing target can get unreasonably large.
1639 */
338fde90 1640 if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
a79311c1 1641 break;
1da177e4
LT
1642 }
1643
01dbe5c9
KM
1644 sc->nr_reclaimed = nr_reclaimed;
1645
556adecb
RR
1646 /*
1647 * Even if we did not try to evict anon pages at all, we want to
1648 * rebalance the anon lru active/inactive ratio.
1649 */
69c85481 1650 if (inactive_anon_is_low(zone, sc) && nr_swap_pages > 0)
556adecb
RR
1651 shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
1652
232ea4d6 1653 throttle_vm_writeout(sc->gfp_mask);
1da177e4
LT
1654}
1655
1656/*
1657 * This is the direct reclaim path, for page-allocating processes. We only
1658 * try to reclaim pages from zones which will satisfy the caller's allocation
1659 * request.
1660 *
41858966
MG
1661 * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
1662 * Because:
1da177e4
LT
1663 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
1664 * allocation or
41858966
MG
1665 * b) The target zone may be at high_wmark_pages(zone) but the lower zones
1666 * must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
1667 * zone defense algorithm.
1da177e4 1668 *
1da177e4
LT
1669 * If a zone is deemed to be full of pinned pages then just give it a light
1670 * scan then give up on it.
1671 */
a79311c1 1672static void shrink_zones(int priority, struct zonelist *zonelist,
05ff5137 1673 struct scan_control *sc)
1da177e4 1674{
54a6eb5c 1675 enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
dd1a239f 1676 struct zoneref *z;
54a6eb5c 1677 struct zone *zone;
1cfb419b 1678
408d8544 1679 sc->all_unreclaimable = 1;
327c0e96
KH
1680 for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
1681 sc->nodemask) {
f3fe6512 1682 if (!populated_zone(zone))
1da177e4 1683 continue;
1cfb419b
KH
1684 /*
1685		 * Take care that memory controller reclaim has only a small
1686		 * influence on the global LRU.
1687 */
e72e2bd6 1688 if (scanning_global_lru(sc)) {
1cfb419b
KH
1689 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1690 continue;
1691 note_zone_scanning_priority(zone, priority);
1da177e4 1692
1cfb419b
KH
1693 if (zone_is_all_unreclaimable(zone) &&
1694 priority != DEF_PRIORITY)
1695 continue; /* Let kswapd poll it */
1696 sc->all_unreclaimable = 0;
1697 } else {
1698 /*
1699			 * Ignore the cpuset limitation here. We just want to reduce
1700			 * the number of pages we use, regardless of memory shortage.
1701 */
1702 sc->all_unreclaimable = 0;
1703 mem_cgroup_note_reclaim_priority(sc->mem_cgroup,
1704 priority);
1705 }
408d8544 1706
a79311c1 1707 shrink_zone(priority, zone, sc);
1da177e4
LT
1708 }
1709}
4f98a2fe 1710
1da177e4
LT
1711/*
1712 * This is the main entry point to direct page reclaim.
1713 *
1714 * If a full scan of the inactive list fails to free enough memory then we
1715 * are "out of memory" and something needs to be killed.
1716 *
1717 * If the caller is !__GFP_FS then the probability of a failure is reasonably
1718 * high - the zone may be full of dirty or under-writeback pages, which this
5b0830cb
JA
1719 * caller can't do much about. We kick the writeback threads and take explicit
1720 * naps in the hope that some of these pages can be written. But if the
1721 * allocating task holds filesystem locks which prevent writeout this might not
1722 * work, and the allocation attempt will fail.
a41f24ea
NA
1723 *
1724 * returns: 0, if no pages reclaimed
1725 * else, the number of pages reclaimed
1da177e4 1726 */
dac1d27b 1727static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
dd1a239f 1728 struct scan_control *sc)
1da177e4
LT
1729{
1730 int priority;
c700be3d 1731 unsigned long ret = 0;
69e05944 1732 unsigned long total_scanned = 0;
1da177e4 1733 struct reclaim_state *reclaim_state = current->reclaim_state;
1da177e4 1734 unsigned long lru_pages = 0;
dd1a239f 1735 struct zoneref *z;
54a6eb5c 1736 struct zone *zone;
dd1a239f 1737 enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
22fba335 1738 unsigned long writeback_threshold;
1da177e4 1739
873b4771
KK
1740 delayacct_freepages_start();
1741
e72e2bd6 1742 if (scanning_global_lru(sc))
1cfb419b
KH
1743 count_vm_event(ALLOCSTALL);
1744 /*
1745 * mem_cgroup will not do shrink_slab.
1746 */
e72e2bd6 1747 if (scanning_global_lru(sc)) {
54a6eb5c 1748 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1da177e4 1749
1cfb419b
KH
1750 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1751 continue;
1da177e4 1752
adea02a1 1753 lru_pages += zone_reclaimable_pages(zone);
1cfb419b 1754 }
1da177e4
LT
1755 }
1756
1757 for (priority = DEF_PRIORITY; priority >= 0; priority--) {
66e1707b 1758 sc->nr_scanned = 0;
f7b7fd8f
RR
1759 if (!priority)
1760 disable_swap_token();
a79311c1 1761 shrink_zones(priority, zonelist, sc);
66e1707b
BS
1762 /*
1763 * Don't shrink slabs when reclaiming memory from
1764 * over limit cgroups
1765 */
e72e2bd6 1766 if (scanning_global_lru(sc)) {
dd1a239f 1767 shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
91a45470 1768 if (reclaim_state) {
a79311c1 1769 sc->nr_reclaimed += reclaim_state->reclaimed_slab;
91a45470
KH
1770 reclaim_state->reclaimed_slab = 0;
1771 }
1da177e4 1772 }
66e1707b 1773 total_scanned += sc->nr_scanned;
22fba335 1774 if (sc->nr_reclaimed >= sc->nr_to_reclaim) {
a79311c1 1775 ret = sc->nr_reclaimed;
1da177e4
LT
1776 goto out;
1777 }
1778
1779 /*
1780 * Try to write back as many pages as we just scanned. This
1781 * tends to cause slow streaming writers to write data to the
1782 * disk smoothly, at the dirtying rate, which is nice. But
1783 * that's undesirable in laptop mode, where we *want* lumpy
1784 * writeout. So in laptop mode, write out the whole world.
1785 */
22fba335
KM
1786 writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
1787 if (total_scanned > writeback_threshold) {
03ba3782 1788 wakeup_flusher_threads(laptop_mode ? 0 : total_scanned);
66e1707b 1789 sc->may_writepage = 1;
1da177e4
LT
1790 }
1791
1792 /* Take a nap, wait for some writeback to complete */
7b51755c
KM
1793 if (!sc->hibernation_mode && sc->nr_scanned &&
1794 priority < DEF_PRIORITY - 2)
8aa7e847 1795 congestion_wait(BLK_RW_ASYNC, HZ/10);
1da177e4 1796 }
87547ee9 1797 /* top priority shrink_zones still had more to do? don't OOM, then */
e72e2bd6 1798 if (!sc->all_unreclaimable && scanning_global_lru(sc))
a79311c1 1799 ret = sc->nr_reclaimed;
1da177e4 1800out:
3bb1a852
MB
1801 /*
1802 * Now that we've scanned all the zones at this priority level, note
1803 * that level within the zone so that the next thread which performs
1804 * scanning of this zone will immediately start out at this priority
1805 * level. This affects only the decision whether or not to bring
1806 * mapped pages onto the inactive list.
1807 */
1808 if (priority < 0)
1809 priority = 0;
1da177e4 1810
e72e2bd6 1811 if (scanning_global_lru(sc)) {
54a6eb5c 1812 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1cfb419b
KH
1813
1814 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1815 continue;
1816
1817 zone->prev_priority = priority;
1818 }
1819 } else
1820 mem_cgroup_record_reclaim_priority(sc->mem_cgroup, priority);
1da177e4 1821
873b4771
KK
1822 delayacct_freepages_end();
1823
1da177e4
LT
1824 return ret;
1825}
1826
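As a rough illustration (not from this file), the flusher wakeup above fires once total_scanned passes 1.5x the reclaim target; with the usual SWAP_CLUSTER_MAX target of 32 pages that works out to 48 pages.

#include <stdio.h>

int main(void)
{
	unsigned long nr_to_reclaim = 32;	/* SWAP_CLUSTER_MAX-sized target */
	unsigned long writeback_threshold = nr_to_reclaim + nr_to_reclaim / 2;

	/* prints 48: scanning past 1.5x the target wakes the flusher threads */
	printf("wake flushers after %lu pages scanned\n", writeback_threshold);
	return 0;
}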
dac1d27b 1827unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
327c0e96 1828 gfp_t gfp_mask, nodemask_t *nodemask)
66e1707b
BS
1829{
1830 struct scan_control sc = {
1831 .gfp_mask = gfp_mask,
1832 .may_writepage = !laptop_mode,
22fba335 1833 .nr_to_reclaim = SWAP_CLUSTER_MAX,
a6dc60f8 1834 .may_unmap = 1,
2e2e4259 1835 .may_swap = 1,
66e1707b
BS
1836 .swappiness = vm_swappiness,
1837 .order = order,
1838 .mem_cgroup = NULL,
1839 .isolate_pages = isolate_pages_global,
327c0e96 1840 .nodemask = nodemask,
66e1707b
BS
1841 };
1842
dd1a239f 1843 return do_try_to_free_pages(zonelist, &sc);
66e1707b
BS
1844}
1845
00f0b825 1846#ifdef CONFIG_CGROUP_MEM_RES_CTLR
66e1707b 1847
4e416953
BS
1848unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
1849 gfp_t gfp_mask, bool noswap,
1850 unsigned int swappiness,
1851 struct zone *zone, int nid)
1852{
1853 struct scan_control sc = {
1854 .may_writepage = !laptop_mode,
1855 .may_unmap = 1,
1856 .may_swap = !noswap,
4e416953
BS
1857 .swappiness = swappiness,
1858 .order = 0,
1859 .mem_cgroup = mem,
1860 .isolate_pages = mem_cgroup_isolate_pages,
1861 };
1862 nodemask_t nm = nodemask_of_node(nid);
1863
1864 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
1865 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
1866 sc.nodemask = &nm;
1867 sc.nr_reclaimed = 0;
1868 sc.nr_scanned = 0;
1869 /*
1870 * NOTE: Although we can get the priority field, using it
1871 * here is not a good idea, since it limits the pages we can scan.
1872	 * If we don't reclaim here, the shrink_zone from balance_pgdat
1873	 * will pick up pages from other mem cgroups as well. We hack
1874 * the priority and make it zero.
1875 */
1876 shrink_zone(0, zone, &sc);
1877 return sc.nr_reclaimed;
1878}
1879
e1a1cd59 1880unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
a7885eb8
KM
1881 gfp_t gfp_mask,
1882 bool noswap,
1883 unsigned int swappiness)
66e1707b 1884{
4e416953 1885 struct zonelist *zonelist;
66e1707b 1886 struct scan_control sc = {
66e1707b 1887 .may_writepage = !laptop_mode,
a6dc60f8 1888 .may_unmap = 1,
2e2e4259 1889 .may_swap = !noswap,
22fba335 1890 .nr_to_reclaim = SWAP_CLUSTER_MAX,
a7885eb8 1891 .swappiness = swappiness,
66e1707b
BS
1892 .order = 0,
1893 .mem_cgroup = mem_cont,
1894 .isolate_pages = mem_cgroup_isolate_pages,
327c0e96 1895		.nodemask = NULL, /* we don't care about placement */
66e1707b 1896 };
66e1707b 1897
dd1a239f
MG
1898 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
1899 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
1900 zonelist = NODE_DATA(numa_node_id())->node_zonelists;
1901 return do_try_to_free_pages(zonelist, &sc);
66e1707b
BS
1902}
1903#endif
1904
f50de2d3 1905/* is kswapd sleeping prematurely? */
bb3ab596 1906static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining)
f50de2d3 1907{
bb3ab596 1908 int i;
f50de2d3
MG
1909
1910 /* If a direct reclaimer woke kswapd within HZ/10, it's premature */
1911 if (remaining)
1912 return 1;
1913
1914 /* If after HZ/10, a zone is below the high mark, it's premature */
bb3ab596
KM
1915 for (i = 0; i < pgdat->nr_zones; i++) {
1916 struct zone *zone = pgdat->node_zones + i;
1917
1918 if (!populated_zone(zone))
1919 continue;
1920
f50de2d3
MG
1921 if (!zone_watermark_ok(zone, order, high_wmark_pages(zone),
1922 0, 0))
1923 return 1;
bb3ab596 1924 }
f50de2d3
MG
1925
1926 return 0;
1927}
1928
1da177e4
LT
1929/*
1930 * For kswapd, balance_pgdat() will work across all this node's zones until
41858966 1931 * they are all at high_wmark_pages(zone).
1da177e4 1932 *
1da177e4
LT
1933 * Returns the number of pages which were actually freed.
1934 *
1935 * There is special handling here for zones which are full of pinned pages.
1936 * This can happen if the pages are all mlocked, or if they are all used by
1937 * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb.
1938 * What we do is to detect the case where all pages in the zone have been
1939 * scanned twice and there has been zero successful reclaim. Mark the zone as
1940 * dead and from now on, only perform a short scan. Basically we're polling
1941 * the zone for when the problem goes away.
1942 *
1943 * kswapd scans the zones in the highmem->normal->dma direction. It skips
41858966
MG
1944 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
1945 * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the
1946 * lower zones regardless of the number of free pages in the lower zones. This
1947 * interoperates with the page allocator fallback scheme to ensure that aging
1948 * of pages is balanced across the zones.
1da177e4 1949 */
d6277db4 1950static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
1da177e4 1951{
1da177e4
LT
1952 int all_zones_ok;
1953 int priority;
1954 int i;
69e05944 1955 unsigned long total_scanned;
1da177e4 1956 struct reclaim_state *reclaim_state = current->reclaim_state;
179e9639
AM
1957 struct scan_control sc = {
1958 .gfp_mask = GFP_KERNEL,
a6dc60f8 1959 .may_unmap = 1,
2e2e4259 1960 .may_swap = 1,
22fba335
KM
1961 /*
1962		 * kswapd doesn't want to be bailed out while reclaiming, because
1963 * we want to put equal scanning pressure on each zone.
1964 */
1965 .nr_to_reclaim = ULONG_MAX,
d6277db4 1966 .swappiness = vm_swappiness,
5ad333eb 1967 .order = order,
66e1707b
BS
1968 .mem_cgroup = NULL,
1969 .isolate_pages = isolate_pages_global,
179e9639 1970 };
3bb1a852
MB
1971 /*
1972 * temp_priority is used to remember the scanning priority at which
41858966
MG
1973 * this zone was successfully refilled to
1974 * free_pages == high_wmark_pages(zone).
3bb1a852
MB
1975 */
1976 int temp_priority[MAX_NR_ZONES];
1da177e4
LT
1977
1978loop_again:
1979 total_scanned = 0;
a79311c1 1980 sc.nr_reclaimed = 0;
c0bbbc73 1981 sc.may_writepage = !laptop_mode;
f8891e5e 1982 count_vm_event(PAGEOUTRUN);
1da177e4 1983
3bb1a852
MB
1984 for (i = 0; i < pgdat->nr_zones; i++)
1985 temp_priority[i] = DEF_PRIORITY;
1da177e4
LT
1986
1987 for (priority = DEF_PRIORITY; priority >= 0; priority--) {
1988 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
1989 unsigned long lru_pages = 0;
bb3ab596 1990 int has_under_min_watermark_zone = 0;
1da177e4 1991
f7b7fd8f
RR
1992 /* The swap token gets in the way of swapout... */
1993 if (!priority)
1994 disable_swap_token();
1995
1da177e4
LT
1996 all_zones_ok = 1;
1997
d6277db4
RW
1998 /*
1999 * Scan in the highmem->dma direction for the highest
2000 * zone which needs scanning
2001 */
2002 for (i = pgdat->nr_zones - 1; i >= 0; i--) {
2003 struct zone *zone = pgdat->node_zones + i;
1da177e4 2004
d6277db4
RW
2005 if (!populated_zone(zone))
2006 continue;
1da177e4 2007
e815af95
DR
2008 if (zone_is_all_unreclaimable(zone) &&
2009 priority != DEF_PRIORITY)
d6277db4 2010 continue;
1da177e4 2011
556adecb
RR
2012 /*
2013 * Do some background aging of the anon list, to give
2014 * pages a chance to be referenced before reclaiming.
2015 */
14797e23 2016 if (inactive_anon_is_low(zone, &sc))
556adecb
RR
2017 shrink_active_list(SWAP_CLUSTER_MAX, zone,
2018 &sc, priority, 0);
2019
41858966
MG
2020 if (!zone_watermark_ok(zone, order,
2021 high_wmark_pages(zone), 0, 0)) {
d6277db4 2022 end_zone = i;
e1dbeda6 2023 break;
1da177e4 2024 }
1da177e4 2025 }
e1dbeda6
AM
2026 if (i < 0)
2027 goto out;
2028
1da177e4
LT
2029 for (i = 0; i <= end_zone; i++) {
2030 struct zone *zone = pgdat->node_zones + i;
2031
adea02a1 2032 lru_pages += zone_reclaimable_pages(zone);
1da177e4
LT
2033 }
2034
2035 /*
2036 * Now scan the zone in the dma->highmem direction, stopping
2037 * at the last zone which needs scanning.
2038 *
2039 * We do this because the page allocator works in the opposite
2040 * direction. This prevents the page allocator from allocating
2041 * pages behind kswapd's direction of progress, which would
2042 * cause too much scanning of the lower zones.
2043 */
2044 for (i = 0; i <= end_zone; i++) {
2045 struct zone *zone = pgdat->node_zones + i;
b15e0905 2046 int nr_slab;
4e416953 2047 int nid, zid;
1da177e4 2048
f3fe6512 2049 if (!populated_zone(zone))
1da177e4
LT
2050 continue;
2051
e815af95
DR
2052 if (zone_is_all_unreclaimable(zone) &&
2053 priority != DEF_PRIORITY)
1da177e4
LT
2054 continue;
2055
41858966
MG
2056 if (!zone_watermark_ok(zone, order,
2057 high_wmark_pages(zone), end_zone, 0))
d6277db4 2058 all_zones_ok = 0;
3bb1a852 2059 temp_priority[i] = priority;
1da177e4 2060 sc.nr_scanned = 0;
3bb1a852 2061 note_zone_scanning_priority(zone, priority);
4e416953
BS
2062
2063 nid = pgdat->node_id;
2064 zid = zone_idx(zone);
2065 /*
2066 * Call soft limit reclaim before calling shrink_zone.
2067 * For now we ignore the return value
2068 */
2069 mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask,
2070 nid, zid);
32a4330d
RR
2071 /*
2072 * We put equal pressure on every zone, unless one
2073 * zone has way too many pages free already.
2074 */
41858966
MG
2075 if (!zone_watermark_ok(zone, order,
2076 8*high_wmark_pages(zone), end_zone, 0))
a79311c1 2077 shrink_zone(priority, zone, &sc);
1da177e4 2078 reclaim_state->reclaimed_slab = 0;
b15e0905
AM
2079 nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
2080 lru_pages);
a79311c1 2081 sc.nr_reclaimed += reclaim_state->reclaimed_slab;
1da177e4 2082 total_scanned += sc.nr_scanned;
e815af95 2083 if (zone_is_all_unreclaimable(zone))
1da177e4 2084 continue;
b15e0905 2085 if (nr_slab == 0 && zone->pages_scanned >=
adea02a1 2086 (zone_reclaimable_pages(zone) * 6))
e815af95
DR
2087 zone_set_flag(zone,
2088 ZONE_ALL_UNRECLAIMABLE);
1da177e4
LT
2089 /*
2090 * If we've done a decent amount of scanning and
2091 * the reclaim ratio is low, start doing writepage
2092 * even in laptop mode
2093 */
2094 if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
a79311c1 2095 total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
1da177e4 2096 sc.may_writepage = 1;
bb3ab596
KM
2097
2098 /*
2099			 * We are still under the min watermark, which means we risk
2100			 * GFP_ATOMIC allocation failures. Hurry up!
2101 */
2102 if (!zone_watermark_ok(zone, order, min_wmark_pages(zone),
2103 end_zone, 0))
2104 has_under_min_watermark_zone = 1;
2105
1da177e4 2106 }
1da177e4
LT
2107 if (all_zones_ok)
2108 break; /* kswapd: all done */
2109 /*
2110 * OK, kswapd is getting into trouble. Take a nap, then take
2111 * another pass across the zones.
2112 */
bb3ab596
KM
2113 if (total_scanned && (priority < DEF_PRIORITY - 2)) {
2114 if (has_under_min_watermark_zone)
2115 count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
2116 else
2117 congestion_wait(BLK_RW_ASYNC, HZ/10);
2118 }
1da177e4
LT
2119
2120 /*
2121 * We do this so kswapd doesn't build up large priorities for
2122 * example when it is freeing in parallel with allocators. It
2123 * matches the direct reclaim path behaviour in terms of impact
2124 * on zone->*_priority.
2125 */
a79311c1 2126 if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
1da177e4
LT
2127 break;
2128 }
2129out:
3bb1a852
MB
2130 /*
2131 * Note within each zone the priority level at which this zone was
2132 * brought into a happy state. So that the next thread which scans this
2133 * zone will start out at that priority level.
2134 */
1da177e4
LT
2135 for (i = 0; i < pgdat->nr_zones; i++) {
2136 struct zone *zone = pgdat->node_zones + i;
2137
3bb1a852 2138 zone->prev_priority = temp_priority[i];
1da177e4
LT
2139 }
2140 if (!all_zones_ok) {
2141 cond_resched();
8357376d
RW
2142
2143 try_to_freeze();
2144
73ce02e9
KM
2145 /*
2146 * Fragmentation may mean that the system cannot be
2147 * rebalanced for high-order allocations in all zones.
2148 * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX,
2149 * it means the zones have been fully scanned and are still
2150 * not balanced. For high-order allocations, there is
2151 * little point trying all over again as kswapd may
2152 * infinite loop.
2153 *
2154 * Instead, recheck all watermarks at order-0 as they
2155 * are the most important. If watermarks are ok, kswapd will go
2156 * back to sleep. High-order users can still perform direct
2157 * reclaim if they wish.
2158 */
2159 if (sc.nr_reclaimed < SWAP_CLUSTER_MAX)
2160 order = sc.order = 0;
2161
1da177e4
LT
2162 goto loop_again;
2163 }
2164
a79311c1 2165 return sc.nr_reclaimed;
1da177e4
LT
2166}
2167
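An illustrative sketch (not part of this file) of the "way too many pages free" exception in balance_pgdat() above: a zone is only shrunk while its free pages sit below roughly eight times the high watermark. The helper name and numbers below are made up; the real test goes through zone_watermark_ok() and lowmem reserves.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the 8 * high_wmark_pages() check. */
static bool worth_shrinking(unsigned long free_pages, unsigned long high_wmark)
{
	return free_pages < 8 * high_wmark;
}

int main(void)
{
	printf("%d\n", worth_shrinking(5000, 1000));	/* 1: keep reclaiming */
	printf("%d\n", worth_shrinking(9000, 1000));	/* 0: plenty free, skip */
	return 0;
}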
2168/*
2169 * The background pageout daemon, started as a kernel thread
4f98a2fe 2170 * from the init process.
1da177e4
LT
2171 *
2172 * This basically trickles out pages so that we have _some_
2173 * free memory available even if there is no other activity
2174 * that frees anything up. This is needed for things like routing
2175 * etc, where we otherwise might have all activity going on in
2176 * asynchronous contexts that cannot page things out.
2177 *
2178 * If there are applications that are active memory-allocators
2179 * (most normal use), this basically shouldn't matter.
2180 */
2181static int kswapd(void *p)
2182{
2183 unsigned long order;
2184 pg_data_t *pgdat = (pg_data_t*)p;
2185 struct task_struct *tsk = current;
2186 DEFINE_WAIT(wait);
2187 struct reclaim_state reclaim_state = {
2188 .reclaimed_slab = 0,
2189 };
a70f7302 2190 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
1da177e4 2191
cf40bd16
NP
2192 lockdep_set_current_reclaim_state(GFP_KERNEL);
2193
174596a0 2194 if (!cpumask_empty(cpumask))
c5f59f08 2195 set_cpus_allowed_ptr(tsk, cpumask);
1da177e4
LT
2196 current->reclaim_state = &reclaim_state;
2197
2198 /*
2199 * Tell the memory management that we're a "memory allocator",
2200 * and that if we need more memory we should get access to it
2201 * regardless (see "__alloc_pages()"). "kswapd" should
2202 * never get caught in the normal page freeing logic.
2203 *
2204 * (Kswapd normally doesn't need memory anyway, but sometimes
2205 * you need a small amount of memory in order to be able to
2206 * page out something else, and this flag essentially protects
2207 * us from recursively trying to free more memory as we're
2208 * trying to free the first piece of memory in the first place).
2209 */
930d9152 2210 tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
83144186 2211 set_freezable();
1da177e4
LT
2212
2213 order = 0;
2214 for ( ; ; ) {
2215 unsigned long new_order;
8fe23e05 2216 int ret;
3e1d1d28 2217
1da177e4
LT
2218 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
2219 new_order = pgdat->kswapd_max_order;
2220 pgdat->kswapd_max_order = 0;
2221 if (order < new_order) {
2222 /*
2223 * Don't sleep if someone wants a larger 'order'
2224 * allocation
2225 */
2226 order = new_order;
2227 } else {
f50de2d3
MG
2228 if (!freezing(current) && !kthread_should_stop()) {
2229 long remaining = 0;
2230
2231 /* Try to sleep for a short interval */
bb3ab596 2232 if (!sleeping_prematurely(pgdat, order, remaining)) {
f50de2d3
MG
2233 remaining = schedule_timeout(HZ/10);
2234 finish_wait(&pgdat->kswapd_wait, &wait);
2235 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
2236 }
2237
2238 /*
2239 * After a short sleep, check if it was a
2240 * premature sleep. If not, then go fully
2241 * to sleep until explicitly woken up
2242 */
bb3ab596 2243 if (!sleeping_prematurely(pgdat, order, remaining))
f50de2d3
MG
2244 schedule();
2245 else {
2246 if (remaining)
bb3ab596 2247 count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
f50de2d3 2248 else
bb3ab596 2249 count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
f50de2d3
MG
2250 }
2251 }
b1296cc4 2252
1da177e4
LT
2253 order = pgdat->kswapd_max_order;
2254 }
2255 finish_wait(&pgdat->kswapd_wait, &wait);
2256
8fe23e05
DR
2257 ret = try_to_freeze();
2258 if (kthread_should_stop())
2259 break;
2260
2261 /*
2262 * We can speed up thawing tasks if we don't call balance_pgdat
2263 * after returning from the refrigerator
2264 */
2265 if (!ret)
b1296cc4 2266 balance_pgdat(pgdat, order);
1da177e4
LT
2267 }
2268 return 0;
2269}
2270
2271/*
2272 * A zone is low on free memory, so wake its kswapd task to service it.
2273 */
2274void wakeup_kswapd(struct zone *zone, int order)
2275{
2276 pg_data_t *pgdat;
2277
f3fe6512 2278 if (!populated_zone(zone))
1da177e4
LT
2279 return;
2280
2281 pgdat = zone->zone_pgdat;
41858966 2282 if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
1da177e4
LT
2283 return;
2284 if (pgdat->kswapd_max_order < order)
2285 pgdat->kswapd_max_order = order;
02a0e53d 2286 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1da177e4 2287 return;
8d0986e2 2288 if (!waitqueue_active(&pgdat->kswapd_wait))
1da177e4 2289 return;
8d0986e2 2290 wake_up_interruptible(&pgdat->kswapd_wait);
1da177e4
LT
2291}
2292
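A small userspace sketch (not from this file, names hypothetical) of the cheap checks wakeup_kswapd() makes above: zones still above their low watermark are left alone, and the largest order requested so far is remembered for kswapd. The cpuset and waitqueue checks are omitted.

#include <stdio.h>

struct fake_pgdat {
	int kswapd_max_order;
};

static void fake_wakeup(struct fake_pgdat *pgdat, unsigned long free_pages,
			unsigned long low_wmark, int order)
{
	if (free_pages >= low_wmark)
		return;				/* still above the low mark */
	if (pgdat->kswapd_max_order < order)
		pgdat->kswapd_max_order = order;
	printf("would wake kswapd for order %d\n", pgdat->kswapd_max_order);
}

int main(void)
{
	struct fake_pgdat pgdat = { 0 };

	fake_wakeup(&pgdat, 600, 500, 2);	/* ignored: above the watermark */
	fake_wakeup(&pgdat, 400, 500, 3);	/* wakes kswapd, order 3 recorded */
	return 0;
}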
adea02a1
WF
2293/*
2294 * The reclaimable count should be mostly accurate.
2295 * The less reclaimable pages may be:
2296 * - mlocked pages, which will be moved to the unevictable list when encountered
2297 * - mapped pages, which may need several passes before they can be reclaimed
2298 * - dirty pages, which are not "instantly" reclaimable
2299 */
2300unsigned long global_reclaimable_pages(void)
4f98a2fe 2301{
adea02a1
WF
2302 int nr;
2303
2304 nr = global_page_state(NR_ACTIVE_FILE) +
2305 global_page_state(NR_INACTIVE_FILE);
2306
2307 if (nr_swap_pages > 0)
2308 nr += global_page_state(NR_ACTIVE_ANON) +
2309 global_page_state(NR_INACTIVE_ANON);
2310
2311 return nr;
2312}
2313
2314unsigned long zone_reclaimable_pages(struct zone *zone)
2315{
2316 int nr;
2317
2318 nr = zone_page_state(zone, NR_ACTIVE_FILE) +
2319 zone_page_state(zone, NR_INACTIVE_FILE);
2320
2321 if (nr_swap_pages > 0)
2322 nr += zone_page_state(zone, NR_ACTIVE_ANON) +
2323 zone_page_state(zone, NR_INACTIVE_ANON);
2324
2325 return nr;
4f98a2fe
RR
2326}
2327
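A userspace sketch (not from this file) of the accounting above, with made-up page counts: anon pages only count as reclaimable when swap is available.

#include <stdio.h>

static unsigned long reclaimable(unsigned long active_file,
				 unsigned long inactive_file,
				 unsigned long active_anon,
				 unsigned long inactive_anon,
				 long nr_swap_pages)
{
	unsigned long nr = active_file + inactive_file;

	if (nr_swap_pages > 0)		/* anon pages need swap to be reclaimable */
		nr += active_anon + inactive_anon;
	return nr;
}

int main(void)
{
	printf("%lu\n", reclaimable(100, 200, 300, 400, 0));	/* 300: no swap */
	printf("%lu\n", reclaimable(100, 200, 300, 400, 1024));	/* 1000: with swap */
	return 0;
}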
c6f37f12 2328#ifdef CONFIG_HIBERNATION
1da177e4 2329/*
7b51755c 2330 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
d6277db4
RW
2331 * freed pages.
2332 *
2333 * Rather than trying to age LRUs, the aim is to preserve the overall
2334 * LRU order by reclaiming preferentially:
2335 * inactive > active > active referenced > active mapped
1da177e4 2336 */
7b51755c 2337unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
1da177e4 2338{
d6277db4 2339 struct reclaim_state reclaim_state;
d6277db4 2340 struct scan_control sc = {
7b51755c
KM
2341 .gfp_mask = GFP_HIGHUSER_MOVABLE,
2342 .may_swap = 1,
2343 .may_unmap = 1,
d6277db4 2344 .may_writepage = 1,
7b51755c
KM
2345 .nr_to_reclaim = nr_to_reclaim,
2346 .hibernation_mode = 1,
2347 .swappiness = vm_swappiness,
2348 .order = 0,
66e1707b 2349 .isolate_pages = isolate_pages_global,
1da177e4 2350 };
7b51755c
KM
2351 struct zonelist * zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
2352 struct task_struct *p = current;
2353 unsigned long nr_reclaimed;
1da177e4 2354
7b51755c
KM
2355 p->flags |= PF_MEMALLOC;
2356 lockdep_set_current_reclaim_state(sc.gfp_mask);
2357 reclaim_state.reclaimed_slab = 0;
2358 p->reclaim_state = &reclaim_state;
d6277db4 2359
7b51755c 2360 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
d979677c 2361
7b51755c
KM
2362 p->reclaim_state = NULL;
2363 lockdep_clear_current_reclaim_state();
2364 p->flags &= ~PF_MEMALLOC;
d6277db4 2365
7b51755c 2366 return nr_reclaimed;
1da177e4 2367}
c6f37f12 2368#endif /* CONFIG_HIBERNATION */
1da177e4 2369
1da177e4
LT
2370/* It's optimal to keep kswapds on the same CPUs as their memory, but
2371 not required for correctness. So if the last cpu in a node goes
2372 away, we get changed to run anywhere: as the first one comes back,
2373 restore their cpu bindings. */
9c7b216d 2374static int __devinit cpu_callback(struct notifier_block *nfb,
69e05944 2375 unsigned long action, void *hcpu)
1da177e4 2376{
58c0a4a7 2377 int nid;
1da177e4 2378
8bb78442 2379 if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
58c0a4a7 2380 for_each_node_state(nid, N_HIGH_MEMORY) {
c5f59f08 2381 pg_data_t *pgdat = NODE_DATA(nid);
a70f7302
RR
2382 const struct cpumask *mask;
2383
2384 mask = cpumask_of_node(pgdat->node_id);
c5f59f08 2385
3e597945 2386 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
1da177e4 2387 /* One of our CPUs online: restore mask */
c5f59f08 2388 set_cpus_allowed_ptr(pgdat->kswapd, mask);
1da177e4
LT
2389 }
2390 }
2391 return NOTIFY_OK;
2392}
1da177e4 2393
3218ae14
YG
2394/*
2395 * This kswapd start function will be called by init and node-hot-add.
2396 * On node-hot-add, kswapd will moved to proper cpus if cpus are hot-added.
2397 */
2398int kswapd_run(int nid)
2399{
2400 pg_data_t *pgdat = NODE_DATA(nid);
2401 int ret = 0;
2402
2403 if (pgdat->kswapd)
2404 return 0;
2405
2406 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
2407 if (IS_ERR(pgdat->kswapd)) {
2408 /* failure at boot is fatal */
2409 BUG_ON(system_state == SYSTEM_BOOTING);
2410		printk("Failed to start kswapd on node %d\n", nid);
2411 ret = -1;
2412 }
2413 return ret;
2414}
2415
8fe23e05
DR
2416/*
2417 * Called by memory hotplug when all memory in a node is offlined.
2418 */
2419void kswapd_stop(int nid)
2420{
2421 struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
2422
2423 if (kswapd)
2424 kthread_stop(kswapd);
2425}
2426
1da177e4
LT
2427static int __init kswapd_init(void)
2428{
3218ae14 2429 int nid;
69e05944 2430
1da177e4 2431 swap_setup();
9422ffba 2432 for_each_node_state(nid, N_HIGH_MEMORY)
3218ae14 2433 kswapd_run(nid);
1da177e4
LT
2434 hotcpu_notifier(cpu_callback, 0);
2435 return 0;
2436}
2437
2438module_init(kswapd_init)
9eeff239
CL
2439
2440#ifdef CONFIG_NUMA
2441/*
2442 * Zone reclaim mode
2443 *
2444 * If non-zero call zone_reclaim when the number of free pages falls below
2445 * the watermarks.
9eeff239
CL
2446 */
2447int zone_reclaim_mode __read_mostly;
2448
1b2ffb78 2449#define RECLAIM_OFF 0
7d03431c 2450#define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */
1b2ffb78
CL
2451#define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */
2452#define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */
2453
a92f7126
CL
2454/*
2455 * Priority for ZONE_RECLAIM. This determines the fraction of pages
2456 * of a node considered for each zone_reclaim. 4 scans 1/16th of
2457 * a zone.
2458 */
2459#define ZONE_RECLAIM_PRIORITY 4
2460
9614634f
CL
2461/*
2462 * Percentage of pages in a zone that must be unmapped for zone_reclaim to
2463 * occur.
2464 */
2465int sysctl_min_unmapped_ratio = 1;
2466
0ff38490
CL
2467/*
2468 * If the number of slab pages in a zone grows beyond this percentage then
2469 * slab reclaim needs to occur.
2470 */
2471int sysctl_min_slab_ratio = 5;
2472
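For a rough sense of scale, a sketch with made-up numbers, assuming the usual zone_pages * ratio / 100 translation that the page allocator applies to these two sysctls elsewhere:

#include <stdio.h>

int main(void)
{
	unsigned long zone_pages = 1UL << 20;	/* hypothetical ~4GB zone of 4KiB pages */
	unsigned long min_unmapped = zone_pages * 1 / 100;	/* sysctl_min_unmapped_ratio */
	unsigned long min_slab = zone_pages * 5 / 100;		/* sysctl_min_slab_ratio */

	printf("zone_reclaim only when unmapped pagecache > %lu pages\n", min_unmapped);
	printf("slab reclaim only when reclaimable slab > %lu pages\n", min_slab);
	return 0;
}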
90afa5de
MG
2473static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
2474{
2475 unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
2476 unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
2477 zone_page_state(zone, NR_ACTIVE_FILE);
2478
2479 /*
2480	 * It's possible for there to be more file-mapped pages than are
2481	 * accounted for by the pages on the file LRU lists, because
2482	 * tmpfs pages accounted for as ANON can also be FILE_MAPPED.
2483 */
2484 return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
2485}
2486
2487/* Work out how many page cache pages we can reclaim in this reclaim_mode */
2488static long zone_pagecache_reclaimable(struct zone *zone)
2489{
2490 long nr_pagecache_reclaimable;
2491 long delta = 0;
2492
2493 /*
2494 * If RECLAIM_SWAP is set, then all file pages are considered
2495 * potentially reclaimable. Otherwise, we have to worry about
2496	 * pages like swapcache, and zone_unmapped_file_pages() provides
2497	 * a better estimate.
2498 */
2499 if (zone_reclaim_mode & RECLAIM_SWAP)
2500 nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
2501 else
2502 nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);
2503
2504 /* If we can't clean pages, remove dirty pages from consideration */
2505 if (!(zone_reclaim_mode & RECLAIM_WRITE))
2506 delta += zone_page_state(zone, NR_FILE_DIRTY);
2507
2508 /* Watch for any possible underflows due to delta */
2509 if (unlikely(delta > nr_pagecache_reclaimable))
2510 delta = nr_pagecache_reclaimable;
2511
2512 return nr_pagecache_reclaimable - delta;
2513}
2514
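A minimal userspace sketch (not from this file) of the dirty-page discount above: when RECLAIM_WRITE is clear, dirty pages are subtracted from the reclaimable estimate, clamped so the result never underflows.

#include <stdio.h>

static long pagecache_reclaimable(long file_pages, long dirty_pages,
				  int reclaim_write)
{
	long delta = 0;

	if (!reclaim_write)		/* cannot clean pages: discount dirty ones */
		delta += dirty_pages;
	if (delta > file_pages)		/* watch for underflow, as above */
		delta = file_pages;
	return file_pages - delta;
}

int main(void)
{
	printf("%ld\n", pagecache_reclaimable(10000, 1500, 0));	/* 8500 */
	printf("%ld\n", pagecache_reclaimable(10000, 1500, 1));	/* 10000 */
	return 0;
}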
9eeff239
CL
2515/*
2516 * Try to free up some pages from this zone through reclaim.
2517 */
179e9639 2518static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
9eeff239 2519{
7fb2d46d 2520 /* Minimum pages needed in order to stay on node */
69e05944 2521 const unsigned long nr_pages = 1 << order;
9eeff239
CL
2522 struct task_struct *p = current;
2523 struct reclaim_state reclaim_state;
8695949a 2524 int priority;
179e9639
AM
2525 struct scan_control sc = {
2526 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
a6dc60f8 2527 .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
2e2e4259 2528 .may_swap = 1,
22fba335
KM
2529 .nr_to_reclaim = max_t(unsigned long, nr_pages,
2530 SWAP_CLUSTER_MAX),
179e9639 2531 .gfp_mask = gfp_mask,
d6277db4 2532 .swappiness = vm_swappiness,
bd2f6199 2533 .order = order,
66e1707b 2534 .isolate_pages = isolate_pages_global,
179e9639 2535 };
83e33a47 2536 unsigned long slab_reclaimable;
9eeff239
CL
2537
2538 disable_swap_token();
9eeff239 2539 cond_resched();
d4f7796e
CL
2540 /*
2541 * We need to be able to allocate from the reserves for RECLAIM_SWAP
2542 * and we also need to be able to write out pages for RECLAIM_WRITE
2543 * and RECLAIM_SWAP.
2544 */
2545 p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
9eeff239
CL
2546 reclaim_state.reclaimed_slab = 0;
2547 p->reclaim_state = &reclaim_state;
c84db23c 2548
90afa5de 2549 if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
0ff38490
CL
2550 /*
2551 * Free memory by calling shrink zone with increasing
2552 * priorities until we have enough memory freed.
2553 */
2554 priority = ZONE_RECLAIM_PRIORITY;
2555 do {
3bb1a852 2556 note_zone_scanning_priority(zone, priority);
a79311c1 2557 shrink_zone(priority, zone, &sc);
0ff38490 2558 priority--;
a79311c1 2559 } while (priority >= 0 && sc.nr_reclaimed < nr_pages);
0ff38490 2560 }
c84db23c 2561
83e33a47
CL
2562 slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
2563 if (slab_reclaimable > zone->min_slab_pages) {
2a16e3f4 2564 /*
7fb2d46d 2565 * shrink_slab() does not currently allow us to determine how
0ff38490
CL
2566 * many pages were freed in this zone. So we take the current
2567 * number of slab pages and shake the slab until it is reduced
2568 * by the same nr_pages that we used for reclaiming unmapped
2569 * pages.
2a16e3f4 2570 *
0ff38490
CL
2571 * Note that shrink_slab will free memory on all zones and may
2572 * take a long time.
2a16e3f4 2573 */
0ff38490 2574 while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
83e33a47
CL
2575 zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
2576 slab_reclaimable - nr_pages)
0ff38490 2577 ;
83e33a47
CL
2578
2579 /*
2580 * Update nr_reclaimed by the number of slab pages we
2581 * reclaimed from this zone.
2582 */
a79311c1 2583 sc.nr_reclaimed += slab_reclaimable -
83e33a47 2584 zone_page_state(zone, NR_SLAB_RECLAIMABLE);
2a16e3f4
CL
2585 }
2586
9eeff239 2587 p->reclaim_state = NULL;
d4f7796e 2588 current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
a79311c1 2589 return sc.nr_reclaimed >= nr_pages;
9eeff239 2590}
179e9639
AM
2591
2592int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
2593{
179e9639 2594 int node_id;
d773ed6b 2595 int ret;
179e9639
AM
2596
2597 /*
0ff38490
CL
2598 * Zone reclaim reclaims unmapped file backed pages and
2599 * slab pages if we are over the defined limits.
34aa1330 2600 *
9614634f
CL
2601 * A small portion of unmapped file backed pages is needed for
2602	 * file I/O; otherwise pages read by file I/O will be immediately
2603 * thrown out if the zone is overallocated. So we do not reclaim
2604 * if less than a specified percentage of the zone is used by
2605 * unmapped file backed pages.
179e9639 2606 */
90afa5de
MG
2607 if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
2608 zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
fa5e084e 2609 return ZONE_RECLAIM_FULL;
179e9639 2610
d773ed6b 2611 if (zone_is_all_unreclaimable(zone))
fa5e084e 2612 return ZONE_RECLAIM_FULL;
d773ed6b 2613
179e9639 2614 /*
d773ed6b 2615 * Do not scan if the allocation should not be delayed.
179e9639 2616 */
d773ed6b 2617 if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
fa5e084e 2618 return ZONE_RECLAIM_NOSCAN;
179e9639
AM
2619
2620 /*
2621 * Only run zone reclaim on the local zone or on zones that do not
2622 * have associated processors. This will favor the local processor
2623 * over remote processors and spread off node memory allocations
2624 * as wide as possible.
2625 */
89fa3024 2626 node_id = zone_to_nid(zone);
37c0708d 2627 if (node_state(node_id, N_CPU) && node_id != numa_node_id())
fa5e084e 2628 return ZONE_RECLAIM_NOSCAN;
d773ed6b
DR
2629
2630 if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
fa5e084e
MG
2631 return ZONE_RECLAIM_NOSCAN;
2632
d773ed6b
DR
2633 ret = __zone_reclaim(zone, gfp_mask, order);
2634 zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
2635
24cf7251
MG
2636 if (!ret)
2637 count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
2638
d773ed6b 2639 return ret;
179e9639 2640}
9eeff239 2641#endif
894bc310 2642
894bc310
LS
2643/*
2644 * page_evictable - test whether a page is evictable
2645 * @page: the page to test
2646 * @vma: the VMA in which the page is or will be mapped, may be NULL
2647 *
2648 * Test whether page is evictable--i.e., should be placed on active/inactive
b291f000
NP
2649 * lists vs unevictable list. The vma argument is !NULL when called from the
2650 * fault path to determine how to instantiate a new page.
894bc310
LS
2651 *
2652 * Reasons page might not be evictable:
ba9ddf49 2653 * (1) page's mapping marked unevictable
b291f000 2654 * (2) page is part of an mlocked VMA
ba9ddf49 2655 *
894bc310
LS
2656 */
2657int page_evictable(struct page *page, struct vm_area_struct *vma)
2658{
2659
ba9ddf49
LS
2660 if (mapping_unevictable(page_mapping(page)))
2661 return 0;
2662
b291f000
NP
2663 if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
2664 return 0;
894bc310
LS
2665
2666 return 1;
2667}
89e004ea
LS
2668
2669/**
2670 * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
2671 * @page: page to check evictability and move to appropriate lru list
2672 * @zone: zone page is in
2673 *
2674 * Checks a page for evictability and moves the page to the appropriate
2675 * zone lru list.
2676 *
2677 * Restrictions: zone->lru_lock must be held, page must be on LRU and must
2678 * have PageUnevictable set.
2679 */
2680static void check_move_unevictable_page(struct page *page, struct zone *zone)
2681{
2682 VM_BUG_ON(PageActive(page));
2683
2684retry:
2685 ClearPageUnevictable(page);
2686 if (page_evictable(page, NULL)) {
401a8e1c 2687 enum lru_list l = page_lru_base_type(page);
af936a16 2688
89e004ea
LS
2689 __dec_zone_state(zone, NR_UNEVICTABLE);
2690 list_move(&page->lru, &zone->lru[l].list);
08e552c6 2691 mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
89e004ea
LS
2692 __inc_zone_state(zone, NR_INACTIVE_ANON + l);
2693 __count_vm_event(UNEVICTABLE_PGRESCUED);
2694 } else {
2695 /*
2696 * rotate unevictable list
2697 */
2698 SetPageUnevictable(page);
2699 list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
08e552c6 2700 mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
89e004ea
LS
2701 if (page_evictable(page, NULL))
2702 goto retry;
2703 }
2704}
2705
2706/**
2707 * scan_mapping_unevictable_pages - scan an address space for evictable pages
2708 * @mapping: struct address_space to scan for evictable pages
2709 *
2710 * Scan all pages in mapping. Check unevictable pages for
2711 * evictability and move them to the appropriate zone lru list.
2712 */
2713void scan_mapping_unevictable_pages(struct address_space *mapping)
2714{
2715 pgoff_t next = 0;
2716 pgoff_t end = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
2717 PAGE_CACHE_SHIFT;
2718 struct zone *zone;
2719 struct pagevec pvec;
2720
2721 if (mapping->nrpages == 0)
2722 return;
2723
2724 pagevec_init(&pvec, 0);
2725 while (next < end &&
2726 pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
2727 int i;
2728 int pg_scanned = 0;
2729
2730 zone = NULL;
2731
2732 for (i = 0; i < pagevec_count(&pvec); i++) {
2733 struct page *page = pvec.pages[i];
2734 pgoff_t page_index = page->index;
2735 struct zone *pagezone = page_zone(page);
2736
2737 pg_scanned++;
2738 if (page_index > next)
2739 next = page_index;
2740 next++;
2741
2742 if (pagezone != zone) {
2743 if (zone)
2744 spin_unlock_irq(&zone->lru_lock);
2745 zone = pagezone;
2746 spin_lock_irq(&zone->lru_lock);
2747 }
2748
2749 if (PageLRU(page) && PageUnevictable(page))
2750 check_move_unevictable_page(page, zone);
2751 }
2752 if (zone)
2753 spin_unlock_irq(&zone->lru_lock);
2754 pagevec_release(&pvec);
2755
2756 count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
2757 }
2758
2759}
af936a16
LS
2760
2761/**
2762 * scan_zone_unevictable_pages - check unevictable list for evictable pages
2763 * @zone - zone of which to scan the unevictable list
2764 *
2765 * Scan @zone's unevictable LRU lists to check for pages that have become
2766 * evictable. Move those that have to @zone's inactive list where they
2767 * become candidates for reclaim, unless shrink_inactive_zone() decides
2768 * to reactivate them. Pages that are still unevictable are rotated
2769 * back onto @zone's unevictable list.
2770 */
2771#define SCAN_UNEVICTABLE_BATCH_SIZE 16UL /* arbitrary lock hold batch size */
14b90b22 2772static void scan_zone_unevictable_pages(struct zone *zone)
af936a16
LS
2773{
2774 struct list_head *l_unevictable = &zone->lru[LRU_UNEVICTABLE].list;
2775 unsigned long scan;
2776 unsigned long nr_to_scan = zone_page_state(zone, NR_UNEVICTABLE);
2777
2778 while (nr_to_scan > 0) {
2779 unsigned long batch_size = min(nr_to_scan,
2780 SCAN_UNEVICTABLE_BATCH_SIZE);
2781
2782 spin_lock_irq(&zone->lru_lock);
2783 for (scan = 0; scan < batch_size; scan++) {
2784 struct page *page = lru_to_page(l_unevictable);
2785
2786 if (!trylock_page(page))
2787 continue;
2788
2789 prefetchw_prev_lru_page(page, l_unevictable, flags);
2790
2791 if (likely(PageLRU(page) && PageUnevictable(page)))
2792 check_move_unevictable_page(page, zone);
2793
2794 unlock_page(page);
2795 }
2796 spin_unlock_irq(&zone->lru_lock);
2797
2798 nr_to_scan -= batch_size;
2799 }
2800}
2801
2802
2803/**
2804 * scan_all_zones_unevictable_pages - scan all unevictable lists for evictable pages
2805 *
2806 * A really big hammer: scan all zones' unevictable LRU lists to check for
2807 * pages that have become evictable. Move those back to the zones'
2808 * inactive list where they become candidates for reclaim.
2809 * This occurs when, e.g., we have unswappable pages on the unevictable lists,
2810 * and we add swap to the system. As such, it runs in the context of a task
2811 * that has possibly/probably made some previously unevictable pages
2812 * evictable.
2813 */
ff30153b 2814static void scan_all_zones_unevictable_pages(void)
af936a16
LS
2815{
2816 struct zone *zone;
2817
2818 for_each_zone(zone) {
2819 scan_zone_unevictable_pages(zone);
2820 }
2821}
2822
2823/*
2824 * scan_unevictable_pages [vm] sysctl handler. On demand re-scan of
2825 * all nodes' unevictable lists for evictable pages
2826 */
2827unsigned long scan_unevictable_pages;
2828
2829int scan_unevictable_handler(struct ctl_table *table, int write,
8d65af78 2830 void __user *buffer,
af936a16
LS
2831 size_t *length, loff_t *ppos)
2832{
8d65af78 2833 proc_doulongvec_minmax(table, write, buffer, length, ppos);
af936a16
LS
2834
2835 if (write && *(unsigned long *)table->data)
2836 scan_all_zones_unevictable_pages();
2837
2838 scan_unevictable_pages = 0;
2839 return 0;
2840}
2841
2842/*
2843 * per node 'scan_unevictable_pages' attribute. On demand re-scan of
2844 * a specified node's per zone unevictable lists for evictable pages.
2845 */
2846
2847static ssize_t read_scan_unevictable_node(struct sys_device *dev,
2848 struct sysdev_attribute *attr,
2849 char *buf)
2850{
2851 return sprintf(buf, "0\n"); /* always zero; should fit... */
2852}
2853
2854static ssize_t write_scan_unevictable_node(struct sys_device *dev,
2855 struct sysdev_attribute *attr,
2856 const char *buf, size_t count)
2857{
2858 struct zone *node_zones = NODE_DATA(dev->id)->node_zones;
2859 struct zone *zone;
2860 unsigned long res;
2861 unsigned long req = strict_strtoul(buf, 10, &res);
2862
2863 if (!req)
2864 return 1; /* zero is no-op */
2865
2866 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
2867 if (!populated_zone(zone))
2868 continue;
2869 scan_zone_unevictable_pages(zone);
2870 }
2871 return 1;
2872}
2873
2874
2875static SYSDEV_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
2876 read_scan_unevictable_node,
2877 write_scan_unevictable_node);
2878
2879int scan_unevictable_register_node(struct node *node)
2880{
2881 return sysdev_create_file(&node->sysdev, &attr_scan_unevictable_pages);
2882}
2883
2884void scan_unevictable_unregister_node(struct node *node)
2885{
2886 sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
2887}
2888