1da177e4
LT
1/*
2 * linux/mm/vmscan.c
3 *
4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
5 *
6 * Swap reorganised 29.12.95, Stephen Tweedie.
7 * kswapd added: 7.1.96 sct
8 * Removed kswapd_ctl limits, and swap out as many pages as needed
9 * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
10 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
11 * Multiqueue VM started 5.8.00, Rik van Riel.
12 */
13
14#include <linux/mm.h>
15#include <linux/module.h>
16#include <linux/slab.h>
17#include <linux/kernel_stat.h>
18#include <linux/swap.h>
19#include <linux/pagemap.h>
20#include <linux/init.h>
21#include <linux/highmem.h>
22#include <linux/file.h>
23#include <linux/writeback.h>
24#include <linux/blkdev.h>
25#include <linux/buffer_head.h> /* for try_to_release_page(),
26 buffer_heads_over_limit */
27#include <linux/mm_inline.h>
28#include <linux/pagevec.h>
29#include <linux/backing-dev.h>
30#include <linux/rmap.h>
31#include <linux/topology.h>
32#include <linux/cpu.h>
33#include <linux/cpuset.h>
34#include <linux/notifier.h>
35#include <linux/rwsem.h>
36
37#include <asm/tlbflush.h>
38#include <asm/div64.h>
39
40#include <linux/swapops.h>
41
0f8053a5
NP
42#include "internal.h"
43
1da177e4
LT
44/* possible outcome of pageout() */
45typedef enum {
46 /* failed to write page out, page is locked */
47 PAGE_KEEP,
48 /* move page to the active list, page is locked */
49 PAGE_ACTIVATE,
50 /* page has been sent to the disk successfully, page is unlocked */
51 PAGE_SUCCESS,
52 /* page is clean and locked */
53 PAGE_CLEAN,
54} pageout_t;
55
56struct scan_control {
1da177e4
LT
57 /* Incremented by the number of inactive pages that were scanned */
58 unsigned long nr_scanned;
59
1da177e4
LT
60 unsigned long nr_mapped; /* From page_state */
61
1da177e4 62 /* This context's GFP mask */
6daa0e28 63 gfp_t gfp_mask;
1da177e4
LT
64
65 int may_writepage;
66
f1fd1067
CL
67 /* Can pages be swapped as part of reclaim? */
68 int may_swap;
69
1da177e4
LT
70 /* This context's SWAP_CLUSTER_MAX. If freeing memory for
71 * suspend, we effectively ignore SWAP_CLUSTER_MAX.
72 * In this context, it doesn't matter that we scan the
73 * whole list at once. */
74 int swap_cluster_max;
75};
76
77/*
78 * The list of shrinker callbacks used to apply pressure to
79 * ageable caches.
80 */
81struct shrinker {
82 shrinker_t shrinker;
83 struct list_head list;
84 int seeks; /* seeks to recreate an obj */
85 long nr; /* objs pending delete */
86};
87
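/*
 * lru_to_page() returns the page at the tail of the list (the element just
 * before _head).  Pages are added to an LRU at the head, so the tail holds
 * the page that has been on the list longest and is taken first.  The
 * prefetch helpers below simply prefetch the page that will be visited next
 * while walking such a list backwards.
 */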
88#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
89
90#ifdef ARCH_HAS_PREFETCH
91#define prefetch_prev_lru_page(_page, _base, _field) \
92 do { \
93 if ((_page)->lru.prev != _base) { \
94 struct page *prev; \
95 \
96 prev = lru_to_page(&(_page->lru)); \
97 prefetch(&prev->_field); \
98 } \
99 } while (0)
100#else
101#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
102#endif
103
104#ifdef ARCH_HAS_PREFETCHW
105#define prefetchw_prev_lru_page(_page, _base, _field) \
106 do { \
107 if ((_page)->lru.prev != _base) { \
108 struct page *prev; \
109 \
110 prev = lru_to_page(&(_page->lru)); \
111 prefetchw(&prev->_field); \
112 } \
113 } while (0)
114#else
115#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
116#endif
117
118/*
119 * From 0 .. 100. Higher means more swappy.
120 */
121int vm_swappiness = 60;
122static long total_memory;
123
124static LIST_HEAD(shrinker_list);
125static DECLARE_RWSEM(shrinker_rwsem);
126
127/*
128 * Add a shrinker callback to be called from the vm
129 */
130struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker)
131{
132 struct shrinker *shrinker;
133
134 shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL);
135 if (shrinker) {
136 shrinker->shrinker = theshrinker;
137 shrinker->seeks = seeks;
138 shrinker->nr = 0;
139 down_write(&shrinker_rwsem);
140 list_add_tail(&shrinker->list, &shrinker_list);
141 up_write(&shrinker_rwsem);
142 }
143 return shrinker;
144}
145EXPORT_SYMBOL(set_shrinker);
146
147/*
148 * Remove one
149 */
150void remove_shrinker(struct shrinker *shrinker)
151{
152 down_write(&shrinker_rwsem);
153 list_del(&shrinker->list);
154 up_write(&shrinker_rwsem);
155 kfree(shrinker);
156}
157EXPORT_SYMBOL(remove_shrinker);
158
159#define SHRINK_BATCH 128
160/*
161 * Call the shrink functions to age shrinkable caches
162 *
163 * Here we assume it costs one seek to replace an LRU page and that it also
164 * takes a seek to recreate a cache object. With this in mind we age equal
165 * percentages of the LRU and ageable caches. This should balance the seeks
166 * generated by these structures.
167 *
168 * If the VM encountered mapped pages on the LRU it increases the pressure on
169 * slab to avoid swapping.
170 *
171 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
172 *
173 * `lru_pages' represents the number of on-LRU pages in all the zones which
174 * are eligible for the caller's allocation attempt. It is used for balancing
175 * slab reclaim versus page reclaim.
b15e0905
AM
176 *
177 * Returns the number of slab objects which we shrunk.
1da177e4 178 */
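/*
 * For example (made-up numbers): if scanned = 1024 out of lru_pages = 100000
 * LRU pages, a shrinker with seeks = 2 and max_pass = 10000 freeable objects
 * gets delta = (4 * 1024 / 2) * 10000 / 100001 ~= 204 objects added to its
 * scan target, i.e. about (4/seeks) times the fraction of the LRU that was
 * scanned.
 */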
69e05944
AM
179unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
180 unsigned long lru_pages)
1da177e4
LT
181{
182 struct shrinker *shrinker;
69e05944 183 unsigned long ret = 0;
1da177e4
LT
184
185 if (scanned == 0)
186 scanned = SWAP_CLUSTER_MAX;
187
188 if (!down_read_trylock(&shrinker_rwsem))
b15e0905 189 return 1; /* Assume we'll be able to shrink next time */
1da177e4
LT
190
191 list_for_each_entry(shrinker, &shrinker_list, list) {
192 unsigned long long delta;
193 unsigned long total_scan;
ea164d73 194 unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask);
1da177e4
LT
195
196 delta = (4 * scanned) / shrinker->seeks;
ea164d73 197 delta *= max_pass;
1da177e4
LT
198 do_div(delta, lru_pages + 1);
199 shrinker->nr += delta;
ea164d73
AA
200 if (shrinker->nr < 0) {
201 printk(KERN_ERR "%s: nr=%ld\n",
202 __FUNCTION__, shrinker->nr);
203 shrinker->nr = max_pass;
204 }
205
206 /*
207 * Avoid risking looping forever due to a too-large nr value:
208 * never try to free more than twice the estimated number of
209 * freeable entries.
210 */
211 if (shrinker->nr > max_pass * 2)
212 shrinker->nr = max_pass * 2;
1da177e4
LT
213
214 total_scan = shrinker->nr;
215 shrinker->nr = 0;
216
217 while (total_scan >= SHRINK_BATCH) {
218 long this_scan = SHRINK_BATCH;
219 int shrink_ret;
b15e0905 220 int nr_before;
1da177e4 221
b15e0905 222 nr_before = (*shrinker->shrinker)(0, gfp_mask);
1da177e4
LT
223 shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask);
224 if (shrink_ret == -1)
225 break;
b15e0905
AM
226 if (shrink_ret < nr_before)
227 ret += nr_before - shrink_ret;
1da177e4
LT
228 mod_page_state(slabs_scanned, this_scan);
229 total_scan -= this_scan;
230
231 cond_resched();
232 }
233
234 shrinker->nr += total_scan;
235 }
236 up_read(&shrinker_rwsem);
b15e0905 237 return ret;
1da177e4
LT
238}
239
240/* Called without lock on whether page is mapped, so answer is unstable */
241static inline int page_mapping_inuse(struct page *page)
242{
243 struct address_space *mapping;
244
245 /* Page is in somebody's page tables. */
246 if (page_mapped(page))
247 return 1;
248
249 /* Be more reluctant to reclaim swapcache than pagecache */
250 if (PageSwapCache(page))
251 return 1;
252
253 mapping = page_mapping(page);
254 if (!mapping)
255 return 0;
256
257 /* File is mmap'd by somebody? */
258 return mapping_mapped(mapping);
259}
260
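/*
 * A freeable pagecache page holds one reference for the pagecache itself and
 * one for the caller that isolated it (hence the expected count of 2), plus
 * one extra reference for its buffer_heads when PagePrivate is set.
 */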
261static inline int is_page_cache_freeable(struct page *page)
262{
263 return page_count(page) - !!PagePrivate(page) == 2;
264}
265
266static int may_write_to_queue(struct backing_dev_info *bdi)
267{
930d9152 268 if (current->flags & PF_SWAPWRITE)
1da177e4
LT
269 return 1;
270 if (!bdi_write_congested(bdi))
271 return 1;
272 if (bdi == current->backing_dev_info)
273 return 1;
274 return 0;
275}
276
277/*
278 * We detected a synchronous write error writing a page out. Probably
279 * -ENOSPC. We need to propagate that into the address_space for a subsequent
280 * fsync(), msync() or close().
281 *
282 * The tricky part is that after writepage we cannot touch the mapping: nothing
283 * prevents it from being freed up. But we have a ref on the page and once
284 * that page is locked, the mapping is pinned.
285 *
286 * We're allowed to run sleeping lock_page() here because we know the caller has
287 * __GFP_FS.
288 */
289static void handle_write_error(struct address_space *mapping,
290 struct page *page, int error)
291{
292 lock_page(page);
293 if (page_mapping(page) == mapping) {
294 if (error == -ENOSPC)
295 set_bit(AS_ENOSPC, &mapping->flags);
296 else
297 set_bit(AS_EIO, &mapping->flags);
298 }
299 unlock_page(page);
300}
301
302/*
1742f19f
AM
303 * pageout is called by shrink_page_list() for each dirty page.
304 * Calls ->writepage().
1da177e4
LT
305 */
306static pageout_t pageout(struct page *page, struct address_space *mapping)
307{
308 /*
309 * If the page is dirty, only perform writeback if that write
310 * will be non-blocking, to prevent this allocation from being
311 * stalled by pagecache activity. But note that there may be
312 * stalls if we need to run get_block(). We could test
313 * PagePrivate for that.
314 *
315 * If this process is currently in generic_file_write() against
316 * this page's queue, we can perform writeback even if that
317 * will block.
318 *
319 * If the page is swapcache, write it back even if that would
320 * block, for some throttling. This happens by accident, because
321 * swap_backing_dev_info is bust: it doesn't reflect the
322 * congestion state of the swapdevs. Easy to fix, if needed.
323 * See swapfile.c:page_queue_congested().
324 */
325 if (!is_page_cache_freeable(page))
326 return PAGE_KEEP;
327 if (!mapping) {
328 /*
329 * Some data journaling orphaned pages can have
330 * page->mapping == NULL while being dirty with clean buffers.
331 */
323aca6c 332 if (PagePrivate(page)) {
1da177e4
LT
333 if (try_to_free_buffers(page)) {
334 ClearPageDirty(page);
335 printk("%s: orphaned page\n", __FUNCTION__);
336 return PAGE_CLEAN;
337 }
338 }
339 return PAGE_KEEP;
340 }
341 if (mapping->a_ops->writepage == NULL)
342 return PAGE_ACTIVATE;
343 if (!may_write_to_queue(mapping->backing_dev_info))
344 return PAGE_KEEP;
345
346 if (clear_page_dirty_for_io(page)) {
347 int res;
348 struct writeback_control wbc = {
349 .sync_mode = WB_SYNC_NONE,
350 .nr_to_write = SWAP_CLUSTER_MAX,
351 .nonblocking = 1,
352 .for_reclaim = 1,
353 };
354
355 SetPageReclaim(page);
356 res = mapping->a_ops->writepage(page, &wbc);
357 if (res < 0)
358 handle_write_error(mapping, page, res);
994fc28c 359 if (res == AOP_WRITEPAGE_ACTIVATE) {
1da177e4
LT
360 ClearPageReclaim(page);
361 return PAGE_ACTIVATE;
362 }
363 if (!PageWriteback(page)) {
364 /* synchronous write or broken a_ops? */
365 ClearPageReclaim(page);
366 }
367
368 return PAGE_SUCCESS;
369 }
370
371 return PAGE_CLEAN;
372}
373
49d2e9cc
CL
374static int remove_mapping(struct address_space *mapping, struct page *page)
375{
376 if (!mapping)
377 return 0; /* truncate got there first */
378
379 write_lock_irq(&mapping->tree_lock);
380
381 /*
382 * The non-racy check for busy page. It is critical to check
383 * PageDirty _after_ making sure that the page is freeable and
384 * not in use by anybody. (pagecache + us == 2)
385 */
386 if (unlikely(page_count(page) != 2))
387 goto cannot_free;
388 smp_rmb();
389 if (unlikely(PageDirty(page)))
390 goto cannot_free;
391
392 if (PageSwapCache(page)) {
393 swp_entry_t swap = { .val = page_private(page) };
394 __delete_from_swap_cache(page);
395 write_unlock_irq(&mapping->tree_lock);
396 swap_free(swap);
397 __put_page(page); /* The pagecache ref */
398 return 1;
399 }
400
401 __remove_from_page_cache(page);
402 write_unlock_irq(&mapping->tree_lock);
403 __put_page(page);
404 return 1;
405
406cannot_free:
407 write_unlock_irq(&mapping->tree_lock);
408 return 0;
409}
410
1da177e4 411/*
1742f19f 412 * shrink_page_list() returns the number of reclaimed pages
1da177e4 413 */
1742f19f
AM
414static unsigned long shrink_page_list(struct list_head *page_list,
415 struct scan_control *sc)
1da177e4
LT
416{
417 LIST_HEAD(ret_pages);
418 struct pagevec freed_pvec;
419 int pgactivate = 0;
05ff5137 420 unsigned long nr_reclaimed = 0;
1da177e4
LT
421
422 cond_resched();
423
424 pagevec_init(&freed_pvec, 1);
425 while (!list_empty(page_list)) {
426 struct address_space *mapping;
427 struct page *page;
428 int may_enter_fs;
429 int referenced;
430
431 cond_resched();
432
433 page = lru_to_page(page_list);
434 list_del(&page->lru);
435
436 if (TestSetPageLocked(page))
437 goto keep;
438
439 BUG_ON(PageActive(page));
440
441 sc->nr_scanned++;
80e43426
CL
442
443 if (!sc->may_swap && page_mapped(page))
444 goto keep_locked;
445
1da177e4
LT
446 /* Double the slab pressure for mapped and swapcache pages */
447 if (page_mapped(page) || PageSwapCache(page))
448 sc->nr_scanned++;
449
450 if (PageWriteback(page))
451 goto keep_locked;
452
f7b7fd8f 453 referenced = page_referenced(page, 1);
1da177e4
LT
454 /* In active use or really unfreeable? Activate it. */
455 if (referenced && page_mapping_inuse(page))
456 goto activate_locked;
457
458#ifdef CONFIG_SWAP
459 /*
460 * Anonymous process memory has backing store?
461 * Try to allocate it some swap space here.
462 */
6e5ef1a9 463 if (PageAnon(page) && !PageSwapCache(page))
1480a540 464 if (!add_to_swap(page, GFP_ATOMIC))
1da177e4 465 goto activate_locked;
1da177e4
LT
466#endif /* CONFIG_SWAP */
467
468 mapping = page_mapping(page);
469 may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
470 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
471
472 /*
473 * The page is mapped into the page tables of one or more
474 * processes. Try to unmap it here.
475 */
476 if (page_mapped(page) && mapping) {
a48d07af 477 switch (try_to_unmap(page, 0)) {
1da177e4
LT
478 case SWAP_FAIL:
479 goto activate_locked;
480 case SWAP_AGAIN:
481 goto keep_locked;
482 case SWAP_SUCCESS:
483 ; /* try to free the page below */
484 }
485 }
486
487 if (PageDirty(page)) {
488 if (referenced)
489 goto keep_locked;
490 if (!may_enter_fs)
491 goto keep_locked;
52a8363e 492 if (!sc->may_writepage)
1da177e4
LT
493 goto keep_locked;
494
495 /* Page is dirty, try to write it out here */
496 switch(pageout(page, mapping)) {
497 case PAGE_KEEP:
498 goto keep_locked;
499 case PAGE_ACTIVATE:
500 goto activate_locked;
501 case PAGE_SUCCESS:
502 if (PageWriteback(page) || PageDirty(page))
503 goto keep;
504 /*
505 * A synchronous write - probably a ramdisk. Go
506 * ahead and try to reclaim the page.
507 */
508 if (TestSetPageLocked(page))
509 goto keep;
510 if (PageDirty(page) || PageWriteback(page))
511 goto keep_locked;
512 mapping = page_mapping(page);
513 case PAGE_CLEAN:
514 ; /* try to free the page below */
515 }
516 }
517
518 /*
519 * If the page has buffers, try to free the buffer mappings
520 * associated with this page. If we succeed we try to free
521 * the page as well.
522 *
523 * We do this even if the page is PageDirty().
524 * try_to_release_page() does not perform I/O, but it is
525 * possible for a page to have PageDirty set, but it is actually
526 * clean (all its buffers are clean). This happens if the
527 * buffers were written out directly, with submit_bh(). ext3
528 * will do this, as well as the blockdev mapping.
529 * try_to_release_page() will discover that cleanness and will
530 * drop the buffers and mark the page clean - it can be freed.
531 *
532 * Rarely, pages can have buffers and no ->mapping. These are
533 * the pages which were not successfully invalidated in
534 * truncate_complete_page(). We try to drop those buffers here
535 * and if that worked, and the page is no longer mapped into
536 * process address space (page_count == 1) it can be freed.
537 * Otherwise, leave the page on the LRU so it is swappable.
538 */
539 if (PagePrivate(page)) {
540 if (!try_to_release_page(page, sc->gfp_mask))
541 goto activate_locked;
542 if (!mapping && page_count(page) == 1)
543 goto free_it;
544 }
545
49d2e9cc
CL
546 if (!remove_mapping(mapping, page))
547 goto keep_locked;
1da177e4
LT
548
549free_it:
550 unlock_page(page);
05ff5137 551 nr_reclaimed++;
1da177e4
LT
552 if (!pagevec_add(&freed_pvec, page))
553 __pagevec_release_nonlru(&freed_pvec);
554 continue;
555
556activate_locked:
557 SetPageActive(page);
558 pgactivate++;
559keep_locked:
560 unlock_page(page);
561keep:
562 list_add(&page->lru, &ret_pages);
563 BUG_ON(PageLRU(page));
564 }
565 list_splice(&ret_pages, page_list);
566 if (pagevec_count(&freed_pvec))
567 __pagevec_release_nonlru(&freed_pvec);
568 mod_page_state(pgactivate, pgactivate);
05ff5137 569 return nr_reclaimed;
1da177e4
LT
570}
571
7cbe34cf 572#ifdef CONFIG_MIGRATION
8419c318
CL
573static inline void move_to_lru(struct page *page)
574{
575 list_del(&page->lru);
576 if (PageActive(page)) {
577 /*
578 * lru_cache_add_active checks that
579 * the PG_active bit is off.
580 */
581 ClearPageActive(page);
582 lru_cache_add_active(page);
583 } else {
584 lru_cache_add(page);
585 }
586 put_page(page);
587}
588
589/*
053837fc 590 * Add isolated pages on the list back to the LRU.
8419c318
CL
591 *
592 * returns the number of pages put back.
593 */
69e05944 594unsigned long putback_lru_pages(struct list_head *l)
8419c318
CL
595{
596 struct page *page;
597 struct page *page2;
69e05944 598 unsigned long count = 0;
8419c318
CL
599
600 list_for_each_entry_safe(page, page2, l, lru) {
601 move_to_lru(page);
602 count++;
603 }
604 return count;
605}
606
e965f963
CL
607/*
608 * Non migratable page
609 */
610int fail_migrate_page(struct page *newpage, struct page *page)
611{
612 return -EIO;
613}
614EXPORT_SYMBOL(fail_migrate_page);
615
49d2e9cc
CL
616/*
617 * swapout a single page
618 * page is locked upon entry, unlocked on exit
49d2e9cc
CL
619 */
620static int swap_page(struct page *page)
621{
622 struct address_space *mapping = page_mapping(page);
623
624 if (page_mapped(page) && mapping)
418aade4 625 if (try_to_unmap(page, 1) != SWAP_SUCCESS)
49d2e9cc
CL
626 goto unlock_retry;
627
628 if (PageDirty(page)) {
629 /* Page is dirty, try to write it out here */
630 switch(pageout(page, mapping)) {
631 case PAGE_KEEP:
632 case PAGE_ACTIVATE:
633 goto unlock_retry;
634
635 case PAGE_SUCCESS:
636 goto retry;
637
638 case PAGE_CLEAN:
639 ; /* try to free the page below */
640 }
641 }
642
643 if (PagePrivate(page)) {
644 if (!try_to_release_page(page, GFP_KERNEL) ||
645 (!mapping && page_count(page) == 1))
646 goto unlock_retry;
647 }
648
649 if (remove_mapping(mapping, page)) {
650 /* Success */
651 unlock_page(page);
652 return 0;
653 }
654
655unlock_retry:
656 unlock_page(page);
657
658retry:
d0d96328 659 return -EAGAIN;
49d2e9cc 660}
e965f963 661EXPORT_SYMBOL(swap_page);
a48d07af
CL
662
663/*
664 * Page migration was first developed in the context of the memory hotplug
665 * project. The main authors of the migration code are:
666 *
667 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
668 * Hirokazu Takahashi <taka@valinux.co.jp>
669 * Dave Hansen <haveblue@us.ibm.com>
670 * Christoph Lameter <clameter@sgi.com>
671 */
672
673/*
674 * Remove references for a page and establish the new page with the correct
675 * basic settings to be able to stop accesses to the page.
676 */
e965f963 677int migrate_page_remove_references(struct page *newpage,
a48d07af
CL
678 struct page *page, int nr_refs)
679{
680 struct address_space *mapping = page_mapping(page);
681 struct page **radix_pointer;
682
683 /*
684 * Avoid doing any of the following work if the page count
685 * indicates that the page is in use or truncate has removed
686 * the page.
687 */
688 if (!mapping || page_mapcount(page) + nr_refs != page_count(page))
4983da07 689 return -EAGAIN;
a48d07af
CL
690
691 /*
692 * Establish swap ptes for anonymous pages or destroy pte
693 * maps for files.
694 *
695 * In order to reestablish file backed mappings the fault handlers
696 * will take the radix tree_lock which may then be used to stop
697 * processes from accessing this page until the new page is ready.
698 *
699 * A process accessing via a swap pte (an anonymous page) will take a
700 * page_lock on the old page which will block the process until the
701 * migration attempt is complete. At that time the PageSwapCache bit
702 * will be examined. If the page was migrated then the PageSwapCache
703 * bit will be clear and the operation to retrieve the page will be
704 * retried which will find the new page in the radix tree. Then a new
705 * direct mapping may be generated based on the radix tree contents.
706 *
707 * If the page was not migrated then the PageSwapCache bit
708 * is still set and the operation may continue.
709 */
4983da07
CL
710 if (try_to_unmap(page, 1) == SWAP_FAIL)
711 /* A vma has VM_LOCKED set -> Permanent failure */
712 return -EPERM;
a48d07af
CL
713
714 /*
715 * Give up if we were unable to remove all mappings.
716 */
717 if (page_mapcount(page))
4983da07 718 return -EAGAIN;
a48d07af
CL
719
720 write_lock_irq(&mapping->tree_lock);
721
722 radix_pointer = (struct page **)radix_tree_lookup_slot(
723 &mapping->page_tree,
724 page_index(page));
725
726 if (!page_mapping(page) || page_count(page) != nr_refs ||
727 *radix_pointer != page) {
728 write_unlock_irq(&mapping->tree_lock);
4983da07 729 return -EAGAIN;
a48d07af
CL
730 }
731
732 /*
733 * Now we know that no one else is looking at the page.
734 *
735 * Certain minimal information about a page must be available
736 * in order for other subsystems to properly handle the page if they
737 * find it through the radix tree update before we are finished
738 * copying the page.
739 */
740 get_page(newpage);
741 newpage->index = page->index;
742 newpage->mapping = page->mapping;
743 if (PageSwapCache(page)) {
744 SetPageSwapCache(newpage);
745 set_page_private(newpage, page_private(page));
746 }
747
748 *radix_pointer = newpage;
749 __put_page(page);
750 write_unlock_irq(&mapping->tree_lock);
751
752 return 0;
753}
e965f963 754EXPORT_SYMBOL(migrate_page_remove_references);
a48d07af
CL
755
756/*
757 * Copy the page to its new location
758 */
759void migrate_page_copy(struct page *newpage, struct page *page)
760{
761 copy_highpage(newpage, page);
762
763 if (PageError(page))
764 SetPageError(newpage);
765 if (PageReferenced(page))
766 SetPageReferenced(newpage);
767 if (PageUptodate(page))
768 SetPageUptodate(newpage);
769 if (PageActive(page))
770 SetPageActive(newpage);
771 if (PageChecked(page))
772 SetPageChecked(newpage);
773 if (PageMappedToDisk(page))
774 SetPageMappedToDisk(newpage);
775
776 if (PageDirty(page)) {
777 clear_page_dirty_for_io(page);
778 set_page_dirty(newpage);
779 }
780
781 ClearPageSwapCache(page);
782 ClearPageActive(page);
783 ClearPagePrivate(page);
784 set_page_private(page, 0);
785 page->mapping = NULL;
786
787 /*
788 * If any waiters have accumulated on the new page then
789 * wake them up.
790 */
791 if (PageWriteback(newpage))
792 end_page_writeback(newpage);
793}
e965f963 794EXPORT_SYMBOL(migrate_page_copy);
a48d07af
CL
795
796/*
797 * Common logic to directly migrate a single page suitable for
798 * pages that do not use PagePrivate.
799 *
800 * Pages are locked upon entry and exit.
801 */
802int migrate_page(struct page *newpage, struct page *page)
803{
4983da07
CL
804 int rc;
805
a48d07af
CL
806 BUG_ON(PageWriteback(page)); /* Writeback must be complete */
807
4983da07
CL
808 rc = migrate_page_remove_references(newpage, page, 2);
809
810 if (rc)
811 return rc;
a48d07af
CL
812
813 migrate_page_copy(newpage, page);
814
a3351e52
CL
815 /*
816 * Remove auxiliary swap entries and replace
817 * them with real ptes.
818 *
819 * Note that a real pte entry will allow processes that are not
820 * waiting on the page lock to use the new page via the page tables
821 * before the new page is unlocked.
822 */
823 remove_from_swap(newpage);
a48d07af
CL
824 return 0;
825}
e965f963 826EXPORT_SYMBOL(migrate_page);
a48d07af 827
49d2e9cc
CL
828/*
829 * migrate_pages
830 *
831 * Two lists are passed to this function. The first list
832 * contains the pages isolated from the LRU to be migrated.
833 * The second list contains new pages that the isolated pages
834 * can be moved to. If the second list is NULL then all
835 * pages are swapped out.
836 *
837 * The function returns after 10 attempts or if no pages
418aade4 838 * are movable anymore because `to' has become empty
49d2e9cc
CL
839 * or no retryable pages exist anymore.
840 *
d0d96328 841 * Return: Number of pages not migrated when "to" ran empty.
49d2e9cc 842 */
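/*
 * Rough usage sketch (the real callers live in the memory hotplug and
 * mempolicy code and differ in detail):
 *
 *	LIST_HEAD(moved);
 *	LIST_HEAD(failed);
 *	fill "from" via isolate_lru_page(), "to" with newly allocated pages
 *	nr_failed = migrate_pages(from, to, &moved, &failed);
 *	putback_lru_pages(from);	retryable pages go back on the LRU
 *	putback_lru_pages(&failed);	and eventually so do the failures
 */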
69e05944 843unsigned long migrate_pages(struct list_head *from, struct list_head *to,
d4984711 844 struct list_head *moved, struct list_head *failed)
49d2e9cc 845{
69e05944
AM
846 unsigned long retry;
847 unsigned long nr_failed = 0;
49d2e9cc
CL
848 int pass = 0;
849 struct page *page;
850 struct page *page2;
851 int swapwrite = current->flags & PF_SWAPWRITE;
d0d96328 852 int rc;
49d2e9cc
CL
853
854 if (!swapwrite)
855 current->flags |= PF_SWAPWRITE;
856
857redo:
858 retry = 0;
859
d4984711 860 list_for_each_entry_safe(page, page2, from, lru) {
a48d07af
CL
861 struct page *newpage = NULL;
862 struct address_space *mapping;
863
49d2e9cc
CL
864 cond_resched();
865
d0d96328
CL
866 rc = 0;
867 if (page_count(page) == 1)
ee27497d 868 /* page was freed from under us. So we are done. */
d0d96328
CL
869 goto next;
870
a48d07af
CL
871 if (to && list_empty(to))
872 break;
873
49d2e9cc
CL
874 /*
875 * Skip locked pages during the first two passes to give the
7cbe34cf
CL
876 * functions holding the lock time to release the page. Later we
877 * use lock_page() to have a higher chance of acquiring the
878 * lock.
49d2e9cc 879 */
d0d96328 880 rc = -EAGAIN;
49d2e9cc
CL
881 if (pass > 2)
882 lock_page(page);
883 else
884 if (TestSetPageLocked(page))
d0d96328 885 goto next;
49d2e9cc
CL
886
887 /*
888 * Only wait on writeback if we have already done a pass where
889 * we may have triggered writeouts for lots of pages.
890 */
7cbe34cf 891 if (pass > 0) {
49d2e9cc 892 wait_on_page_writeback(page);
7cbe34cf 893 } else {
d0d96328
CL
894 if (PageWriteback(page))
895 goto unlock_page;
7cbe34cf 896 }
49d2e9cc 897
d0d96328
CL
898 /*
899 * Anonymous pages must have swap cache references otherwise
900 * the information contained in the page maps cannot be
901 * preserved.
902 */
49d2e9cc 903 if (PageAnon(page) && !PageSwapCache(page)) {
1480a540 904 if (!add_to_swap(page, GFP_KERNEL)) {
d0d96328
CL
905 rc = -ENOMEM;
906 goto unlock_page;
49d2e9cc
CL
907 }
908 }
49d2e9cc 909
a48d07af
CL
910 if (!to) {
911 rc = swap_page(page);
912 goto next;
913 }
914
915 newpage = lru_to_page(to);
916 lock_page(newpage);
917
49d2e9cc 918 /*
a48d07af 919 * Pages are properly locked and writeback is complete.
49d2e9cc
CL
920 * Try to migrate the page.
921 */
a48d07af
CL
922 mapping = page_mapping(page);
923 if (!mapping)
924 goto unlock_both;
925
e965f963 926 if (mapping->a_ops->migratepage) {
418aade4
CL
927 /*
928 * Most pages have a mapping and most filesystems
929 * should provide a migration function. Anonymous
930 * pages are part of swap space which also has its
931 * own migration function. This is the most common
932 * path for page migration.
933 */
e965f963
CL
934 rc = mapping->a_ops->migratepage(newpage, page);
935 goto unlock_both;
936 }
937
a48d07af 938 /*
418aade4
CL
939 * Default handling if a filesystem does not provide
940 * a migration function. We can only migrate clean
941 * pages so try to write out any dirty pages first.
a48d07af
CL
942 */
943 if (PageDirty(page)) {
944 switch (pageout(page, mapping)) {
945 case PAGE_KEEP:
946 case PAGE_ACTIVATE:
947 goto unlock_both;
948
949 case PAGE_SUCCESS:
950 unlock_page(newpage);
951 goto next;
952
953 case PAGE_CLEAN:
954 ; /* try to migrate the page below */
955 }
956 }
418aade4 957
a48d07af 958 /*
418aade4
CL
959 * Buffers are managed in a filesystem specific way.
960 * We must have no buffers or drop them.
a48d07af
CL
961 */
962 if (!page_has_buffers(page) ||
963 try_to_release_page(page, GFP_KERNEL)) {
964 rc = migrate_page(newpage, page);
965 goto unlock_both;
966 }
967
968 /*
969 * On early passes with mapped pages simply
970 * retry. There may be a lock held for some
971 * buffers that may go away. Later
972 * swap them out.
973 */
974 if (pass > 4) {
418aade4
CL
975 /*
976 * Persistently unable to drop buffers..... As a
977 * measure of last resort we fall back to
978 * swap_page().
979 */
a48d07af
CL
980 unlock_page(newpage);
981 newpage = NULL;
982 rc = swap_page(page);
983 goto next;
984 }
985
986unlock_both:
987 unlock_page(newpage);
d0d96328
CL
988
989unlock_page:
990 unlock_page(page);
991
992next:
993 if (rc == -EAGAIN) {
994 retry++;
995 } else if (rc) {
996 /* Permanent failure */
997 list_move(&page->lru, failed);
998 nr_failed++;
999 } else {
a48d07af
CL
1000 if (newpage) {
1001 /* Successful migration. Return page to LRU */
1002 move_to_lru(newpage);
1003 }
d4984711 1004 list_move(&page->lru, moved);
d4984711 1005 }
49d2e9cc
CL
1006 }
1007 if (retry && pass++ < 10)
1008 goto redo;
1009
1010 if (!swapwrite)
1011 current->flags &= ~PF_SWAPWRITE;
1012
49d2e9cc
CL
1013 return nr_failed + retry;
1014}
8419c318 1015
8419c318
CL
1016/*
1017 * Isolate one page from the LRU lists and put it on the
053837fc 1018 * indicated list with elevated refcount.
8419c318
CL
1019 *
1020 * Result:
1021 * 0 = page not on LRU list
1022 * 1 = page removed from LRU list and added to the specified list.
8419c318
CL
1023 */
1024int isolate_lru_page(struct page *page)
1025{
053837fc 1026 int ret = 0;
8419c318 1027
053837fc
NP
1028 if (PageLRU(page)) {
1029 struct zone *zone = page_zone(page);
1030 spin_lock_irq(&zone->lru_lock);
8d438f96 1031 if (PageLRU(page)) {
053837fc
NP
1032 ret = 1;
1033 get_page(page);
8d438f96 1034 ClearPageLRU(page);
053837fc
NP
1035 if (PageActive(page))
1036 del_page_from_active_list(zone, page);
1037 else
1038 del_page_from_inactive_list(zone, page);
1039 }
1040 spin_unlock_irq(&zone->lru_lock);
8419c318 1041 }
053837fc
NP
1042
1043 return ret;
8419c318 1044}
7cbe34cf 1045#endif
49d2e9cc 1046
1da177e4
LT
1047/*
1048 * zone->lru_lock is heavily contended. Some of the functions that
1049 * shrink the lists perform better by taking out a batch of pages
1050 * and working on them outside the LRU lock.
1051 *
1052 * For pagecache intensive workloads, this function is the hottest
1053 * spot in the kernel (apart from copy_*_user functions).
1054 *
1055 * Appropriate locks must be held before calling this function.
1056 *
1057 * @nr_to_scan: The number of pages to look through on the list.
1058 * @src: The LRU list to pull pages off.
1059 * @dst: The temp list to put pages on to.
1060 * @scanned: The number of pages that were scanned.
1061 *
1062 * returns how many pages were moved onto *@dst.
1063 */
69e05944
AM
1064static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
1065 struct list_head *src, struct list_head *dst,
1066 unsigned long *scanned)
1da177e4 1067{
69e05944 1068 unsigned long nr_taken = 0;
1da177e4 1069 struct page *page;
c9b02d97 1070 unsigned long scan;
1da177e4 1071
c9b02d97 1072 for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
7c8ee9a8 1073 struct list_head *target;
1da177e4
LT
1074 page = lru_to_page(src);
1075 prefetchw_prev_lru_page(page, src, flags);
1076
8d438f96
NP
1077 BUG_ON(!PageLRU(page));
1078
053837fc 1079 list_del(&page->lru);
7c8ee9a8
NP
1080 target = src;
1081 if (likely(get_page_unless_zero(page))) {
053837fc 1082 /*
7c8ee9a8
NP
1083 * Be careful not to clear PageLRU until after we're
1084 * sure the page is not being freed elsewhere -- the
1085 * page release code relies on it.
053837fc 1086 */
7c8ee9a8
NP
1087 ClearPageLRU(page);
1088 target = dst;
1089 nr_taken++;
1090 } /* else it is being freed elsewhere */
46453a6e 1091
7c8ee9a8 1092 list_add(&page->lru, target);
1da177e4
LT
1093 }
1094
1095 *scanned = scan;
1096 return nr_taken;
1097}
1098
1099/*
1742f19f
AM
1100 * shrink_inactive_list() is a helper for shrink_zone(). It returns the number
1101 * of reclaimed pages
1da177e4 1102 */
1742f19f
AM
1103static unsigned long shrink_inactive_list(unsigned long max_scan,
1104 struct zone *zone, struct scan_control *sc)
1da177e4
LT
1105{
1106 LIST_HEAD(page_list);
1107 struct pagevec pvec;
69e05944 1108 unsigned long nr_scanned = 0;
05ff5137 1109 unsigned long nr_reclaimed = 0;
1da177e4
LT
1110
1111 pagevec_init(&pvec, 1);
1112
1113 lru_add_drain();
1114 spin_lock_irq(&zone->lru_lock);
69e05944 1115 do {
1da177e4 1116 struct page *page;
69e05944
AM
1117 unsigned long nr_taken;
1118 unsigned long nr_scan;
1119 unsigned long nr_freed;
1da177e4
LT
1120
1121 nr_taken = isolate_lru_pages(sc->swap_cluster_max,
1122 &zone->inactive_list,
1123 &page_list, &nr_scan);
1124 zone->nr_inactive -= nr_taken;
1125 zone->pages_scanned += nr_scan;
1126 spin_unlock_irq(&zone->lru_lock);
1127
69e05944 1128 nr_scanned += nr_scan;
1742f19f 1129 nr_freed = shrink_page_list(&page_list, sc);
05ff5137 1130 nr_reclaimed += nr_freed;
a74609fa
NP
1131 local_irq_disable();
1132 if (current_is_kswapd()) {
1133 __mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
1134 __mod_page_state(kswapd_steal, nr_freed);
1135 } else
1136 __mod_page_state_zone(zone, pgscan_direct, nr_scan);
1137 __mod_page_state_zone(zone, pgsteal, nr_freed);
1138
fb8d14e1
WF
1139 if (nr_taken == 0)
1140 goto done;
1141
a74609fa 1142 spin_lock(&zone->lru_lock);
1da177e4
LT
1143 /*
1144 * Put back any unfreeable pages.
1145 */
1146 while (!list_empty(&page_list)) {
1147 page = lru_to_page(&page_list);
8d438f96
NP
1148 BUG_ON(PageLRU(page));
1149 SetPageLRU(page);
1da177e4
LT
1150 list_del(&page->lru);
1151 if (PageActive(page))
1152 add_page_to_active_list(zone, page);
1153 else
1154 add_page_to_inactive_list(zone, page);
1155 if (!pagevec_add(&pvec, page)) {
1156 spin_unlock_irq(&zone->lru_lock);
1157 __pagevec_release(&pvec);
1158 spin_lock_irq(&zone->lru_lock);
1159 }
1160 }
69e05944 1161 } while (nr_scanned < max_scan);
fb8d14e1 1162 spin_unlock(&zone->lru_lock);
1da177e4 1163done:
fb8d14e1 1164 local_irq_enable();
1da177e4 1165 pagevec_release(&pvec);
05ff5137 1166 return nr_reclaimed;
1da177e4
LT
1167}
1168
1169/*
1170 * This moves pages from the active list to the inactive list.
1171 *
1172 * We move them the other way if the page is referenced by one or more
1173 * processes, from rmap.
1174 *
1175 * If the pages are mostly unmapped, the processing is fast and it is
1176 * appropriate to hold zone->lru_lock across the whole operation. But if
1177 * the pages are mapped, the processing is slow (page_referenced()) so we
1178 * should drop zone->lru_lock around each page. It's impossible to balance
1179 * this, so instead we remove the pages from the LRU while processing them.
1180 * It is safe to rely on PG_active against the non-LRU pages in here because
1181 * nobody will play with that bit on a non-LRU page.
1182 *
1183 * The downside is that we have to touch page->_count against each page.
1184 * But we had to alter page->flags anyway.
1185 */
1742f19f
AM
1186static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
1187 struct scan_control *sc)
1da177e4 1188{
69e05944 1189 unsigned long pgmoved;
1da177e4 1190 int pgdeactivate = 0;
69e05944 1191 unsigned long pgscanned;
1da177e4
LT
1192 LIST_HEAD(l_hold); /* The pages which were snipped off */
1193 LIST_HEAD(l_inactive); /* Pages to go onto the inactive_list */
1194 LIST_HEAD(l_active); /* Pages to go onto the active_list */
1195 struct page *page;
1196 struct pagevec pvec;
1197 int reclaim_mapped = 0;
2903fb16 1198
6e5ef1a9 1199 if (sc->may_swap) {
2903fb16
CL
1200 long mapped_ratio;
1201 long distress;
1202 long swap_tendency;
1203
1204 /*
1205 * `distress' is a measure of how much trouble we're having
1206 * reclaiming pages. 0 -> no problems. 100 -> great trouble.
1207 */
1208 distress = 100 >> zone->prev_priority;
1209
1210 /*
1211 * The point of this algorithm is to decide when to start
1212 * reclaiming mapped memory instead of just pagecache.
1213 *
1214 * Work out how much memory is mapped.
1215 */
1216 mapped_ratio = (sc->nr_mapped * 100) / total_memory;
1217
1218 /*
1219 * Now decide how much we really want to unmap some pages. The
1220 * mapped ratio is downgraded - just because there's a lot of
1221 * mapped memory doesn't necessarily mean that page reclaim
1222 * isn't succeeding.
1223 *
1224 * The distress ratio is important - we don't want to start
1225 * going oom.
1226 *
1227 * A 100% value of vm_swappiness overrides this algorithm
1228 * altogether.
1229 */
1230 swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;
1231
1232 /*
1233 * Now use this metric to decide whether to start moving mapped
1234 * memory onto the inactive list.
1235 */
1236 if (swap_tendency >= 100)
1237 reclaim_mapped = 1;
1238 }
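	/*
	 * For example: at DEF_PRIORITY (prev_priority == 12) the distress
	 * term is 100 >> 12 == 0, so with 40% of memory mapped and the
	 * default vm_swappiness of 60 we get 40/2 + 0 + 60 == 80 < 100 and
	 * mapped pages are left alone.  Once reclaim is struggling
	 * (prev_priority == 0) distress becomes 100 and the same workload
	 * crosses the threshold.
	 */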
1da177e4
LT
1239
1240 lru_add_drain();
1241 spin_lock_irq(&zone->lru_lock);
1242 pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
1243 &l_hold, &pgscanned);
1244 zone->pages_scanned += pgscanned;
1245 zone->nr_active -= pgmoved;
1246 spin_unlock_irq(&zone->lru_lock);
1247
1da177e4
LT
1248 while (!list_empty(&l_hold)) {
1249 cond_resched();
1250 page = lru_to_page(&l_hold);
1251 list_del(&page->lru);
1252 if (page_mapped(page)) {
1253 if (!reclaim_mapped ||
1254 (total_swap_pages == 0 && PageAnon(page)) ||
f7b7fd8f 1255 page_referenced(page, 0)) {
1da177e4
LT
1256 list_add(&page->lru, &l_active);
1257 continue;
1258 }
1259 }
1260 list_add(&page->lru, &l_inactive);
1261 }
1262
1263 pagevec_init(&pvec, 1);
1264 pgmoved = 0;
1265 spin_lock_irq(&zone->lru_lock);
1266 while (!list_empty(&l_inactive)) {
1267 page = lru_to_page(&l_inactive);
1268 prefetchw_prev_lru_page(page, &l_inactive, flags);
8d438f96
NP
1269 BUG_ON(PageLRU(page));
1270 SetPageLRU(page);
4c84cacf
NP
1271 BUG_ON(!PageActive(page));
1272 ClearPageActive(page);
1273
1da177e4
LT
1274 list_move(&page->lru, &zone->inactive_list);
1275 pgmoved++;
1276 if (!pagevec_add(&pvec, page)) {
1277 zone->nr_inactive += pgmoved;
1278 spin_unlock_irq(&zone->lru_lock);
1279 pgdeactivate += pgmoved;
1280 pgmoved = 0;
1281 if (buffer_heads_over_limit)
1282 pagevec_strip(&pvec);
1283 __pagevec_release(&pvec);
1284 spin_lock_irq(&zone->lru_lock);
1285 }
1286 }
1287 zone->nr_inactive += pgmoved;
1288 pgdeactivate += pgmoved;
1289 if (buffer_heads_over_limit) {
1290 spin_unlock_irq(&zone->lru_lock);
1291 pagevec_strip(&pvec);
1292 spin_lock_irq(&zone->lru_lock);
1293 }
1294
1295 pgmoved = 0;
1296 while (!list_empty(&l_active)) {
1297 page = lru_to_page(&l_active);
1298 prefetchw_prev_lru_page(page, &l_active, flags);
8d438f96
NP
1299 BUG_ON(PageLRU(page));
1300 SetPageLRU(page);
1da177e4
LT
1301 BUG_ON(!PageActive(page));
1302 list_move(&page->lru, &zone->active_list);
1303 pgmoved++;
1304 if (!pagevec_add(&pvec, page)) {
1305 zone->nr_active += pgmoved;
1306 pgmoved = 0;
1307 spin_unlock_irq(&zone->lru_lock);
1308 __pagevec_release(&pvec);
1309 spin_lock_irq(&zone->lru_lock);
1310 }
1311 }
1312 zone->nr_active += pgmoved;
a74609fa
NP
1313 spin_unlock(&zone->lru_lock);
1314
1315 __mod_page_state_zone(zone, pgrefill, pgscanned);
1316 __mod_page_state(pgdeactivate, pgdeactivate);
1317 local_irq_enable();
1da177e4 1318
a74609fa 1319 pagevec_release(&pvec);
1da177e4
LT
1320}
1321
1322/*
1323 * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
1324 */
05ff5137
AM
1325static unsigned long shrink_zone(int priority, struct zone *zone,
1326 struct scan_control *sc)
1da177e4
LT
1327{
1328 unsigned long nr_active;
1329 unsigned long nr_inactive;
8695949a 1330 unsigned long nr_to_scan;
05ff5137 1331 unsigned long nr_reclaimed = 0;
1da177e4 1332
53e9a615
MH
1333 atomic_inc(&zone->reclaim_in_progress);
1334
1da177e4
LT
1335 /*
1336 * Add one to `nr_to_scan' just to make sure that the kernel will
1337 * slowly sift through the active list.
1338 */
8695949a 1339 zone->nr_scan_active += (zone->nr_active >> priority) + 1;
1da177e4
LT
1340 nr_active = zone->nr_scan_active;
1341 if (nr_active >= sc->swap_cluster_max)
1342 zone->nr_scan_active = 0;
1343 else
1344 nr_active = 0;
1345
8695949a 1346 zone->nr_scan_inactive += (zone->nr_inactive >> priority) + 1;
1da177e4
LT
1347 nr_inactive = zone->nr_scan_inactive;
1348 if (nr_inactive >= sc->swap_cluster_max)
1349 zone->nr_scan_inactive = 0;
1350 else
1351 nr_inactive = 0;
1352
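	/*
	 * For example, with 100000 active pages at DEF_PRIORITY (12) each
	 * call adds (100000 >> 12) + 1 == 25 to nr_scan_active, so the
	 * active list is actually scanned only once the accumulated count
	 * reaches sc->swap_cluster_max; at priority 0 the whole list becomes
	 * eligible in a single call.
	 */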
1da177e4
LT
1353 while (nr_active || nr_inactive) {
1354 if (nr_active) {
8695949a 1355 nr_to_scan = min(nr_active,
1da177e4 1356 (unsigned long)sc->swap_cluster_max);
8695949a 1357 nr_active -= nr_to_scan;
1742f19f 1358 shrink_active_list(nr_to_scan, zone, sc);
1da177e4
LT
1359 }
1360
1361 if (nr_inactive) {
8695949a 1362 nr_to_scan = min(nr_inactive,
1da177e4 1363 (unsigned long)sc->swap_cluster_max);
8695949a 1364 nr_inactive -= nr_to_scan;
1742f19f
AM
1365 nr_reclaimed += shrink_inactive_list(nr_to_scan, zone,
1366 sc);
1da177e4
LT
1367 }
1368 }
1369
1370 throttle_vm_writeout();
53e9a615
MH
1371
1372 atomic_dec(&zone->reclaim_in_progress);
05ff5137 1373 return nr_reclaimed;
1da177e4
LT
1374}
1375
1376/*
1377 * This is the direct reclaim path, for page-allocating processes. We only
1378 * try to reclaim pages from zones which will satisfy the caller's allocation
1379 * request.
1380 *
1381 * We reclaim from a zone even if that zone is over pages_high. Because:
1382 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
1383 * allocation or
1384 * b) The zones may be over pages_high but they must go *over* pages_high to
1385 * satisfy the `incremental min' zone defense algorithm.
1386 *
1387 * Returns the number of reclaimed pages.
1388 *
1389 * If a zone is deemed to be full of pinned pages then just give it a light
1390 * scan then give up on it.
1391 */
1742f19f 1392static unsigned long shrink_zones(int priority, struct zone **zones,
05ff5137 1393 struct scan_control *sc)
1da177e4 1394{
05ff5137 1395 unsigned long nr_reclaimed = 0;
1da177e4
LT
1396 int i;
1397
1398 for (i = 0; zones[i] != NULL; i++) {
1399 struct zone *zone = zones[i];
1400
f3fe6512 1401 if (!populated_zone(zone))
1da177e4
LT
1402 continue;
1403
9bf2229f 1404 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
1da177e4
LT
1405 continue;
1406
8695949a
CL
1407 zone->temp_priority = priority;
1408 if (zone->prev_priority > priority)
1409 zone->prev_priority = priority;
1da177e4 1410
8695949a 1411 if (zone->all_unreclaimable && priority != DEF_PRIORITY)
1da177e4
LT
1412 continue; /* Let kswapd poll it */
1413
05ff5137 1414 nr_reclaimed += shrink_zone(priority, zone, sc);
1da177e4 1415 }
05ff5137 1416 return nr_reclaimed;
1da177e4
LT
1417}
1418
1419/*
1420 * This is the main entry point to direct page reclaim.
1421 *
1422 * If a full scan of the inactive list fails to free enough memory then we
1423 * are "out of memory" and something needs to be killed.
1424 *
1425 * If the caller is !__GFP_FS then the probability of a failure is reasonably
1426 * high - the zone may be full of dirty or under-writeback pages, which this
1427 * caller can't do much about. We kick pdflush and take explicit naps in the
1428 * hope that some of these pages can be written. But if the allocating task
1429 * holds filesystem locks which prevent writeout this might not work, and the
1430 * allocation attempt will fail.
1431 */
69e05944 1432unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
1da177e4
LT
1433{
1434 int priority;
1435 int ret = 0;
69e05944 1436 unsigned long total_scanned = 0;
05ff5137 1437 unsigned long nr_reclaimed = 0;
1da177e4 1438 struct reclaim_state *reclaim_state = current->reclaim_state;
1da177e4
LT
1439 unsigned long lru_pages = 0;
1440 int i;
179e9639
AM
1441 struct scan_control sc = {
1442 .gfp_mask = gfp_mask,
1443 .may_writepage = !laptop_mode,
1444 .swap_cluster_max = SWAP_CLUSTER_MAX,
1445 .may_swap = 1,
1446 };
1da177e4
LT
1447
1448 inc_page_state(allocstall);
1449
1450 for (i = 0; zones[i] != NULL; i++) {
1451 struct zone *zone = zones[i];
1452
9bf2229f 1453 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
1da177e4
LT
1454 continue;
1455
1456 zone->temp_priority = DEF_PRIORITY;
1457 lru_pages += zone->nr_active + zone->nr_inactive;
1458 }
1459
1460 for (priority = DEF_PRIORITY; priority >= 0; priority--) {
1461 sc.nr_mapped = read_page_state(nr_mapped);
1462 sc.nr_scanned = 0;
f7b7fd8f
RR
1463 if (!priority)
1464 disable_swap_token();
1742f19f 1465 nr_reclaimed += shrink_zones(priority, zones, &sc);
1da177e4
LT
1466 shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
1467 if (reclaim_state) {
05ff5137 1468 nr_reclaimed += reclaim_state->reclaimed_slab;
1da177e4
LT
1469 reclaim_state->reclaimed_slab = 0;
1470 }
1471 total_scanned += sc.nr_scanned;
05ff5137 1472 if (nr_reclaimed >= sc.swap_cluster_max) {
1da177e4
LT
1473 ret = 1;
1474 goto out;
1475 }
1476
1477 /*
1478 * Try to write back as many pages as we just scanned. This
1479 * tends to cause slow streaming writers to write data to the
1480 * disk smoothly, at the dirtying rate, which is nice. But
1481 * that's undesirable in laptop mode, where we *want* lumpy
1482 * writeout. So in laptop mode, write out the whole world.
1483 */
179e9639
AM
1484 if (total_scanned > sc.swap_cluster_max +
1485 sc.swap_cluster_max / 2) {
687a21ce 1486 wakeup_pdflush(laptop_mode ? 0 : total_scanned);
1da177e4
LT
1487 sc.may_writepage = 1;
1488 }
1489
1490 /* Take a nap, wait for some writeback to complete */
1491 if (sc.nr_scanned && priority < DEF_PRIORITY - 2)
1492 blk_congestion_wait(WRITE, HZ/10);
1493 }
1494out:
1495 for (i = 0; zones[i] != 0; i++) {
1496 struct zone *zone = zones[i];
1497
9bf2229f 1498 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
1da177e4
LT
1499 continue;
1500
1501 zone->prev_priority = zone->temp_priority;
1502 }
1503 return ret;
1504}
1505
1506/*
1507 * For kswapd, balance_pgdat() will work across all this node's zones until
1508 * they are all at pages_high.
1509 *
1510 * If `nr_pages' is non-zero then it is the number of pages which are to be
1511 * reclaimed, regardless of the zone occupancies. This is a software suspend
1512 * special.
1513 *
1514 * Returns the number of pages which were actually freed.
1515 *
1516 * There is special handling here for zones which are full of pinned pages.
1517 * This can happen if the pages are all mlocked, or if they are all used by
1518 * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb.
1519 * What we do is to detect the case where all pages in the zone have been
1520 * scanned twice and there has been zero successful reclaim. Mark the zone as
1521 * dead and from now on, only perform a short scan. Basically we're polling
1522 * the zone for when the problem goes away.
1523 *
1524 * kswapd scans the zones in the highmem->normal->dma direction. It skips
1525 * zones which have free_pages > pages_high, but once a zone is found to have
1526 * free_pages <= pages_high, we scan that zone and the lower zones regardless
1527 * of the number of free pages in the lower zones. This interoperates with
1528 * the page allocator fallback scheme to ensure that aging of pages is balanced
1529 * across the zones.
1530 */
69e05944
AM
1531static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages,
1532 int order)
1da177e4 1533{
69e05944 1534 unsigned long to_free = nr_pages;
1da177e4
LT
1535 int all_zones_ok;
1536 int priority;
1537 int i;
69e05944 1538 unsigned long total_scanned;
05ff5137 1539 unsigned long nr_reclaimed;
1da177e4 1540 struct reclaim_state *reclaim_state = current->reclaim_state;
179e9639
AM
1541 struct scan_control sc = {
1542 .gfp_mask = GFP_KERNEL,
1543 .may_swap = 1,
1544 .swap_cluster_max = nr_pages ? nr_pages : SWAP_CLUSTER_MAX,
1545 };
1da177e4
LT
1546
1547loop_again:
1548 total_scanned = 0;
05ff5137 1549 nr_reclaimed = 0;
179e9639 1550 sc.may_writepage = !laptop_mode;
1da177e4
LT
1551 sc.nr_mapped = read_page_state(nr_mapped);
1552
1553 inc_page_state(pageoutrun);
1554
1555 for (i = 0; i < pgdat->nr_zones; i++) {
1556 struct zone *zone = pgdat->node_zones + i;
1557
1558 zone->temp_priority = DEF_PRIORITY;
1559 }
1560
1561 for (priority = DEF_PRIORITY; priority >= 0; priority--) {
1562 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
1563 unsigned long lru_pages = 0;
1564
f7b7fd8f
RR
1565 /* The swap token gets in the way of swapout... */
1566 if (!priority)
1567 disable_swap_token();
1568
1da177e4
LT
1569 all_zones_ok = 1;
1570
1571 if (nr_pages == 0) {
1572 /*
1573 * Scan in the highmem->dma direction for the highest
1574 * zone which needs scanning
1575 */
1576 for (i = pgdat->nr_zones - 1; i >= 0; i--) {
1577 struct zone *zone = pgdat->node_zones + i;
1578
f3fe6512 1579 if (!populated_zone(zone))
1da177e4
LT
1580 continue;
1581
1582 if (zone->all_unreclaimable &&
1583 priority != DEF_PRIORITY)
1584 continue;
1585
1586 if (!zone_watermark_ok(zone, order,
7fb1d9fc 1587 zone->pages_high, 0, 0)) {
1da177e4
LT
1588 end_zone = i;
1589 goto scan;
1590 }
1591 }
1592 goto out;
1593 } else {
1594 end_zone = pgdat->nr_zones - 1;
1595 }
1596scan:
1597 for (i = 0; i <= end_zone; i++) {
1598 struct zone *zone = pgdat->node_zones + i;
1599
1600 lru_pages += zone->nr_active + zone->nr_inactive;
1601 }
1602
1603 /*
1604 * Now scan the zone in the dma->highmem direction, stopping
1605 * at the last zone which needs scanning.
1606 *
1607 * We do this because the page allocator works in the opposite
1608 * direction. This prevents the page allocator from allocating
1609 * pages behind kswapd's direction of progress, which would
1610 * cause too much scanning of the lower zones.
1611 */
1612 for (i = 0; i <= end_zone; i++) {
1613 struct zone *zone = pgdat->node_zones + i;
b15e0905 1614 int nr_slab;
1da177e4 1615
f3fe6512 1616 if (!populated_zone(zone))
1da177e4
LT
1617 continue;
1618
1619 if (zone->all_unreclaimable && priority != DEF_PRIORITY)
1620 continue;
1621
1622 if (nr_pages == 0) { /* Not software suspend */
1623 if (!zone_watermark_ok(zone, order,
7fb1d9fc 1624 zone->pages_high, end_zone, 0))
1da177e4
LT
1625 all_zones_ok = 0;
1626 }
1627 zone->temp_priority = priority;
1628 if (zone->prev_priority > priority)
1629 zone->prev_priority = priority;
1630 sc.nr_scanned = 0;
05ff5137 1631 nr_reclaimed += shrink_zone(priority, zone, &sc);
1da177e4 1632 reclaim_state->reclaimed_slab = 0;
b15e0905
AM
1633 nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
1634 lru_pages);
05ff5137 1635 nr_reclaimed += reclaim_state->reclaimed_slab;
1da177e4
LT
1636 total_scanned += sc.nr_scanned;
1637 if (zone->all_unreclaimable)
1638 continue;
b15e0905
AM
1639 if (nr_slab == 0 && zone->pages_scanned >=
1640 (zone->nr_active + zone->nr_inactive) * 4)
1da177e4
LT
1641 zone->all_unreclaimable = 1;
1642 /*
1643 * If we've done a decent amount of scanning and
1644 * the reclaim ratio is low, start doing writepage
1645 * even in laptop mode
1646 */
1647 if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
05ff5137 1648 total_scanned > nr_reclaimed + nr_reclaimed / 2)
1da177e4
LT
1649 sc.may_writepage = 1;
1650 }
05ff5137 1651 if (nr_pages && to_free > nr_reclaimed)
1da177e4
LT
1652 continue; /* swsusp: need to do more work */
1653 if (all_zones_ok)
1654 break; /* kswapd: all done */
1655 /*
1656 * OK, kswapd is getting into trouble. Take a nap, then take
1657 * another pass across the zones.
1658 */
1659 if (total_scanned && priority < DEF_PRIORITY - 2)
1660 blk_congestion_wait(WRITE, HZ/10);
1661
1662 /*
1663 * We do this so kswapd doesn't build up large priorities for
1664 * example when it is freeing in parallel with allocators. It
1665 * matches the direct reclaim path behaviour in terms of impact
1666 * on zone->*_priority.
1667 */
05ff5137 1668 if ((nr_reclaimed >= SWAP_CLUSTER_MAX) && !nr_pages)
1da177e4
LT
1669 break;
1670 }
1671out:
1672 for (i = 0; i < pgdat->nr_zones; i++) {
1673 struct zone *zone = pgdat->node_zones + i;
1674
1675 zone->prev_priority = zone->temp_priority;
1676 }
1677 if (!all_zones_ok) {
1678 cond_resched();
1679 goto loop_again;
1680 }
1681
05ff5137 1682 return nr_reclaimed;
1da177e4
LT
1683}
1684
1685/*
1686 * The background pageout daemon, started as a kernel thread
1687 * from the init process.
1688 *
1689 * This basically trickles out pages so that we have _some_
1690 * free memory available even if there is no other activity
1691 * that frees anything up. This is needed for things like routing
1692 * etc, where we otherwise might have all activity going on in
1693 * asynchronous contexts that cannot page things out.
1694 *
1695 * If there are applications that are active memory-allocators
1696 * (most normal use), this basically shouldn't matter.
1697 */
1698static int kswapd(void *p)
1699{
1700 unsigned long order;
1701 pg_data_t *pgdat = (pg_data_t*)p;
1702 struct task_struct *tsk = current;
1703 DEFINE_WAIT(wait);
1704 struct reclaim_state reclaim_state = {
1705 .reclaimed_slab = 0,
1706 };
1707 cpumask_t cpumask;
1708
1709 daemonize("kswapd%d", pgdat->node_id);
1710 cpumask = node_to_cpumask(pgdat->node_id);
1711 if (!cpus_empty(cpumask))
1712 set_cpus_allowed(tsk, cpumask);
1713 current->reclaim_state = &reclaim_state;
1714
1715 /*
1716 * Tell the memory management that we're a "memory allocator",
1717 * and that if we need more memory we should get access to it
1718 * regardless (see "__alloc_pages()"). "kswapd" should
1719 * never get caught in the normal page freeing logic.
1720 *
1721 * (Kswapd normally doesn't need memory anyway, but sometimes
1722 * you need a small amount of memory in order to be able to
1723 * page out something else, and this flag essentially protects
1724 * us from recursively trying to free more memory as we're
1725 * trying to free the first piece of memory in the first place).
1726 */
930d9152 1727 tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
1da177e4
LT
1728
1729 order = 0;
1730 for ( ; ; ) {
1731 unsigned long new_order;
3e1d1d28
CL
1732
1733 try_to_freeze();
1da177e4
LT
1734
1735 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
1736 new_order = pgdat->kswapd_max_order;
1737 pgdat->kswapd_max_order = 0;
1738 if (order < new_order) {
1739 /*
1740 * Don't sleep if someone wants a larger 'order'
1741 * allocation
1742 */
1743 order = new_order;
1744 } else {
1745 schedule();
1746 order = pgdat->kswapd_max_order;
1747 }
1748 finish_wait(&pgdat->kswapd_wait, &wait);
1749
1750 balance_pgdat(pgdat, 0, order);
1751 }
1752 return 0;
1753}
1754
1755/*
1756 * A zone is low on free memory, so wake its kswapd task to service it.
1757 */
1758void wakeup_kswapd(struct zone *zone, int order)
1759{
1760 pg_data_t *pgdat;
1761
f3fe6512 1762 if (!populated_zone(zone))
1da177e4
LT
1763 return;
1764
1765 pgdat = zone->zone_pgdat;
7fb1d9fc 1766 if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
1da177e4
LT
1767 return;
1768 if (pgdat->kswapd_max_order < order)
1769 pgdat->kswapd_max_order = order;
9bf2229f 1770 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
1da177e4 1771 return;
8d0986e2 1772 if (!waitqueue_active(&pgdat->kswapd_wait))
1da177e4 1773 return;
8d0986e2 1774 wake_up_interruptible(&pgdat->kswapd_wait);
1da177e4
LT
1775}
1776
1777#ifdef CONFIG_PM
1778/*
1779 * Try to free `nr_pages' of memory, system-wide. Returns the number of freed
1780 * pages.
1781 */
69e05944 1782unsigned long shrink_all_memory(unsigned long nr_pages)
1da177e4
LT
1783{
1784 pg_data_t *pgdat;
69e05944
AM
1785 unsigned long nr_to_free = nr_pages;
1786 unsigned long ret = 0;
1da177e4
LT
1787 struct reclaim_state reclaim_state = {
1788 .reclaimed_slab = 0,
1789 };
1790
1791 current->reclaim_state = &reclaim_state;
1792 for_each_pgdat(pgdat) {
69e05944
AM
1793 unsigned long freed;
1794
1da177e4
LT
1795 freed = balance_pgdat(pgdat, nr_to_free, 0);
1796 ret += freed;
1797 nr_to_free -= freed;
69e05944 1798 if ((long)nr_to_free <= 0)
1da177e4
LT
1799 break;
1800 }
1801 current->reclaim_state = NULL;
1802 return ret;
1803}
1804#endif
1805
1806#ifdef CONFIG_HOTPLUG_CPU
1807/* It's optimal to keep kswapds on the same CPUs as their memory, but
1808 not required for correctness. So if the last cpu in a node goes
1809 away, we get changed to run anywhere: as the first one comes back,
1810 restore their cpu bindings. */
1811static int __devinit cpu_callback(struct notifier_block *nfb,
69e05944 1812 unsigned long action, void *hcpu)
1da177e4
LT
1813{
1814 pg_data_t *pgdat;
1815 cpumask_t mask;
1816
1817 if (action == CPU_ONLINE) {
1818 for_each_pgdat(pgdat) {
1819 mask = node_to_cpumask(pgdat->node_id);
1820 if (any_online_cpu(mask) != NR_CPUS)
1821 /* One of our CPUs online: restore mask */
1822 set_cpus_allowed(pgdat->kswapd, mask);
1823 }
1824 }
1825 return NOTIFY_OK;
1826}
1827#endif /* CONFIG_HOTPLUG_CPU */
1828
1829static int __init kswapd_init(void)
1830{
1831 pg_data_t *pgdat;
69e05944 1832
1da177e4 1833 swap_setup();
69e05944
AM
1834 for_each_pgdat(pgdat) {
1835 pid_t pid;
1836
1837 pid = kernel_thread(kswapd, pgdat, CLONE_KERNEL);
1838 BUG_ON(pid < 0);
1839 pgdat->kswapd = find_task_by_pid(pid);
1840 }
1da177e4
LT
1841 total_memory = nr_free_pagecache_pages();
1842 hotcpu_notifier(cpu_callback, 0);
1843 return 0;
1844}
1845
1846module_init(kswapd_init)
9eeff239
CL
1847
1848#ifdef CONFIG_NUMA
1849/*
1850 * Zone reclaim mode
1851 *
1852 * If non-zero call zone_reclaim when the number of free pages falls below
1853 * the watermarks.
1854 *
1855 * In the future we may add flags to the mode. However, the page allocator
1856 * should only have to check that zone_reclaim_mode != 0 before calling
1857 * zone_reclaim().
1858 */
1859int zone_reclaim_mode __read_mostly;
1860
1b2ffb78
CL
1861#define RECLAIM_OFF 0
1862#define RECLAIM_ZONE (1<<0) /* Run shrink_cache on the zone */
1863#define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */
1864#define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */
2a16e3f4 1865#define RECLAIM_SLAB (1<<3) /* Do a global slab shrink if the zone is out of memory */
1b2ffb78 1866
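/*
 * The mode is a bitmask: e.g. a zone_reclaim_mode of 1 (RECLAIM_ZONE) only
 * reclaims clean, unmapped pagecache from the local zone, while additionally
 * setting RECLAIM_WRITE and/or RECLAIM_SWAP permits writeback and
 * unmapping/swapping during that reclaim; 0 disables zone reclaim entirely.
 */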
9eeff239
CL
1867/*
1868 * Minimum time between zone reclaim scans
1869 */
2a11ff06 1870int zone_reclaim_interval __read_mostly = 30*HZ;
a92f7126
CL
1871
1872/*
1873 * Priority for ZONE_RECLAIM. This determines the fraction of pages
1874 * of a node considered for each zone_reclaim. 4 scans 1/16th of
1875 * a zone.
1876 */
1877#define ZONE_RECLAIM_PRIORITY 4
1878
9eeff239
CL
1879/*
1880 * Try to free up some pages from this zone through reclaim.
1881 */
179e9639 1882static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
9eeff239 1883{
7fb2d46d 1884 /* Minimum pages needed in order to stay on node */
69e05944 1885 const unsigned long nr_pages = 1 << order;
9eeff239
CL
1886 struct task_struct *p = current;
1887 struct reclaim_state reclaim_state;
8695949a 1888 int priority;
05ff5137 1889 unsigned long nr_reclaimed = 0;
179e9639
AM
1890 struct scan_control sc = {
1891 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
1892 .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
1893 .nr_mapped = read_page_state(nr_mapped),
69e05944
AM
1894 .swap_cluster_max = max_t(unsigned long, nr_pages,
1895 SWAP_CLUSTER_MAX),
179e9639
AM
1896 .gfp_mask = gfp_mask,
1897 };
9eeff239
CL
1898
1899 disable_swap_token();
9eeff239 1900 cond_resched();
d4f7796e
CL
1901 /*
1902 * We need to be able to allocate from the reserves for RECLAIM_SWAP
1903 * and we also need to be able to write out pages for RECLAIM_WRITE
1904 * and RECLAIM_SWAP.
1905 */
1906 p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
9eeff239
CL
1907 reclaim_state.reclaimed_slab = 0;
1908 p->reclaim_state = &reclaim_state;
c84db23c 1909
a92f7126
CL
1910 /*
1911 * Free memory by calling shrink zone with increasing priorities
1912 * until we have enough memory freed.
1913 */
8695949a 1914 priority = ZONE_RECLAIM_PRIORITY;
a92f7126 1915 do {
05ff5137 1916 nr_reclaimed += shrink_zone(priority, zone, &sc);
8695949a 1917 priority--;
05ff5137 1918 } while (priority >= 0 && nr_reclaimed < nr_pages);
c84db23c 1919
05ff5137 1920 if (nr_reclaimed < nr_pages && (zone_reclaim_mode & RECLAIM_SLAB)) {
2a16e3f4 1921 /*
7fb2d46d
CL
1922 * shrink_slab() does not currently allow us to determine how
1923 * many pages were freed in this zone. So we just shake the slab
1924 * a bit and then go off node for this particular allocation
1925 * despite possibly having freed enough memory to allocate in
1926 * this zone. If we freed local memory then the next
1927 * allocations will be local again.
2a16e3f4
CL
1928 *
1929 * shrink_slab will free memory on all zones and may take
1930 * a long time.
1931 */
1932 shrink_slab(sc.nr_scanned, gfp_mask, order);
2a16e3f4
CL
1933 }
1934
9eeff239 1935 p->reclaim_state = NULL;
d4f7796e 1936 current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
9eeff239 1937
7fb2d46d
CL
1938 if (nr_reclaimed == 0) {
1939 /*
1940 * We were unable to reclaim enough pages to stay on node. We
1941 * now allow off node accesses for a certain time period before
1942 * trying again to reclaim pages from the local zone.
1943 */
9eeff239 1944 zone->last_unsuccessful_zone_reclaim = jiffies;
7fb2d46d 1945 }
9eeff239 1946
05ff5137 1947 return nr_reclaimed >= nr_pages;
9eeff239 1948}
179e9639
AM
1949
1950int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
1951{
1952 cpumask_t mask;
1953 int node_id;
1954
1955 /*
1956 * Do not reclaim if there was a recent unsuccessful attempt at zone
1957 * reclaim. In that case we let allocations go off node for the
1958 * zone_reclaim_interval. Otherwise we would scan for each off-node
1959 * page allocation.
1960 */
1961 if (time_before(jiffies,
1962 zone->last_unsuccessful_zone_reclaim + zone_reclaim_interval))
1963 return 0;
1964
1965 /*
1966 * Avoid concurrent zone reclaims, do not reclaim in a zone that does
1967 * not have reclaimable pages, and do not scan if the allocation
1968 * should not be delayed.
1969 */
1970 if (!(gfp_mask & __GFP_WAIT) ||
1971 zone->all_unreclaimable ||
1972 atomic_read(&zone->reclaim_in_progress) > 0 ||
1973 (current->flags & PF_MEMALLOC))
1974 return 0;
1975
1976 /*
1977 * Only run zone reclaim on the local zone or on zones that do not
1978 * have associated processors. This will favor the local processor
1979 * over remote processors and spread off node memory allocations
1980 * as wide as possible.
1981 */
1982 node_id = zone->zone_pgdat->node_id;
1983 mask = node_to_cpumask(node_id);
1984 if (!cpus_empty(mask) && node_id != numa_node_id())
1985 return 0;
1986 return __zone_reclaim(zone, gfp_mask, order);
1987}
9eeff239 1988#endif