1/*
2 * linux/mm/page_alloc.c
3 *
4 * Manages the free list; the system allocates free pages here.
5 * Note that kmalloc() lives in slab.c
6 *
7 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
8 * Swap reorganised 29.12.95, Stephen Tweedie
9 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14 * (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15 */
16
1da177e4
LT
17#include <linux/stddef.h>
18#include <linux/mm.h>
19#include <linux/swap.h>
20#include <linux/interrupt.h>
21#include <linux/pagemap.h>
10ed273f 22#include <linux/jiffies.h>
1da177e4
LT
23#include <linux/bootmem.h>
24#include <linux/compiler.h>
9f158333 25#include <linux/kernel.h>
b1eeab67 26#include <linux/kmemcheck.h>
1da177e4
LT
27#include <linux/module.h>
28#include <linux/suspend.h>
29#include <linux/pagevec.h>
30#include <linux/blkdev.h>
31#include <linux/slab.h>
5a3135c2 32#include <linux/oom.h>
1da177e4
LT
33#include <linux/notifier.h>
34#include <linux/topology.h>
35#include <linux/sysctl.h>
36#include <linux/cpu.h>
37#include <linux/cpuset.h>
bdc8cb98 38#include <linux/memory_hotplug.h>
1da177e4
LT
39#include <linux/nodemask.h>
40#include <linux/vmalloc.h>
4be38e35 41#include <linux/mempolicy.h>
6811378e 42#include <linux/stop_machine.h>
c713216d
MG
43#include <linux/sort.h>
44#include <linux/pfn.h>
3fcfab16 45#include <linux/backing-dev.h>
933e312e 46#include <linux/fault-inject.h>
a5d76b54 47#include <linux/page-isolation.h>
52d4b9ac 48#include <linux/page_cgroup.h>
3ac7fe5a 49#include <linux/debugobjects.h>
dbb1f81c 50#include <linux/kmemleak.h>
925cc71e 51#include <linux/memory.h>
56de7263 52#include <linux/compaction.h>
0d3d062a 53#include <trace/events/kmem.h>
718a3821 54#include <linux/ftrace_event.h>
1da177e4
LT
55
56#include <asm/tlbflush.h>
ac924c60 57#include <asm/div64.h>
1da177e4
LT
58#include "internal.h"
59
72812019
LS
60#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
61DEFINE_PER_CPU(int, numa_node);
62EXPORT_PER_CPU_SYMBOL(numa_node);
63#endif
64
7aac7898
LS
65#ifdef CONFIG_HAVE_MEMORYLESS_NODES
66/*
67 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
68 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
69 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
70 * defined in <linux/topology.h>.
71 */
72DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */
73EXPORT_PER_CPU_SYMBOL(_numa_mem_);
74#endif
75
1da177e4 76/*
13808910 77 * Array of node states.
1da177e4 78 */
13808910
CL
79nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
80 [N_POSSIBLE] = NODE_MASK_ALL,
81 [N_ONLINE] = { { [0] = 1UL } },
82#ifndef CONFIG_NUMA
83 [N_NORMAL_MEMORY] = { { [0] = 1UL } },
84#ifdef CONFIG_HIGHMEM
85 [N_HIGH_MEMORY] = { { [0] = 1UL } },
86#endif
87 [N_CPU] = { { [0] = 1UL } },
88#endif /* NUMA */
89};
90EXPORT_SYMBOL(node_states);
91
6c231b7b 92unsigned long totalram_pages __read_mostly;
cb45b0e9 93unsigned long totalreserve_pages __read_mostly;
8ad4b1fb 94int percpu_pagelist_fraction;
dcce284a 95gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
1da177e4 96
452aa699
RW
97#ifdef CONFIG_PM_SLEEP
98/*
99 * The following functions are used by the suspend/hibernate code to temporarily
100 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
101 * while devices are suspended. To avoid races with the suspend/hibernate code,
102 * they should always be called with pm_mutex held (gfp_allowed_mask also should
103 * only be modified with pm_mutex held, unless the suspend/hibernate code is
104 * guaranteed not to run in parallel with that modification).
105 */
106void set_gfp_allowed_mask(gfp_t mask)
107{
108 WARN_ON(!mutex_is_locked(&pm_mutex));
109 gfp_allowed_mask = mask;
110}
111
112gfp_t clear_gfp_allowed_mask(gfp_t mask)
113{
114 gfp_t ret = gfp_allowed_mask;
115
116 WARN_ON(!mutex_is_locked(&pm_mutex));
117 gfp_allowed_mask &= ~mask;
118 return ret;
119}
120#endif /* CONFIG_PM_SLEEP */
121
d9c23400
MG
122#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
123int pageblock_order __read_mostly;
124#endif
125
d98c7a09 126static void __free_pages_ok(struct page *page, unsigned int order);
a226f6c8 127
1da177e4
LT
128/*
129 * results with 256, 32 in the lowmem_reserve sysctl:
130 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
131 * 1G machine -> (16M dma, 784M normal, 224M high)
132 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
133 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
134 * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
135 *
136 * TBD: should special case ZONE_DMA32 machines here - in those we normally
137 * don't need any ZONE_NORMAL reservation
138 */
2f1b6248 139int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
4b51d669 140#ifdef CONFIG_ZONE_DMA
2f1b6248 141 256,
4b51d669 142#endif
fb0e7942 143#ifdef CONFIG_ZONE_DMA32
2f1b6248 144 256,
fb0e7942 145#endif
e53ef38d 146#ifdef CONFIG_HIGHMEM
2a1e274a 147 32,
e53ef38d 148#endif
2a1e274a 149 32,
2f1b6248 150};
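/*
 * Worked example (illustrative numbers, taken from the comment above):
 * with a ratio of 256 for ZONE_NORMAL and roughly 784M of normal
 * memory, about 784M/256 ~= 3M of ZONE_DMA is withheld from
 * allocations that could have been satisfied from a higher zone.  The
 * per-zone protection amounts themselves are computed from these
 * ratios once zone sizes are known (see setup_per_zone_lowmem_reserve()).
 */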
1da177e4
LT
151
152EXPORT_SYMBOL(totalram_pages);
1da177e4 153
15ad7cdc 154static char * const zone_names[MAX_NR_ZONES] = {
4b51d669 155#ifdef CONFIG_ZONE_DMA
2f1b6248 156 "DMA",
4b51d669 157#endif
fb0e7942 158#ifdef CONFIG_ZONE_DMA32
2f1b6248 159 "DMA32",
fb0e7942 160#endif
2f1b6248 161 "Normal",
e53ef38d 162#ifdef CONFIG_HIGHMEM
2a1e274a 163 "HighMem",
e53ef38d 164#endif
2a1e274a 165 "Movable",
2f1b6248
CL
166};
167
1da177e4
LT
168int min_free_kbytes = 1024;
169
2c85f51d
JB
170static unsigned long __meminitdata nr_kernel_pages;
171static unsigned long __meminitdata nr_all_pages;
a3142c8e 172static unsigned long __meminitdata dma_reserve;
1da177e4 173
c713216d
MG
174#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
175 /*
183ff22b 176 * MAX_ACTIVE_REGIONS determines the maximum number of distinct
c713216d
MG
177 * ranges of memory (RAM) that may be registered with add_active_range().
178 * Ranges passed to add_active_range() will be merged if possible
179 * so the number of times add_active_range() can be called is
180 * related to the number of nodes and the number of holes
181 */
182 #ifdef CONFIG_MAX_ACTIVE_REGIONS
183 /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
184 #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
185 #else
186 #if MAX_NUMNODES >= 32
187 /* If there can be many nodes, allow up to 50 holes per node */
188 #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
189 #else
190 /* By default, allow up to 256 distinct regions */
191 #define MAX_ACTIVE_REGIONS 256
192 #endif
193 #endif
194
98011f56
JB
195 static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
196 static int __meminitdata nr_nodemap_entries;
197 static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
198 static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
b69a7288 199 static unsigned long __initdata required_kernelcore;
484f51f8 200 static unsigned long __initdata required_movablecore;
b69a7288 201 static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
2a1e274a
MG
202
203 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
204 int movable_zone;
205 EXPORT_SYMBOL(movable_zone);
c713216d
MG
206#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
207
418508c1
MS
208#if MAX_NUMNODES > 1
209int nr_node_ids __read_mostly = MAX_NUMNODES;
62bc62a8 210int nr_online_nodes __read_mostly = 1;
418508c1 211EXPORT_SYMBOL(nr_node_ids);
62bc62a8 212EXPORT_SYMBOL(nr_online_nodes);
418508c1
MS
213#endif
214
9ef9acb0
MG
215int page_group_by_mobility_disabled __read_mostly;
216
b2a0ac88
MG
217static void set_pageblock_migratetype(struct page *page, int migratetype)
218{
49255c61
MG
219
220 if (unlikely(page_group_by_mobility_disabled))
221 migratetype = MIGRATE_UNMOVABLE;
222
b2a0ac88
MG
223 set_pageblock_flags_group(page, (unsigned long)migratetype,
224 PB_migrate, PB_migrate_end);
225}
226
7f33d49a
RW
227bool oom_killer_disabled __read_mostly;
228
13e7444b 229#ifdef CONFIG_DEBUG_VM
c6a57e19 230static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
1da177e4 231{
bdc8cb98
DH
232 int ret = 0;
233 unsigned seq;
234 unsigned long pfn = page_to_pfn(page);
c6a57e19 235
bdc8cb98
DH
236 do {
237 seq = zone_span_seqbegin(zone);
238 if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
239 ret = 1;
240 else if (pfn < zone->zone_start_pfn)
241 ret = 1;
242 } while (zone_span_seqretry(zone, seq));
243
244 return ret;
c6a57e19
DH
245}
246
247static int page_is_consistent(struct zone *zone, struct page *page)
248{
14e07298 249 if (!pfn_valid_within(page_to_pfn(page)))
c6a57e19 250 return 0;
1da177e4 251 if (zone != page_zone(page))
c6a57e19
DH
252 return 0;
253
254 return 1;
255}
256/*
257 * Temporary debugging check for pages not lying within a given zone.
258 */
259static int bad_range(struct zone *zone, struct page *page)
260{
261 if (page_outside_zone_boundaries(zone, page))
1da177e4 262 return 1;
c6a57e19
DH
263 if (!page_is_consistent(zone, page))
264 return 1;
265
1da177e4
LT
266 return 0;
267}
13e7444b
NP
268#else
269static inline int bad_range(struct zone *zone, struct page *page)
270{
271 return 0;
272}
273#endif
274
224abf92 275static void bad_page(struct page *page)
1da177e4 276{
d936cf9b
HD
277 static unsigned long resume;
278 static unsigned long nr_shown;
279 static unsigned long nr_unshown;
280
2a7684a2
WF
281 /* Don't complain about poisoned pages */
282 if (PageHWPoison(page)) {
283 __ClearPageBuddy(page);
284 return;
285 }
286
d936cf9b
HD
287 /*
288 * Allow a burst of 60 reports, then keep quiet for that minute;
289 * or allow a steady drip of one report per second.
290 */
291 if (nr_shown == 60) {
292 if (time_before(jiffies, resume)) {
293 nr_unshown++;
294 goto out;
295 }
296 if (nr_unshown) {
1e9e6365
HD
297 printk(KERN_ALERT
298 "BUG: Bad page state: %lu messages suppressed\n",
d936cf9b
HD
299 nr_unshown);
300 nr_unshown = 0;
301 }
302 nr_shown = 0;
303 }
304 if (nr_shown++ == 0)
305 resume = jiffies + 60 * HZ;
306
1e9e6365 307 printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n",
3dc14741 308 current->comm, page_to_pfn(page));
718a3821 309 dump_page(page);
3dc14741 310
1da177e4 311 dump_stack();
d936cf9b 312out:
8cc3b392
HD
313 /* Leave bad fields for debug, except PageBuddy could make trouble */
314 __ClearPageBuddy(page);
9f158333 315 add_taint(TAINT_BAD_PAGE);
1da177e4
LT
316}
317
1da177e4
LT
318/*
319 * Higher-order pages are called "compound pages". They are structured thusly:
320 *
321 * The first PAGE_SIZE page is called the "head page".
322 *
323 * The remaining PAGE_SIZE pages are called "tail pages".
324 *
325 * All pages have PG_compound set. All pages have their ->private pointing at
326 * the head page (even the head page has this).
327 *
41d78ba5
HD
328 * The first tail page's ->lru.next holds the address of the compound page's
329 * put_page() function. Its ->lru.prev holds the order of allocation.
330 * This usage means that zero-order pages may not be compound.
1da177e4 331 */
d98c7a09
HD
332
333static void free_compound_page(struct page *page)
334{
d85f3385 335 __free_pages_ok(page, compound_order(page));
d98c7a09
HD
336}
337
01ad1c08 338void prep_compound_page(struct page *page, unsigned long order)
18229df5
AW
339{
340 int i;
341 int nr_pages = 1 << order;
342
343 set_compound_page_dtor(page, free_compound_page);
344 set_compound_order(page, order);
345 __SetPageHead(page);
346 for (i = 1; i < nr_pages; i++) {
347 struct page *p = page + i;
348
349 __SetPageTail(p);
350 p->first_page = page;
351 }
352}
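/*
 * Sketch of the result for an order-2 page prepared above (no extra
 * state is implied beyond what prep_compound_page() sets):
 *
 *	page[0]:    PG_head set, compound_order() == 2,
 *	            destructor == free_compound_page
 *	page[1..3]: PG_tail set, ->first_page == &page[0]
 */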
353
8cc3b392 354static int destroy_compound_page(struct page *page, unsigned long order)
1da177e4
LT
355{
356 int i;
357 int nr_pages = 1 << order;
8cc3b392 358 int bad = 0;
1da177e4 359
8cc3b392
HD
360 if (unlikely(compound_order(page) != order) ||
361 unlikely(!PageHead(page))) {
224abf92 362 bad_page(page);
8cc3b392
HD
363 bad++;
364 }
1da177e4 365
6d777953 366 __ClearPageHead(page);
8cc3b392 367
18229df5
AW
368 for (i = 1; i < nr_pages; i++) {
369 struct page *p = page + i;
1da177e4 370
e713a21d 371 if (unlikely(!PageTail(p) || (p->first_page != page))) {
224abf92 372 bad_page(page);
8cc3b392
HD
373 bad++;
374 }
d85f3385 375 __ClearPageTail(p);
1da177e4 376 }
8cc3b392
HD
377
378 return bad;
1da177e4 379}
1da177e4 380
17cf4406
NP
381static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
382{
383 int i;
384
6626c5d5
AM
385 /*
386 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
387 * and __GFP_HIGHMEM from hard or soft interrupt context.
388 */
725d704e 389 VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
17cf4406
NP
390 for (i = 0; i < (1 << order); i++)
391 clear_highpage(page + i);
392}
393
6aa3001b
AM
394static inline void set_page_order(struct page *page, int order)
395{
4c21e2f2 396 set_page_private(page, order);
676165a8 397 __SetPageBuddy(page);
1da177e4
LT
398}
399
400static inline void rmv_page_order(struct page *page)
401{
676165a8 402 __ClearPageBuddy(page);
4c21e2f2 403 set_page_private(page, 0);
1da177e4
LT
404}
405
406/*
407 * Locate the struct page for both the matching buddy in our
408 * pair (buddy1) and the combined O(n+1) page they form (page).
409 *
410 * 1) Any buddy B1 will have an order O twin B2 which satisfies
411 * the following equation:
412 *     B2 = B1 ^ (1 << O)
413 * For example, if the starting buddy (buddy1) is #8 its order
414 * 1 buddy is #10:
415 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
416 *
417 * 2) Any buddy B will have an order O+1 parent P which
418 * satisfies the following equation:
419 *     P = B & ~(1 << O)
420 *
421 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
422 */
423static inline struct page *
424__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
425{
426 unsigned long buddy_idx = page_idx ^ (1 << order);
427
428 return page + (buddy_idx - page_idx);
429}
430
431static inline unsigned long
432__find_combined_index(unsigned long page_idx, unsigned int order)
433{
434 return (page_idx & ~(1 << order));
435}
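/*
 * Worked example of the two helpers above (illustrative numbers):
 * for page_idx 12 at order 2,
 *
 *	buddy_idx    = 12 ^ (1 << 2) = 8
 *	combined_idx = 12 & ~(1 << 2) = 8
 *
 * i.e. the blocks at indexes 8..11 and 12..15 would merge into the
 * order-3 block starting at index 8.
 */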
436
437/*
438 * This function checks whether a page is free && is the buddy
439 * we can coalesce a page and its buddy if
440 * (a) the buddy is not in a hole &&
441 * (b) the buddy is in the buddy system &&
442 * (c) a page and its buddy have the same order &&
443 * (d) a page and its buddy are in the same zone.
444 *
445 * For recording whether a page is in the buddy system, we use PG_buddy.
446 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
447 *
448 * For recording page's order, we use page_private(page).
449 */
cb2b95e1
AW
450static inline int page_is_buddy(struct page *page, struct page *buddy,
451 int order)
1da177e4 452{
14e07298 453 if (!pfn_valid_within(page_to_pfn(buddy)))
13e7444b 454 return 0;
13e7444b 455
cb2b95e1
AW
456 if (page_zone_id(page) != page_zone_id(buddy))
457 return 0;
458
459 if (PageBuddy(buddy) && page_order(buddy) == order) {
a3af9c38 460 VM_BUG_ON(page_count(buddy) != 0);
6aa3001b 461 return 1;
676165a8 462 }
6aa3001b 463 return 0;
1da177e4
LT
464}
465
466/*
467 * Freeing function for a buddy system allocator.
468 *
469 * The concept of a buddy system is to maintain direct-mapped table
470 * (containing bit values) for memory blocks of various "orders".
471 * The bottom level table contains the map for the smallest allocatable
472 * units of memory (here, pages), and each level above it describes
473 * pairs of units from the levels below, hence, "buddies".
474 * At a high level, all that happens here is marking the table entry
475 * at the bottom level available, and propagating the changes upward
476 * as necessary, plus some accounting needed to play nicely with other
477 * parts of the VM system.
478 * At each level, we keep a list of pages, which are heads of contiguous
479 * free pages of length (1 << order) and marked with PG_buddy. Page's
480 * order is recorded in page_private(page) field.
481 * So when we are allocating or freeing one, we can derive the state of the
482 * other. That is, if we allocate a small block, and both were
483 * free, the remainder of the region must be split into blocks.
484 * If a block is freed, and its buddy is also free, then this
485 * triggers coalescing into a block of larger size.
486 *
487 * -- wli
488 */
489
48db57f8 490static inline void __free_one_page(struct page *page,
ed0ae21d
MG
491 struct zone *zone, unsigned int order,
492 int migratetype)
1da177e4
LT
493{
494 unsigned long page_idx;
6dda9d55
CZ
495 unsigned long combined_idx;
496 struct page *buddy;
1da177e4 497
224abf92 498 if (unlikely(PageCompound(page)))
8cc3b392
HD
499 if (unlikely(destroy_compound_page(page, order)))
500 return;
1da177e4 501
ed0ae21d
MG
502 VM_BUG_ON(migratetype == -1);
503
1da177e4
LT
504 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
505
f2260e6b 506 VM_BUG_ON(page_idx & ((1 << order) - 1));
725d704e 507 VM_BUG_ON(bad_range(zone, page));
1da177e4 508
1da177e4 509 while (order < MAX_ORDER-1) {
1da177e4 510 buddy = __page_find_buddy(page, page_idx, order);
cb2b95e1 511 if (!page_is_buddy(page, buddy, order))
3c82d0ce 512 break;
13e7444b 513
3c82d0ce 514 /* Our buddy is free, merge with it and move up one order. */
1da177e4 515 list_del(&buddy->lru);
b2a0ac88 516 zone->free_area[order].nr_free--;
1da177e4 517 rmv_page_order(buddy);
13e7444b 518 combined_idx = __find_combined_index(page_idx, order);
1da177e4
LT
519 page = page + (combined_idx - page_idx);
520 page_idx = combined_idx;
521 order++;
522 }
523 set_page_order(page, order);
6dda9d55
CZ
524
525 /*
526 * If this is not the largest possible page, check if the buddy
527 * of the next-highest order is free. If it is, it's possible
528 * that pages are being freed that will coalesce soon. In case
529 * that is happening, add the free page to the tail of the list
530 * so it's less likely to be used soon and more likely to be merged
531 * as a higher order page
532 */
533 if ((order < MAX_ORDER-1) && pfn_valid_within(page_to_pfn(buddy))) {
534 struct page *higher_page, *higher_buddy;
535 combined_idx = __find_combined_index(page_idx, order);
536 higher_page = page + combined_idx - page_idx;
537 higher_buddy = __page_find_buddy(higher_page, combined_idx, order + 1);
538 if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
539 list_add_tail(&page->lru,
540 &zone->free_area[order].free_list[migratetype]);
541 goto out;
542 }
543 }
544
545 list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
546out:
1da177e4
LT
547 zone->free_area[order].nr_free++;
548}
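/*
 * Example of the merge loop above (illustrative): freeing order-0
 * page 10 while its buddy 11 is free yields an order-1 block at 10;
 * if the order-1 buddy at 8 is free as well, the two merge again into
 * an order-2 block at 8, and so on up to MAX_ORDER-1.
 */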
549
550/*
551 * free_page_mlock() -- clean up attempts to free an mlocked() page.
552 * Page should not be on lru, so no need to fix that up.
553 * free_pages_check() will verify...
554 */
555static inline void free_page_mlock(struct page *page)
556{
092cead6
KM
557 __dec_zone_page_state(page, NR_MLOCK);
558 __count_vm_event(UNEVICTABLE_MLOCKFREED);
559}
092cead6 560
224abf92 561static inline int free_pages_check(struct page *page)
1da177e4 562{
92be2e33
NP
563 if (unlikely(page_mapcount(page) |
564 (page->mapping != NULL) |
a3af9c38 565 (atomic_read(&page->_count) != 0) |
8cc3b392 566 (page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
224abf92 567 bad_page(page);
79f4b7bf 568 return 1;
8cc3b392 569 }
79f4b7bf
HD
570 if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
571 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
572 return 0;
1da177e4
LT
573}
574
575/*
5f8dcc21 576 * Frees a number of pages from the PCP lists
1da177e4 577 * Assumes all pages on list are in same zone, and of same order.
207f36ee 578 * count is the number of pages to free.
1da177e4
LT
579 *
580 * If the zone was previously in an "all pages pinned" state then look to
581 * see if this freeing clears that state.
582 *
583 * And clear the zone's pages_scanned counter, to hold off the "all pages are
584 * pinned" detection logic.
585 */
5f8dcc21
MG
586static void free_pcppages_bulk(struct zone *zone, int count,
587 struct per_cpu_pages *pcp)
1da177e4 588{
5f8dcc21 589 int migratetype = 0;
a6f9edd6 590 int batch_free = 0;
5f8dcc21 591
c54ad30c 592 spin_lock(&zone->lock);
93e4a89a 593 zone->all_unreclaimable = 0;
1da177e4 594 zone->pages_scanned = 0;
f2260e6b 595
5f8dcc21 596 __mod_zone_page_state(zone, NR_FREE_PAGES, count);
a6f9edd6 597 while (count) {
48db57f8 598 struct page *page;
5f8dcc21
MG
599 struct list_head *list;
600
601 /*
a6f9edd6
MG
602 * Remove pages from lists in a round-robin fashion. A
603 * batch_free count is maintained that is incremented when an
604 * empty list is encountered. This is so more pages are freed
605 * off fuller lists instead of spinning excessively around empty
606 * lists
5f8dcc21
MG
607 */
608 do {
a6f9edd6 609 batch_free++;
5f8dcc21
MG
610 if (++migratetype == MIGRATE_PCPTYPES)
611 migratetype = 0;
612 list = &pcp->lists[migratetype];
613 } while (list_empty(list));
48db57f8 614
a6f9edd6
MG
615 do {
616 page = list_entry(list->prev, struct page, lru);
617 /* must delete as __free_one_page list manipulates */
618 list_del(&page->lru);
a7016235
HD
619 /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
620 __free_one_page(page, zone, 0, page_private(page));
621 trace_mm_page_pcpu_drain(page, 0, page_private(page));
a6f9edd6 622 } while (--count && --batch_free && !list_empty(list));
1da177e4 623 }
c54ad30c 624 spin_unlock(&zone->lock);
1da177e4
LT
625}
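/*
 * Rough sketch of the round-robin above: every time an empty pcp list
 * is skipped, batch_free grows, so the next non-empty list is asked to
 * give up correspondingly more pages.  Fuller lists therefore drain
 * faster than nearly-empty ones.
 */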
626
ed0ae21d
MG
627static void free_one_page(struct zone *zone, struct page *page, int order,
628 int migratetype)
1da177e4 629{
006d22d9 630 spin_lock(&zone->lock);
93e4a89a 631 zone->all_unreclaimable = 0;
006d22d9 632 zone->pages_scanned = 0;
f2260e6b
MG
633
634 __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
ed0ae21d 635 __free_one_page(page, zone, order, migratetype);
006d22d9 636 spin_unlock(&zone->lock);
48db57f8
NP
637}
638
ec95f53a 639static bool free_pages_prepare(struct page *page, unsigned int order)
48db57f8 640{
1da177e4 641 int i;
8cc3b392 642 int bad = 0;
1da177e4 643
f650316c 644 trace_mm_page_free_direct(page, order);
b1eeab67
VN
645 kmemcheck_free_shadow(page, order);
646
ec95f53a
KM
647 for (i = 0; i < (1 << order); i++) {
648 struct page *pg = page + i;
649
650 if (PageAnon(pg))
651 pg->mapping = NULL;
652 bad += free_pages_check(pg);
653 }
8cc3b392 654 if (bad)
ec95f53a 655 return false;
689bcebf 656
3ac7fe5a 657 if (!PageHighMem(page)) {
9858db50 658 debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
3ac7fe5a
TG
659 debug_check_no_obj_freed(page_address(page),
660 PAGE_SIZE << order);
661 }
dafb1367 662 arch_free_page(page, order);
48db57f8 663 kernel_map_pages(page, 1 << order, 0);
dafb1367 664
ec95f53a
KM
665 return true;
666}
667
668static void __free_pages_ok(struct page *page, unsigned int order)
669{
670 unsigned long flags;
671 int wasMlocked = __TestClearPageMlocked(page);
672
673 if (!free_pages_prepare(page, order))
674 return;
675
c54ad30c 676 local_irq_save(flags);
c277331d 677 if (unlikely(wasMlocked))
da456f14 678 free_page_mlock(page);
f8891e5e 679 __count_vm_events(PGFREE, 1 << order);
ed0ae21d
MG
680 free_one_page(page_zone(page), page, order,
681 get_pageblock_migratetype(page));
c54ad30c 682 local_irq_restore(flags);
1da177e4
LT
683}
684
a226f6c8
DH
685/*
686 * permit the bootmem allocator to evade page validation on high-order frees
687 */
af370fb8 688void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
a226f6c8
DH
689{
690 if (order == 0) {
691 __ClearPageReserved(page);
692 set_page_count(page, 0);
7835e98b 693 set_page_refcounted(page);
545b1ea9 694 __free_page(page);
a226f6c8 695 } else {
a226f6c8
DH
696 int loop;
697
545b1ea9 698 prefetchw(page);
a226f6c8
DH
699 for (loop = 0; loop < BITS_PER_LONG; loop++) {
700 struct page *p = &page[loop];
701
545b1ea9
NP
702 if (loop + 1 < BITS_PER_LONG)
703 prefetchw(p + 1);
a226f6c8
DH
704 __ClearPageReserved(p);
705 set_page_count(p, 0);
706 }
707
7835e98b 708 set_page_refcounted(page);
545b1ea9 709 __free_pages(page, order);
a226f6c8
DH
710 }
711}
712
1da177e4
LT
713
714/*
715 * The order of subdivision here is critical for the IO subsystem.
716 * Please do not alter this order without good reasons and regression
717 * testing. Specifically, as large blocks of memory are subdivided,
718 * the order in which smaller blocks are delivered depends on the order
719 * they're subdivided in this function. This is the primary factor
720 * influencing the order in which pages are delivered to the IO
721 * subsystem according to empirical testing, and this is also justified
722 * by considering the behavior of a buddy system containing a single
723 * large block of memory acted on by a series of small allocations.
724 * This behavior is a critical factor in sglist merging's success.
725 *
726 * -- wli
727 */
085cc7d5 728static inline void expand(struct zone *zone, struct page *page,
b2a0ac88
MG
729 int low, int high, struct free_area *area,
730 int migratetype)
1da177e4
LT
731{
732 unsigned long size = 1 << high;
733
734 while (high > low) {
735 area--;
736 high--;
737 size >>= 1;
725d704e 738 VM_BUG_ON(bad_range(zone, &page[size]));
b2a0ac88 739 list_add(&page[size].lru, &area->free_list[migratetype]);
1da177e4
LT
740 area->nr_free++;
741 set_page_order(&page[size], high);
742 }
1da177e4
LT
743}
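/*
 * Worked example for expand() (illustrative): satisfying an order-0
 * request from an order-3 block of 8 pages hands out page[0] and puts
 * the unused halves back on the free lists as an order-2 block at
 * page[4], an order-1 block at page[2] and an order-0 block at page[1].
 */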
744
1da177e4
LT
745/*
746 * This page is about to be returned from the page allocator
747 */
2a7684a2 748static inline int check_new_page(struct page *page)
1da177e4 749{
92be2e33
NP
750 if (unlikely(page_mapcount(page) |
751 (page->mapping != NULL) |
a3af9c38 752 (atomic_read(&page->_count) != 0) |
8cc3b392 753 (page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
224abf92 754 bad_page(page);
689bcebf 755 return 1;
8cc3b392 756 }
2a7684a2
WF
757 return 0;
758}
759
760static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
761{
762 int i;
763
764 for (i = 0; i < (1 << order); i++) {
765 struct page *p = page + i;
766 if (unlikely(check_new_page(p)))
767 return 1;
768 }
689bcebf 769
4c21e2f2 770 set_page_private(page, 0);
7835e98b 771 set_page_refcounted(page);
cc102509
NP
772
773 arch_alloc_page(page, order);
1da177e4 774 kernel_map_pages(page, 1 << order, 1);
17cf4406
NP
775
776 if (gfp_flags & __GFP_ZERO)
777 prep_zero_page(page, order, gfp_flags);
778
779 if (order && (gfp_flags & __GFP_COMP))
780 prep_compound_page(page, order);
781
689bcebf 782 return 0;
1da177e4
LT
783}
784
56fd56b8
MG
785/*
786 * Go through the free lists for the given migratetype and remove
787 * the smallest available page from the freelists
788 */
728ec980
MG
789static inline
790struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
56fd56b8
MG
791 int migratetype)
792{
793 unsigned int current_order;
794 struct free_area * area;
795 struct page *page;
796
797 /* Find a page of the appropriate size in the preferred list */
798 for (current_order = order; current_order < MAX_ORDER; ++current_order) {
799 area = &(zone->free_area[current_order]);
800 if (list_empty(&area->free_list[migratetype]))
801 continue;
802
803 page = list_entry(area->free_list[migratetype].next,
804 struct page, lru);
805 list_del(&page->lru);
806 rmv_page_order(page);
807 area->nr_free--;
56fd56b8
MG
808 expand(zone, page, order, current_order, area, migratetype);
809 return page;
810 }
811
812 return NULL;
813}
814
815
b2a0ac88
MG
816/*
817 * This array describes the order lists are fallen back to when
818 * the free lists for the desirable migrate type are depleted
819 */
820static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
64c5e135
MG
821 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
822 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
823 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
824 [MIGRATE_RESERVE] = { MIGRATE_RESERVE, MIGRATE_RESERVE, MIGRATE_RESERVE }, /* Never used */
b2a0ac88
MG
825};
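/*
 * Example of the fallback order above: when the MIGRATE_UNMOVABLE free
 * lists are empty, __rmqueue_fallback() tries MIGRATE_RECLAIMABLE
 * blocks first, then MIGRATE_MOVABLE; MIGRATE_RESERVE is skipped here
 * and only used as a last resort by __rmqueue().
 */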
826
c361be55
MG
827/*
828 * Move the free pages in a range to the free lists of the requested type.
d9c23400 829 * Note that start_page and end_pages are not aligned on a pageblock
c361be55
MG
830 * boundary. If alignment is required, use move_freepages_block()
831 */
b69a7288
AB
832static int move_freepages(struct zone *zone,
833 struct page *start_page, struct page *end_page,
834 int migratetype)
c361be55
MG
835{
836 struct page *page;
837 unsigned long order;
d100313f 838 int pages_moved = 0;
c361be55
MG
839
840#ifndef CONFIG_HOLES_IN_ZONE
841 /*
842 * page_zone is not safe to call in this context when
843 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
844 * anyway as we check zone boundaries in move_freepages_block().
845 * Remove at a later date when no bug reports exist related to
ac0e5b7a 846 * grouping pages by mobility
c361be55
MG
847 */
848 BUG_ON(page_zone(start_page) != page_zone(end_page));
849#endif
850
851 for (page = start_page; page <= end_page;) {
344c790e
AL
852 /* Make sure we are not inadvertently changing nodes */
853 VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
854
c361be55
MG
855 if (!pfn_valid_within(page_to_pfn(page))) {
856 page++;
857 continue;
858 }
859
860 if (!PageBuddy(page)) {
861 page++;
862 continue;
863 }
864
865 order = page_order(page);
866 list_del(&page->lru);
867 list_add(&page->lru,
868 &zone->free_area[order].free_list[migratetype]);
869 page += 1 << order;
d100313f 870 pages_moved += 1 << order;
c361be55
MG
871 }
872
d100313f 873 return pages_moved;
c361be55
MG
874}
875
b69a7288
AB
876static int move_freepages_block(struct zone *zone, struct page *page,
877 int migratetype)
c361be55
MG
878{
879 unsigned long start_pfn, end_pfn;
880 struct page *start_page, *end_page;
881
882 start_pfn = page_to_pfn(page);
d9c23400 883 start_pfn = start_pfn & ~(pageblock_nr_pages-1);
c361be55 884 start_page = pfn_to_page(start_pfn);
d9c23400
MG
885 end_page = start_page + pageblock_nr_pages - 1;
886 end_pfn = start_pfn + pageblock_nr_pages - 1;
c361be55
MG
887
888 /* Do not cross zone boundaries */
889 if (start_pfn < zone->zone_start_pfn)
890 start_page = page;
891 if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
892 return 0;
893
894 return move_freepages(zone, start_page, end_page, migratetype);
895}
896
2f66a68f
MG
897static void change_pageblock_range(struct page *pageblock_page,
898 int start_order, int migratetype)
899{
900 int nr_pageblocks = 1 << (start_order - pageblock_order);
901
902 while (nr_pageblocks--) {
903 set_pageblock_migratetype(pageblock_page, migratetype);
904 pageblock_page += pageblock_nr_pages;
905 }
906}
907
b2a0ac88 908/* Remove an element from the buddy allocator from the fallback list */
0ac3a409
MG
909static inline struct page *
910__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
b2a0ac88
MG
911{
912 struct free_area * area;
913 int current_order;
914 struct page *page;
915 int migratetype, i;
916
917 /* Find the largest possible block of pages in the other list */
918 for (current_order = MAX_ORDER-1; current_order >= order;
919 --current_order) {
920 for (i = 0; i < MIGRATE_TYPES - 1; i++) {
921 migratetype = fallbacks[start_migratetype][i];
922
56fd56b8
MG
923 /* MIGRATE_RESERVE handled later if necessary */
924 if (migratetype == MIGRATE_RESERVE)
925 continue;
e010487d 926
b2a0ac88
MG
927 area = &(zone->free_area[current_order]);
928 if (list_empty(&area->free_list[migratetype]))
929 continue;
930
931 page = list_entry(area->free_list[migratetype].next,
932 struct page, lru);
933 area->nr_free--;
934
935 /*
936 * If breaking a large block of pages, move all free
937 * pages to the preferred allocation list. If falling
938 * back for a reclaimable kernel allocation, be more
939 * aggressive about taking ownership of free pages
940 */
d9c23400 941 if (unlikely(current_order >= (pageblock_order >> 1)) ||
dd5d241e
MG
942 start_migratetype == MIGRATE_RECLAIMABLE ||
943 page_group_by_mobility_disabled) {
46dafbca
MG
944 unsigned long pages;
945 pages = move_freepages_block(zone, page,
946 start_migratetype);
947
948 /* Claim the whole block if over half of it is free */
dd5d241e
MG
949 if (pages >= (1 << (pageblock_order-1)) ||
950 page_group_by_mobility_disabled)
46dafbca
MG
951 set_pageblock_migratetype(page,
952 start_migratetype);
953
b2a0ac88 954 migratetype = start_migratetype;
c361be55 955 }
b2a0ac88
MG
956
957 /* Remove the page from the freelists */
958 list_del(&page->lru);
959 rmv_page_order(page);
b2a0ac88 960
2f66a68f
MG
961 /* Take ownership for orders >= pageblock_order */
962 if (current_order >= pageblock_order)
963 change_pageblock_range(page, current_order,
b2a0ac88
MG
964 start_migratetype);
965
966 expand(zone, page, order, current_order, area, migratetype);
e0fff1bd
MG
967
968 trace_mm_page_alloc_extfrag(page, order, current_order,
969 start_migratetype, migratetype);
970
b2a0ac88
MG
971 return page;
972 }
973 }
974
728ec980 975 return NULL;
b2a0ac88
MG
976}
977
56fd56b8 978/*
1da177e4
LT
979 * Do the hard work of removing an element from the buddy allocator.
980 * Call me with the zone->lock already held.
981 */
b2a0ac88
MG
982static struct page *__rmqueue(struct zone *zone, unsigned int order,
983 int migratetype)
1da177e4 984{
1da177e4
LT
985 struct page *page;
986
728ec980 987retry_reserve:
56fd56b8 988 page = __rmqueue_smallest(zone, order, migratetype);
b2a0ac88 989
728ec980 990 if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
56fd56b8 991 page = __rmqueue_fallback(zone, order, migratetype);
b2a0ac88 992
728ec980
MG
993 /*
994 * Use MIGRATE_RESERVE rather than fail an allocation. goto
995 * is used because __rmqueue_smallest is an inline function
996 * and we want just one call site
997 */
998 if (!page) {
999 migratetype = MIGRATE_RESERVE;
1000 goto retry_reserve;
1001 }
1002 }
1003
0d3d062a 1004 trace_mm_page_alloc_zone_locked(page, order, migratetype);
b2a0ac88 1005 return page;
1da177e4
LT
1006}
1007
1008/*
1009 * Obtain a specified number of elements from the buddy allocator, all under
1010 * a single hold of the lock, for efficiency. Add them to the supplied list.
1011 * Returns the number of new pages which were placed at *list.
1012 */
1013static int rmqueue_bulk(struct zone *zone, unsigned int order,
b2a0ac88 1014 unsigned long count, struct list_head *list,
e084b2d9 1015 int migratetype, int cold)
1da177e4 1016{
1da177e4 1017 int i;
1da177e4 1018
c54ad30c 1019 spin_lock(&zone->lock);
1da177e4 1020 for (i = 0; i < count; ++i) {
b2a0ac88 1021 struct page *page = __rmqueue(zone, order, migratetype);
085cc7d5 1022 if (unlikely(page == NULL))
1da177e4 1023 break;
1024
1025 /*
1026 * Split buddy pages returned by expand() are received here
1027 * in physical page order. The page is added to the caller's
1028 * list and the list head then moves forward. From the caller's
1029 * perspective, the linked list is ordered by page number in
1030 * some conditions. This is useful for IO devices that can
1031 * merge IO requests if the physical pages are ordered
1032 * properly.
1033 */
e084b2d9
MG
1034 if (likely(cold == 0))
1035 list_add(&page->lru, list);
1036 else
1037 list_add_tail(&page->lru, list);
535131e6 1038 set_page_private(page, migratetype);
81eabcbe 1039 list = &page->lru;
1da177e4 1040 }
f2260e6b 1041 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
c54ad30c 1042 spin_unlock(&zone->lock);
085cc7d5 1043 return i;
1da177e4
LT
1044}
1045
4ae7c039 1046#ifdef CONFIG_NUMA
8fce4d8e 1047/*
4037d452
CL
1048 * Called from the vmstat counter updater to drain pagesets of this
1049 * currently executing processor on remote nodes after they have
1050 * expired.
1051 *
879336c3
CL
1052 * Note that this function must be called with the thread pinned to
1053 * a single processor.
8fce4d8e 1054 */
4037d452 1055void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
4ae7c039 1056{
4ae7c039 1057 unsigned long flags;
4037d452 1058 int to_drain;
4ae7c039 1059
4037d452
CL
1060 local_irq_save(flags);
1061 if (pcp->count >= pcp->batch)
1062 to_drain = pcp->batch;
1063 else
1064 to_drain = pcp->count;
5f8dcc21 1065 free_pcppages_bulk(zone, to_drain, pcp);
4037d452
CL
1066 pcp->count -= to_drain;
1067 local_irq_restore(flags);
4ae7c039
CL
1068}
1069#endif
1070
9f8f2172
CL
1071/*
1072 * Drain pages of the indicated processor.
1073 *
1074 * The processor must either be the current processor and the
1075 * thread pinned to the current processor or a processor that
1076 * is not online.
1077 */
1078static void drain_pages(unsigned int cpu)
1da177e4 1079{
c54ad30c 1080 unsigned long flags;
1da177e4 1081 struct zone *zone;
1da177e4 1082
ee99c71c 1083 for_each_populated_zone(zone) {
1da177e4 1084 struct per_cpu_pageset *pset;
3dfa5721 1085 struct per_cpu_pages *pcp;
1da177e4 1086
99dcc3e5
CL
1087 local_irq_save(flags);
1088 pset = per_cpu_ptr(zone->pageset, cpu);
3dfa5721
CL
1089
1090 pcp = &pset->pcp;
5f8dcc21 1091 free_pcppages_bulk(zone, pcp->count, pcp);
3dfa5721
CL
1092 pcp->count = 0;
1093 local_irq_restore(flags);
1da177e4
LT
1094 }
1095}
1da177e4 1096
9f8f2172
CL
1097/*
1098 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
1099 */
1100void drain_local_pages(void *arg)
1101{
1102 drain_pages(smp_processor_id());
1103}
1104
1105/*
1106 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
1107 */
1108void drain_all_pages(void)
1109{
15c8b6c1 1110 on_each_cpu(drain_local_pages, NULL, 1);
9f8f2172
CL
1111}
1112
296699de 1113#ifdef CONFIG_HIBERNATION
1da177e4
LT
1114
1115void mark_free_pages(struct zone *zone)
1116{
f623f0db
RW
1117 unsigned long pfn, max_zone_pfn;
1118 unsigned long flags;
b2a0ac88 1119 int order, t;
1da177e4
LT
1120 struct list_head *curr;
1121
1122 if (!zone->spanned_pages)
1123 return;
1124
1125 spin_lock_irqsave(&zone->lock, flags);
f623f0db
RW
1126
1127 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
1128 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1129 if (pfn_valid(pfn)) {
1130 struct page *page = pfn_to_page(pfn);
1131
7be98234
RW
1132 if (!swsusp_page_is_forbidden(page))
1133 swsusp_unset_page_free(page);
f623f0db 1134 }
1da177e4 1135
b2a0ac88
MG
1136 for_each_migratetype_order(order, t) {
1137 list_for_each(curr, &zone->free_area[order].free_list[t]) {
f623f0db 1138 unsigned long i;
1da177e4 1139
f623f0db
RW
1140 pfn = page_to_pfn(list_entry(curr, struct page, lru));
1141 for (i = 0; i < (1UL << order); i++)
7be98234 1142 swsusp_set_page_free(pfn_to_page(pfn + i));
f623f0db 1143 }
b2a0ac88 1144 }
1da177e4
LT
1145 spin_unlock_irqrestore(&zone->lock, flags);
1146}
1147#endif /* CONFIG_HIBERNATION */
1da177e4 1148
1da177e4
LT
1149/*
1150 * Free a 0-order page
fc91668e 1151 * cold == 1 ? free a cold page : free a hot page
1da177e4 1152 */
fc91668e 1153void free_hot_cold_page(struct page *page, int cold)
1da177e4
LT
1154{
1155 struct zone *zone = page_zone(page);
1156 struct per_cpu_pages *pcp;
1157 unsigned long flags;
5f8dcc21 1158 int migratetype;
451ea25d 1159 int wasMlocked = __TestClearPageMlocked(page);
1da177e4 1160
ec95f53a 1161 if (!free_pages_prepare(page, 0))
689bcebf
HD
1162 return;
1163
5f8dcc21
MG
1164 migratetype = get_pageblock_migratetype(page);
1165 set_page_private(page, migratetype);
1da177e4 1166 local_irq_save(flags);
c277331d 1167 if (unlikely(wasMlocked))
da456f14 1168 free_page_mlock(page);
f8891e5e 1169 __count_vm_event(PGFREE);
da456f14 1170
5f8dcc21
MG
1171 /*
1172 * We only track unmovable, reclaimable and movable on pcp lists.
1173 * Free ISOLATE pages back to the allocator because they are being
1174 * offlined but treat RESERVE as movable pages so we can get those
1175 * areas back if necessary. Otherwise, we may have to free
1176 * excessively into the page allocator
1177 */
1178 if (migratetype >= MIGRATE_PCPTYPES) {
1179 if (unlikely(migratetype == MIGRATE_ISOLATE)) {
1180 free_one_page(zone, page, 0, migratetype);
1181 goto out;
1182 }
1183 migratetype = MIGRATE_MOVABLE;
1184 }
1185
99dcc3e5 1186 pcp = &this_cpu_ptr(zone->pageset)->pcp;
3dfa5721 1187 if (cold)
5f8dcc21 1188 list_add_tail(&page->lru, &pcp->lists[migratetype]);
3dfa5721 1189 else
5f8dcc21 1190 list_add(&page->lru, &pcp->lists[migratetype]);
1da177e4 1191 pcp->count++;
48db57f8 1192 if (pcp->count >= pcp->high) {
5f8dcc21 1193 free_pcppages_bulk(zone, pcp->batch, pcp);
48db57f8
NP
1194 pcp->count -= pcp->batch;
1195 }
5f8dcc21
MG
1196
1197out:
1da177e4 1198 local_irq_restore(flags);
1da177e4
LT
1199}
1200
8dfcc9ba
NP
1201/*
1202 * split_page takes a non-compound higher-order page, and splits it into
1203 * n (1<<order) sub-pages: page[0..n]
1204 * Each sub-page must be freed individually.
1205 *
1206 * Note: this is probably too low level an operation for use in drivers.
1207 * Please consult with lkml before using this in your driver.
1208 */
1209void split_page(struct page *page, unsigned int order)
1210{
1211 int i;
1212
725d704e
NP
1213 VM_BUG_ON(PageCompound(page));
1214 VM_BUG_ON(!page_count(page));
b1eeab67
VN
1215
1216#ifdef CONFIG_KMEMCHECK
1217 /*
1218 * Split shadow pages too, because free(page[0]) would
1219 * otherwise free the whole shadow.
1220 */
1221 if (kmemcheck_page_is_tracked(page))
1222 split_page(virt_to_page(page[0].shadow), order);
1223#endif
1224
7835e98b
NP
1225 for (i = 1; i < (1 << order); i++)
1226 set_page_refcounted(page + i);
8dfcc9ba 1227}
8dfcc9ba 1228
748446bb
MG
1229/*
1230 * Similar to split_page except the page is already free. As this is only
1231 * being used for migration, the migratetype of the block also changes.
1232 * As this is called with interrupts disabled, the caller is responsible
1233 * for calling arch_alloc_page() and kernel_map_pages() after interrupts
1234 * are enabled.
1235 *
1236 * Note: this is probably too low level an operation for use in drivers.
1237 * Please consult with lkml before using this in your driver.
1238 */
1239int split_free_page(struct page *page)
1240{
1241 unsigned int order;
1242 unsigned long watermark;
1243 struct zone *zone;
1244
1245 BUG_ON(!PageBuddy(page));
1246
1247 zone = page_zone(page);
1248 order = page_order(page);
1249
1250 /* Obey watermarks as if the page was being allocated */
1251 watermark = low_wmark_pages(zone) + (1 << order);
1252 if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
1253 return 0;
1254
1255 /* Remove page from free list */
1256 list_del(&page->lru);
1257 zone->free_area[order].nr_free--;
1258 rmv_page_order(page);
1259 __mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));
1260
1261 /* Split into individual pages */
1262 set_page_refcounted(page);
1263 split_page(page, order);
1264
1265 if (order >= pageblock_order - 1) {
1266 struct page *endpage = page + (1 << order) - 1;
1267 for (; page < endpage; page += pageblock_nr_pages)
1268 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1269 }
1270
1271 return 1 << order;
1272}
1273
1da177e4
LT
1274/*
1275 * Really, prep_compound_page() should be called from __rmqueue_bulk(). But
1276 * we cheat by calling it from here, in the order > 0 path. Saves a branch
1277 * or two.
1278 */
0a15c3e9
MG
1279static inline
1280struct page *buffered_rmqueue(struct zone *preferred_zone,
3dd28266
MG
1281 struct zone *zone, int order, gfp_t gfp_flags,
1282 int migratetype)
1da177e4
LT
1283{
1284 unsigned long flags;
689bcebf 1285 struct page *page;
1da177e4
LT
1286 int cold = !!(gfp_flags & __GFP_COLD);
1287
689bcebf 1288again:
48db57f8 1289 if (likely(order == 0)) {
1da177e4 1290 struct per_cpu_pages *pcp;
5f8dcc21 1291 struct list_head *list;
1da177e4 1292
1da177e4 1293 local_irq_save(flags);
99dcc3e5
CL
1294 pcp = &this_cpu_ptr(zone->pageset)->pcp;
1295 list = &pcp->lists[migratetype];
5f8dcc21 1296 if (list_empty(list)) {
535131e6 1297 pcp->count += rmqueue_bulk(zone, 0,
5f8dcc21 1298 pcp->batch, list,
e084b2d9 1299 migratetype, cold);
5f8dcc21 1300 if (unlikely(list_empty(list)))
6fb332fa 1301 goto failed;
535131e6 1302 }
b92a6edd 1303
5f8dcc21
MG
1304 if (cold)
1305 page = list_entry(list->prev, struct page, lru);
1306 else
1307 page = list_entry(list->next, struct page, lru);
1308
b92a6edd
MG
1309 list_del(&page->lru);
1310 pcp->count--;
7fb1d9fc 1311 } else {
dab48dab
AM
1312 if (unlikely(gfp_flags & __GFP_NOFAIL)) {
1313 /*
1314 * __GFP_NOFAIL is not to be used in new code.
1315 *
1316 * All __GFP_NOFAIL callers should be fixed so that they
1317 * properly detect and handle allocation failures.
1318 *
1319 * We most definitely don't want callers attempting to
4923abf9 1320 * allocate greater than order-1 page units with
dab48dab
AM
1321 * __GFP_NOFAIL.
1322 */
4923abf9 1323 WARN_ON_ONCE(order > 1);
dab48dab 1324 }
1da177e4 1325 spin_lock_irqsave(&zone->lock, flags);
b2a0ac88 1326 page = __rmqueue(zone, order, migratetype);
a74609fa
NP
1327 spin_unlock(&zone->lock);
1328 if (!page)
1329 goto failed;
6ccf80eb 1330 __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
1da177e4
LT
1331 }
1332
f8891e5e 1333 __count_zone_vm_events(PGALLOC, zone, 1 << order);
18ea7e71 1334 zone_statistics(preferred_zone, zone);
a74609fa 1335 local_irq_restore(flags);
1da177e4 1336
725d704e 1337 VM_BUG_ON(bad_range(zone, page));
17cf4406 1338 if (prep_new_page(page, order, gfp_flags))
a74609fa 1339 goto again;
1da177e4 1340 return page;
a74609fa
NP
1341
1342failed:
1343 local_irq_restore(flags);
a74609fa 1344 return NULL;
1da177e4
LT
1345}
1346
41858966
MG
1347/* The ALLOC_WMARK bits are used as an index to zone->watermark */
1348#define ALLOC_WMARK_MIN WMARK_MIN
1349#define ALLOC_WMARK_LOW WMARK_LOW
1350#define ALLOC_WMARK_HIGH WMARK_HIGH
1351#define ALLOC_NO_WATERMARKS 0x04 /* don't check watermarks at all */
1352
1353/* Mask to get the watermark bits */
1354#define ALLOC_WMARK_MASK (ALLOC_NO_WATERMARKS-1)
1355
3148890b
NP
1356#define ALLOC_HARDER 0x10 /* try to alloc harder */
1357#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */
1358#define ALLOC_CPUSET 0x40 /* check for correct cpuset */
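/*
 * Example of how these combine (a sketch, not an exhaustive list): a
 * normal allocation attempt passes something like
 * ALLOC_WMARK_LOW|ALLOC_CPUSET, while the post-OOM recheck below uses
 * ALLOC_WMARK_HIGH|ALLOC_CPUSET; ALLOC_WMARK_MASK extracts the
 * watermark index from such a combination.
 */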
7fb1d9fc 1359
933e312e
AM
1360#ifdef CONFIG_FAIL_PAGE_ALLOC
1361
1362static struct fail_page_alloc_attr {
1363 struct fault_attr attr;
1364
1365 u32 ignore_gfp_highmem;
1366 u32 ignore_gfp_wait;
54114994 1367 u32 min_order;
933e312e
AM
1368
1369#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1370
1371 struct dentry *ignore_gfp_highmem_file;
1372 struct dentry *ignore_gfp_wait_file;
54114994 1373 struct dentry *min_order_file;
933e312e
AM
1374
1375#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1376
1377} fail_page_alloc = {
1378 .attr = FAULT_ATTR_INITIALIZER,
6b1b60f4
DM
1379 .ignore_gfp_wait = 1,
1380 .ignore_gfp_highmem = 1,
54114994 1381 .min_order = 1,
933e312e
AM
1382};
1383
1384static int __init setup_fail_page_alloc(char *str)
1385{
1386 return setup_fault_attr(&fail_page_alloc.attr, str);
1387}
1388__setup("fail_page_alloc=", setup_fail_page_alloc);
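/*
 * Illustrative usage (see Documentation/fault-injection/ for the
 * authoritative syntax): booting with something like
 *
 *	fail_page_alloc=1,10,0,-1
 *
 * asks setup_fault_attr() to fail a percentage of eligible allocations
 * indefinitely; the debugfs knobs below then refine which allocations
 * are eligible.
 */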
1389
1390static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1391{
54114994
AM
1392 if (order < fail_page_alloc.min_order)
1393 return 0;
933e312e
AM
1394 if (gfp_mask & __GFP_NOFAIL)
1395 return 0;
1396 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
1397 return 0;
1398 if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
1399 return 0;
1400
1401 return should_fail(&fail_page_alloc.attr, 1 << order);
1402}
1403
1404#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1405
1406static int __init fail_page_alloc_debugfs(void)
1407{
1408 mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
1409 struct dentry *dir;
1410 int err;
1411
1412 err = init_fault_attr_dentries(&fail_page_alloc.attr,
1413 "fail_page_alloc");
1414 if (err)
1415 return err;
1416 dir = fail_page_alloc.attr.dentries.dir;
1417
1418 fail_page_alloc.ignore_gfp_wait_file =
1419 debugfs_create_bool("ignore-gfp-wait", mode, dir,
1420 &fail_page_alloc.ignore_gfp_wait);
1421
1422 fail_page_alloc.ignore_gfp_highmem_file =
1423 debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1424 &fail_page_alloc.ignore_gfp_highmem);
54114994
AM
1425 fail_page_alloc.min_order_file =
1426 debugfs_create_u32("min-order", mode, dir,
1427 &fail_page_alloc.min_order);
933e312e
AM
1428
1429 if (!fail_page_alloc.ignore_gfp_wait_file ||
54114994
AM
1430 !fail_page_alloc.ignore_gfp_highmem_file ||
1431 !fail_page_alloc.min_order_file) {
933e312e
AM
1432 err = -ENOMEM;
1433 debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
1434 debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
54114994 1435 debugfs_remove(fail_page_alloc.min_order_file);
933e312e
AM
1436 cleanup_fault_attr_dentries(&fail_page_alloc.attr);
1437 }
1438
1439 return err;
1440}
1441
1442late_initcall(fail_page_alloc_debugfs);
1443
1444#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1445
1446#else /* CONFIG_FAIL_PAGE_ALLOC */
1447
1448static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1449{
1450 return 0;
1451}
1452
1453#endif /* CONFIG_FAIL_PAGE_ALLOC */
1454
1da177e4
LT
1455/*
1456 * Return 1 if free pages are above 'mark'. This takes into account the order
1457 * of the allocation.
1458 */
1459int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
7fb1d9fc 1460 int classzone_idx, int alloc_flags)
1da177e4
LT
1461{
1462 /* free_pages may go negative - that's OK */
d23ad423
CL
1463 long min = mark;
1464 long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
1da177e4
LT
1465 int o;
1466
7fb1d9fc 1467 if (alloc_flags & ALLOC_HIGH)
1da177e4 1468 min -= min / 2;
7fb1d9fc 1469 if (alloc_flags & ALLOC_HARDER)
1da177e4
LT
1470 min -= min / 4;
1471
1472 if (free_pages <= min + z->lowmem_reserve[classzone_idx])
1473 return 0;
1474 for (o = 0; o < order; o++) {
1475 /* At the next order, this order's pages become unavailable */
1476 free_pages -= z->free_area[o].nr_free << o;
1477
1478 /* Require fewer higher order pages to be free */
1479 min >>= 1;
1480
1481 if (free_pages <= min)
1482 return 0;
1483 }
1484 return 1;
1485}
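/*
 * Worked example (illustrative numbers): for an order-2 request with
 * min = 128 and 200 free pages, of which 150 sit in order-0/order-1
 * blocks, the o = 0 and o = 1 passes subtract those 150 pages and
 * halve min twice to 32; 50 > 32, so the check still passes.  Had all
 * 150 been order-0 pages, the o = 0 pass alone would already have
 * failed (50 <= 64).
 */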
1486
9276b1bc
PJ
1487#ifdef CONFIG_NUMA
1488/*
1489 * zlc_setup - Setup for "zonelist cache". Uses cached zone data to
1490 * skip over zones that are not allowed by the cpuset, or that have
1491 * been recently (in last second) found to be nearly full. See further
1492 * comments in mmzone.h. Reduces cache footprint of zonelist scans
183ff22b 1493 * that have to skip over a lot of full or unallowed zones.
9276b1bc
PJ
1494 *
1495 * If the zonelist cache is present in the passed in zonelist, then
1496 * returns a pointer to the allowed node mask (either the current
1497 * task's mems_allowed, or node_states[N_HIGH_MEMORY].)
9276b1bc
PJ
1498 *
1499 * If the zonelist cache is not available for this zonelist, does
1500 * nothing and returns NULL.
1501 *
1502 * If the fullzones BITMAP in the zonelist cache is stale (more than
1503 * a second since last zap'd) then we zap it out (clear its bits.)
1504 *
1505 * We hold off even calling zlc_setup, until after we've checked the
1506 * first zone in the zonelist, on the theory that most allocations will
1507 * be satisfied from that first zone, so best to examine that zone as
1508 * quickly as we can.
1509 */
1510static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1511{
1512 struct zonelist_cache *zlc; /* cached zonelist speedup info */
1513 nodemask_t *allowednodes; /* zonelist_cache approximation */
1514
1515 zlc = zonelist->zlcache_ptr;
1516 if (!zlc)
1517 return NULL;
1518
f05111f5 1519 if (time_after(jiffies, zlc->last_full_zap + HZ)) {
9276b1bc
PJ
1520 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1521 zlc->last_full_zap = jiffies;
1522 }
1523
1524 allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1525 &cpuset_current_mems_allowed :
37b07e41 1526 &node_states[N_HIGH_MEMORY];
9276b1bc
PJ
1527 return allowednodes;
1528}
1529
1530/*
1531 * Given 'z' scanning a zonelist, run a couple of quick checks to see
1532 * if it is worth looking at further for free memory:
1533 * 1) Check that the zone isn't thought to be full (doesn't have its
1534 * bit set in the zonelist_cache fullzones BITMAP).
1535 * 2) Check that the zone's node (obtained from the zonelist_cache
1536 * z_to_n[] mapping) is allowed in the passed in allowednodes mask.
1537 * Return true (non-zero) if zone is worth looking at further, or
1538 * else return false (zero) if it is not.
1539 *
1540 * This check -ignores- the distinction between various watermarks,
1541 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ... If a zone is
1542 * found to be full for any variation of these watermarks, it will
1543 * be considered full for up to one second by all requests, unless
1544 * we are so low on memory on all allowed nodes that we are forced
1545 * into the second scan of the zonelist.
1546 *
1547 * In the second scan we ignore this zonelist cache and exactly
1548 * apply the watermarks to all zones, even if it is slower to do so.
1549 * We are low on memory in the second scan, and should leave no stone
1550 * unturned looking for a free page.
1551 */
dd1a239f 1552static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
9276b1bc
PJ
1553 nodemask_t *allowednodes)
1554{
1555 struct zonelist_cache *zlc; /* cached zonelist speedup info */
1556 int i; /* index of *z in zonelist zones */
1557 int n; /* node that zone *z is on */
1558
1559 zlc = zonelist->zlcache_ptr;
1560 if (!zlc)
1561 return 1;
1562
dd1a239f 1563 i = z - zonelist->_zonerefs;
9276b1bc
PJ
1564 n = zlc->z_to_n[i];
1565
1566 /* This zone is worth trying if it is allowed but not full */
1567 return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1568}
1569
1570/*
1571 * Given 'z' scanning a zonelist, set the corresponding bit in
1572 * zlc->fullzones, so that subsequent attempts to allocate a page
1573 * from that zone don't waste time re-examining it.
1574 */
dd1a239f 1575static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
9276b1bc
PJ
1576{
1577 struct zonelist_cache *zlc; /* cached zonelist speedup info */
1578 int i; /* index of *z in zonelist zones */
1579
1580 zlc = zonelist->zlcache_ptr;
1581 if (!zlc)
1582 return;
1583
dd1a239f 1584 i = z - zonelist->_zonerefs;
9276b1bc
PJ
1585
1586 set_bit(i, zlc->fullzones);
1587}
1588
1589#else /* CONFIG_NUMA */
1590
1591static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1592{
1593 return NULL;
1594}
1595
dd1a239f 1596static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
9276b1bc
PJ
1597 nodemask_t *allowednodes)
1598{
1599 return 1;
1600}
1601
dd1a239f 1602static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
9276b1bc
PJ
1603{
1604}
1605#endif /* CONFIG_NUMA */
1606
7fb1d9fc 1607/*
0798e519 1608 * get_page_from_freelist goes through the zonelist trying to allocate
7fb1d9fc
RS
1609 * a page.
1610 */
1611static struct page *
19770b32 1612get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
5117f45d 1613 struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
3dd28266 1614 struct zone *preferred_zone, int migratetype)
753ee728 1615{
dd1a239f 1616 struct zoneref *z;
7fb1d9fc 1617 struct page *page = NULL;
54a6eb5c 1618 int classzone_idx;
5117f45d 1619 struct zone *zone;
9276b1bc
PJ
1620 nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1621 int zlc_active = 0; /* set if using zonelist_cache */
1622 int did_zlc_setup = 0; /* just call zlc_setup() one time */
54a6eb5c 1623
19770b32 1624 classzone_idx = zone_idx(preferred_zone);
9276b1bc 1625zonelist_scan:
7fb1d9fc 1626 /*
9276b1bc 1627 * Scan zonelist, looking for a zone with enough free.
7fb1d9fc
RS
1628 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1629 */
19770b32
MG
1630 for_each_zone_zonelist_nodemask(zone, z, zonelist,
1631 high_zoneidx, nodemask) {
9276b1bc
PJ
1632 if (NUMA_BUILD && zlc_active &&
1633 !zlc_zone_worth_trying(zonelist, z, allowednodes))
1634 continue;
7fb1d9fc 1635 if ((alloc_flags & ALLOC_CPUSET) &&
02a0e53d 1636 !cpuset_zone_allowed_softwall(zone, gfp_mask))
9276b1bc 1637 goto try_next_zone;
7fb1d9fc 1638
41858966 1639 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
7fb1d9fc 1640 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
3148890b 1641 unsigned long mark;
fa5e084e
MG
1642 int ret;
1643
41858966 1644 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
fa5e084e
MG
1645 if (zone_watermark_ok(zone, order, mark,
1646 classzone_idx, alloc_flags))
1647 goto try_this_zone;
1648
1649 if (zone_reclaim_mode == 0)
1650 goto this_zone_full;
1651
1652 ret = zone_reclaim(zone, gfp_mask, order);
1653 switch (ret) {
1654 case ZONE_RECLAIM_NOSCAN:
1655 /* did not scan */
1656 goto try_next_zone;
1657 case ZONE_RECLAIM_FULL:
1658 /* scanned but unreclaimable */
1659 goto this_zone_full;
1660 default:
 1661				/* did we reclaim enough? */
1662 if (!zone_watermark_ok(zone, order, mark,
1663 classzone_idx, alloc_flags))
9276b1bc 1664 goto this_zone_full;
0798e519 1665 }
7fb1d9fc
RS
1666 }
1667
fa5e084e 1668try_this_zone:
3dd28266
MG
1669 page = buffered_rmqueue(preferred_zone, zone, order,
1670 gfp_mask, migratetype);
0798e519 1671 if (page)
7fb1d9fc 1672 break;
9276b1bc
PJ
1673this_zone_full:
1674 if (NUMA_BUILD)
1675 zlc_mark_zone_full(zonelist, z);
1676try_next_zone:
62bc62a8 1677 if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
d395b734
MG
1678 /*
 1679			 * We do zlc_setup() after the first zone is tried, but only
 1680			 * if there are multiple nodes to make it worthwhile.
1681 */
9276b1bc
PJ
1682 allowednodes = zlc_setup(zonelist, alloc_flags);
1683 zlc_active = 1;
1684 did_zlc_setup = 1;
1685 }
54a6eb5c 1686 }
9276b1bc
PJ
1687
1688 if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
1689 /* Disable zlc cache for second zonelist scan */
1690 zlc_active = 0;
1691 goto zonelist_scan;
1692 }
7fb1d9fc 1693 return page;
753ee728
MH
1694}
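/*
 * Overview of the scan above (a reader's summary derived from the code, not
 * authoritative kernel documentation): for each candidate zone in preference
 * order we (1) skip zones the zonelist cache has already marked full,
 * (2) skip zones the current cpuset disallows when ALLOC_CPUSET is set, and
 * (3) check the watermark selected by alloc_flags, optionally attempting
 * zone_reclaim() before giving up on the zone.  Only a zone that passes all
 * of this reaches buffered_rmqueue() and actually supplies pages.  If the
 * whole scan fails while the zlc cache was active, the scan is repeated once
 * with the cache disabled in case the cached "full" bits were stale.
 */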
1695
11e33f6a
MG
1696static inline int
1697should_alloc_retry(gfp_t gfp_mask, unsigned int order,
1698 unsigned long pages_reclaimed)
1da177e4 1699{
11e33f6a
MG
1700 /* Do not loop if specifically requested */
1701 if (gfp_mask & __GFP_NORETRY)
1702 return 0;
1da177e4 1703
11e33f6a
MG
1704 /*
1705 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
1706 * means __GFP_NOFAIL, but that may not be true in other
1707 * implementations.
1708 */
1709 if (order <= PAGE_ALLOC_COSTLY_ORDER)
1710 return 1;
1711
1712 /*
1713 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
1714 * specified, then we retry until we no longer reclaim any pages
1715 * (above), or we've reclaimed an order of pages at least as
1716 * large as the allocation's order. In both cases, if the
1717 * allocation still fails, we stop retrying.
1718 */
1719 if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
1720 return 1;
cf40bd16 1721
11e33f6a
MG
1722 /*
1723 * Don't let big-order allocations loop unless the caller
1724 * explicitly requests that.
1725 */
1726 if (gfp_mask & __GFP_NOFAIL)
1727 return 1;
1da177e4 1728
11e33f6a
MG
1729 return 0;
1730}
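/*
 * Worked example of the retry policy above: an order-0 GFP_KERNEL request
 * always returns 1 via the PAGE_ALLOC_COSTLY_ORDER test and therefore keeps
 * retrying, while an order-4 request without __GFP_REPEAT or __GFP_NOFAIL
 * falls through every test and returns 0 after the first unsuccessful
 * reclaim pass.
 */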
933e312e 1731
11e33f6a
MG
1732static inline struct page *
1733__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
1734 struct zonelist *zonelist, enum zone_type high_zoneidx,
3dd28266
MG
1735 nodemask_t *nodemask, struct zone *preferred_zone,
1736 int migratetype)
11e33f6a
MG
1737{
1738 struct page *page;
1739
1740 /* Acquire the OOM killer lock for the zones in zonelist */
ff321fea 1741 if (!try_set_zonelist_oom(zonelist, gfp_mask)) {
11e33f6a 1742 schedule_timeout_uninterruptible(1);
1da177e4
LT
1743 return NULL;
1744 }
6b1de916 1745
11e33f6a
MG
1746 /*
1747 * Go through the zonelist yet one more time, keep very high watermark
1748 * here, this is only to catch a parallel oom killing, we must fail if
1749 * we're still under heavy pressure.
1750 */
1751 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
1752 order, zonelist, high_zoneidx,
5117f45d 1753 ALLOC_WMARK_HIGH|ALLOC_CPUSET,
3dd28266 1754 preferred_zone, migratetype);
7fb1d9fc 1755 if (page)
11e33f6a
MG
1756 goto out;
1757
4365a567
KH
1758 if (!(gfp_mask & __GFP_NOFAIL)) {
1759 /* The OOM killer will not help higher order allocs */
1760 if (order > PAGE_ALLOC_COSTLY_ORDER)
1761 goto out;
03668b3c
DR
1762 /* The OOM killer does not needlessly kill tasks for lowmem */
1763 if (high_zoneidx < ZONE_NORMAL)
1764 goto out;
4365a567
KH
1765 /*
1766 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
1767 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
1768 * The caller should handle page allocation failure by itself if
1769 * it specifies __GFP_THISNODE.
1770 * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
1771 */
1772 if (gfp_mask & __GFP_THISNODE)
1773 goto out;
1774 }
11e33f6a 1775 /* Exhausted what can be done so it's blamo time */
4365a567 1776 out_of_memory(zonelist, gfp_mask, order, nodemask);
11e33f6a
MG
1777
1778out:
1779 clear_zonelist_oom(zonelist, gfp_mask);
1780 return page;
1781}
1782
56de7263
MG
1783#ifdef CONFIG_COMPACTION
1784/* Try memory compaction for high-order allocations before reclaim */
1785static struct page *
1786__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
1787 struct zonelist *zonelist, enum zone_type high_zoneidx,
1788 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1789 int migratetype, unsigned long *did_some_progress)
1790{
1791 struct page *page;
1792
4f92e258 1793 if (!order || compaction_deferred(preferred_zone))
56de7263
MG
1794 return NULL;
1795
1796 *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
1797 nodemask);
1798 if (*did_some_progress != COMPACT_SKIPPED) {
1799
1800 /* Page migration frees to the PCP lists but we want merging */
1801 drain_pages(get_cpu());
1802 put_cpu();
1803
1804 page = get_page_from_freelist(gfp_mask, nodemask,
1805 order, zonelist, high_zoneidx,
1806 alloc_flags, preferred_zone,
1807 migratetype);
1808 if (page) {
4f92e258
MG
1809 preferred_zone->compact_considered = 0;
1810 preferred_zone->compact_defer_shift = 0;
56de7263
MG
1811 count_vm_event(COMPACTSUCCESS);
1812 return page;
1813 }
1814
1815 /*
 1816		 * It's bad if a compaction run occurs and fails.
1817 * The most likely reason is that pages exist,
1818 * but not enough to satisfy watermarks.
1819 */
1820 count_vm_event(COMPACTFAIL);
4f92e258 1821 defer_compaction(preferred_zone);
56de7263
MG
1822
1823 cond_resched();
1824 }
1825
1826 return NULL;
1827}
1828#else
1829static inline struct page *
1830__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
1831 struct zonelist *zonelist, enum zone_type high_zoneidx,
1832 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1833 int migratetype, unsigned long *did_some_progress)
1834{
1835 return NULL;
1836}
1837#endif /* CONFIG_COMPACTION */
1838
11e33f6a
MG
1839/* The really slow allocator path where we enter direct reclaim */
1840static inline struct page *
1841__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
1842 struct zonelist *zonelist, enum zone_type high_zoneidx,
5117f45d 1843 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
3dd28266 1844 int migratetype, unsigned long *did_some_progress)
11e33f6a
MG
1845{
1846 struct page *page = NULL;
1847 struct reclaim_state reclaim_state;
1848 struct task_struct *p = current;
1849
1850 cond_resched();
1851
1852 /* We now go into synchronous reclaim */
1853 cpuset_memory_pressure_bump();
11e33f6a
MG
1854 p->flags |= PF_MEMALLOC;
1855 lockdep_set_current_reclaim_state(gfp_mask);
1856 reclaim_state.reclaimed_slab = 0;
1857 p->reclaim_state = &reclaim_state;
1858
1859 *did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
1860
1861 p->reclaim_state = NULL;
1862 lockdep_clear_current_reclaim_state();
1863 p->flags &= ~PF_MEMALLOC;
1864
1865 cond_resched();
1866
1867 if (order != 0)
1868 drain_all_pages();
1869
1870 if (likely(*did_some_progress))
1871 page = get_page_from_freelist(gfp_mask, nodemask, order,
5117f45d 1872 zonelist, high_zoneidx,
3dd28266
MG
1873 alloc_flags, preferred_zone,
1874 migratetype);
11e33f6a
MG
1875 return page;
1876}
1877
1da177e4 1878/*
11e33f6a
MG
1879 * This is called in the allocator slow-path if the allocation request is of
1880 * sufficient urgency to ignore watermarks and take other desperate measures
1da177e4 1881 */
11e33f6a
MG
1882static inline struct page *
1883__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
1884 struct zonelist *zonelist, enum zone_type high_zoneidx,
3dd28266
MG
1885 nodemask_t *nodemask, struct zone *preferred_zone,
1886 int migratetype)
11e33f6a
MG
1887{
1888 struct page *page;
1889
1890 do {
1891 page = get_page_from_freelist(gfp_mask, nodemask, order,
5117f45d 1892 zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
3dd28266 1893 preferred_zone, migratetype);
11e33f6a
MG
1894
1895 if (!page && gfp_mask & __GFP_NOFAIL)
8aa7e847 1896 congestion_wait(BLK_RW_ASYNC, HZ/50);
11e33f6a
MG
1897 } while (!page && (gfp_mask & __GFP_NOFAIL));
1898
1899 return page;
1900}
1901
1902static inline
1903void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
1904 enum zone_type high_zoneidx)
1da177e4 1905{
dd1a239f
MG
1906 struct zoneref *z;
1907 struct zone *zone;
1da177e4 1908
11e33f6a
MG
1909 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
1910 wakeup_kswapd(zone, order);
1911}
cf40bd16 1912
341ce06f
PZ
1913static inline int
1914gfp_to_alloc_flags(gfp_t gfp_mask)
1915{
1916 struct task_struct *p = current;
1917 int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
1918 const gfp_t wait = gfp_mask & __GFP_WAIT;
1da177e4 1919
a56f57ff
MG
1920 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
1921 BUILD_BUG_ON(__GFP_HIGH != ALLOC_HIGH);
933e312e 1922
341ce06f
PZ
1923 /*
1924 * The caller may dip into page reserves a bit more if the caller
1925 * cannot run direct reclaim, or if the caller has realtime scheduling
1926 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
1927 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
1928 */
a56f57ff 1929 alloc_flags |= (gfp_mask & __GFP_HIGH);
1da177e4 1930
341ce06f
PZ
1931 if (!wait) {
1932 alloc_flags |= ALLOC_HARDER;
523b9458 1933 /*
341ce06f
PZ
1934 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
1935 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
523b9458 1936 */
341ce06f 1937 alloc_flags &= ~ALLOC_CPUSET;
9d0ed60f 1938 } else if (unlikely(rt_task(p)) && !in_interrupt())
341ce06f
PZ
1939 alloc_flags |= ALLOC_HARDER;
1940
1941 if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
1942 if (!in_interrupt() &&
1943 ((p->flags & PF_MEMALLOC) ||
1944 unlikely(test_thread_flag(TIF_MEMDIE))))
1945 alloc_flags |= ALLOC_NO_WATERMARKS;
1da177e4 1946 }
6b1de916 1947
341ce06f
PZ
1948 return alloc_flags;
1949}
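/*
 * Example mappings produced by gfp_to_alloc_flags() (a sketch; it assumes
 * GFP_ATOMIC is __GFP_HIGH without __GFP_WAIT in this tree, and a
 * non-realtime caller):
 *
 *   GFP_KERNEL  (__GFP_WAIT set)   -> ALLOC_WMARK_MIN | ALLOC_CPUSET
 *   GFP_ATOMIC  (!wait, __GFP_HIGH) -> ALLOC_WMARK_MIN | ALLOC_HIGH |
 *                                      ALLOC_HARDER, with ALLOC_CPUSET cleared
 *
 * A PF_MEMALLOC task, or one with TIF_MEMDIE set, additionally gets
 * ALLOC_NO_WATERMARKS outside interrupt context unless __GFP_NOMEMALLOC
 * is passed.
 */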
1950
11e33f6a
MG
1951static inline struct page *
1952__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
1953 struct zonelist *zonelist, enum zone_type high_zoneidx,
3dd28266
MG
1954 nodemask_t *nodemask, struct zone *preferred_zone,
1955 int migratetype)
11e33f6a
MG
1956{
1957 const gfp_t wait = gfp_mask & __GFP_WAIT;
1958 struct page *page = NULL;
1959 int alloc_flags;
1960 unsigned long pages_reclaimed = 0;
1961 unsigned long did_some_progress;
1962 struct task_struct *p = current;
1da177e4 1963
72807a74
MG
1964 /*
1965 * In the slowpath, we sanity check order to avoid ever trying to
1966 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
1967 * be using allocators in order of preference for an area that is
1968 * too large.
1969 */
1fc28b70
MG
1970 if (order >= MAX_ORDER) {
1971 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
72807a74 1972 return NULL;
1fc28b70 1973 }
1da177e4 1974
952f3b51
CL
1975 /*
1976 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
1977 * __GFP_NOWARN set) should not cause reclaim since the subsystem
 1978	 * (e.g. slab) using GFP_THISNODE may choose to trigger reclaim
 1979	 * using a larger set of nodes after it has established that the
 1980	 * allowed per-node queues are empty and that nodes are
 1981	 * overallocated.
1982 */
1983 if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
1984 goto nopage;
1985
cc4a6851 1986restart:
11e33f6a 1987 wake_all_kswapd(order, zonelist, high_zoneidx);
1da177e4 1988
9bf2229f 1989 /*
7fb1d9fc
RS
1990 * OK, we're below the kswapd watermark and have kicked background
1991 * reclaim. Now things get more complex, so set up alloc_flags according
1992 * to how we want to proceed.
9bf2229f 1993 */
341ce06f 1994 alloc_flags = gfp_to_alloc_flags(gfp_mask);
1da177e4 1995
341ce06f 1996 /* This is the last chance, in general, before the goto nopage. */
19770b32 1997 page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
341ce06f
PZ
1998 high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
1999 preferred_zone, migratetype);
7fb1d9fc
RS
2000 if (page)
2001 goto got_pg;
1da177e4 2002
b43a57bb 2003rebalance:
11e33f6a 2004 /* Allocate without watermarks if the context allows */
341ce06f
PZ
2005 if (alloc_flags & ALLOC_NO_WATERMARKS) {
2006 page = __alloc_pages_high_priority(gfp_mask, order,
2007 zonelist, high_zoneidx, nodemask,
2008 preferred_zone, migratetype);
2009 if (page)
2010 goto got_pg;
1da177e4
LT
2011 }
2012
2013 /* Atomic allocations - we can't balance anything */
2014 if (!wait)
2015 goto nopage;
2016
341ce06f
PZ
2017 /* Avoid recursion of direct reclaim */
2018 if (p->flags & PF_MEMALLOC)
2019 goto nopage;
2020
6583bb64
DR
2021 /* Avoid allocations with no watermarks from looping endlessly */
2022 if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
2023 goto nopage;
2024
56de7263
MG
2025 /* Try direct compaction */
2026 page = __alloc_pages_direct_compact(gfp_mask, order,
2027 zonelist, high_zoneidx,
2028 nodemask,
2029 alloc_flags, preferred_zone,
2030 migratetype, &did_some_progress);
2031 if (page)
2032 goto got_pg;
2033
11e33f6a
MG
2034 /* Try direct reclaim and then allocating */
2035 page = __alloc_pages_direct_reclaim(gfp_mask, order,
2036 zonelist, high_zoneidx,
2037 nodemask,
5117f45d 2038 alloc_flags, preferred_zone,
3dd28266 2039 migratetype, &did_some_progress);
11e33f6a
MG
2040 if (page)
2041 goto got_pg;
1da177e4 2042
e33c3b5e 2043 /*
11e33f6a
MG
2044 * If we failed to make any progress reclaiming, then we are
2045 * running out of options and have to consider going OOM
e33c3b5e 2046 */
11e33f6a
MG
2047 if (!did_some_progress) {
2048 if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
7f33d49a
RW
2049 if (oom_killer_disabled)
2050 goto nopage;
11e33f6a
MG
2051 page = __alloc_pages_may_oom(gfp_mask, order,
2052 zonelist, high_zoneidx,
3dd28266
MG
2053 nodemask, preferred_zone,
2054 migratetype);
11e33f6a
MG
2055 if (page)
2056 goto got_pg;
1da177e4 2057
03668b3c
DR
2058 if (!(gfp_mask & __GFP_NOFAIL)) {
2059 /*
2060 * The oom killer is not called for high-order
2061 * allocations that may fail, so if no progress
2062 * is being made, there are no other options and
2063 * retrying is unlikely to help.
2064 */
2065 if (order > PAGE_ALLOC_COSTLY_ORDER)
2066 goto nopage;
2067 /*
2068 * The oom killer is not called for lowmem
2069 * allocations to prevent needlessly killing
2070 * innocent tasks.
2071 */
2072 if (high_zoneidx < ZONE_NORMAL)
2073 goto nopage;
2074 }
e2c55dc8 2075
ff0ceb9d
DR
2076 goto restart;
2077 }
1da177e4
LT
2078 }
2079
11e33f6a 2080 /* Check if we should retry the allocation */
a41f24ea 2081 pages_reclaimed += did_some_progress;
11e33f6a
MG
2082 if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
2083 /* Wait for some write requests to complete then retry */
8aa7e847 2084 congestion_wait(BLK_RW_ASYNC, HZ/50);
1da177e4
LT
2085 goto rebalance;
2086 }
2087
2088nopage:
2089 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
2090 printk(KERN_WARNING "%s: page allocation failure."
2091 " order:%d, mode:0x%x\n",
2092 p->comm, order, gfp_mask);
2093 dump_stack();
578c2fd6 2094 show_mem();
1da177e4 2095 }
b1eeab67 2096 return page;
1da177e4 2097got_pg:
b1eeab67
VN
2098 if (kmemcheck_enabled)
2099 kmemcheck_pagealloc_alloc(page, order, gfp_mask);
1da177e4 2100 return page;
11e33f6a 2101
1da177e4 2102}
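/*
 * Slow-path order of escalation, as implemented above: wake kswapd for every
 * eligible zone, retry the freelists at the min watermark, then (if the
 * context allows) ignore watermarks entirely, then try memory compaction,
 * then direct reclaim, and finally the OOM killer.  Between rounds the
 * allocator either gives up (per should_alloc_retry()) or waits briefly in
 * congestion_wait() and starts again at the "rebalance" label.
 */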
11e33f6a
MG
2103
2104/*
2105 * This is the 'heart' of the zoned buddy allocator.
2106 */
2107struct page *
2108__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
2109 struct zonelist *zonelist, nodemask_t *nodemask)
2110{
2111 enum zone_type high_zoneidx = gfp_zone(gfp_mask);
5117f45d 2112 struct zone *preferred_zone;
11e33f6a 2113 struct page *page;
3dd28266 2114 int migratetype = allocflags_to_migratetype(gfp_mask);
11e33f6a 2115
dcce284a
BH
2116 gfp_mask &= gfp_allowed_mask;
2117
11e33f6a
MG
2118 lockdep_trace_alloc(gfp_mask);
2119
2120 might_sleep_if(gfp_mask & __GFP_WAIT);
2121
2122 if (should_fail_alloc_page(gfp_mask, order))
2123 return NULL;
2124
2125 /*
2126 * Check the zones suitable for the gfp_mask contain at least one
2127 * valid zone. It's possible to have an empty zonelist as a result
2128 * of GFP_THISNODE and a memoryless node
2129 */
2130 if (unlikely(!zonelist->_zonerefs->zone))
2131 return NULL;
2132
c0ff7453 2133 get_mems_allowed();
5117f45d
MG
2134 /* The preferred zone is used for statistics later */
2135 first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone);
c0ff7453
MX
2136 if (!preferred_zone) {
2137 put_mems_allowed();
5117f45d 2138 return NULL;
c0ff7453 2139 }
5117f45d
MG
2140
2141 /* First allocation attempt */
11e33f6a 2142 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
5117f45d 2143 zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
3dd28266 2144 preferred_zone, migratetype);
11e33f6a
MG
2145 if (unlikely(!page))
2146 page = __alloc_pages_slowpath(gfp_mask, order,
5117f45d 2147 zonelist, high_zoneidx, nodemask,
3dd28266 2148 preferred_zone, migratetype);
c0ff7453 2149 put_mems_allowed();
11e33f6a 2150
4b4f278c 2151 trace_mm_page_alloc(page, order, gfp_mask, migratetype);
11e33f6a 2152 return page;
1da177e4 2153}
d239171e 2154EXPORT_SYMBOL(__alloc_pages_nodemask);
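/*
 * Illustrative call chain (a sketch of how the common wrappers in
 * <linux/gfp.h> are expected to reach this function; see that header for the
 * authoritative definitions):
 *
 *   alloc_pages(gfp, order)
 *     -> alloc_pages_node(numa_node_id(), gfp, order)
 *       -> __alloc_pages(gfp, order, node_zonelist(nid, gfp))
 *         -> __alloc_pages_nodemask(gfp, order, zonelist, NULL)
 */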
1da177e4
LT
2155
2156/*
2157 * Common helper functions.
2158 */
920c7a5d 2159unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
1da177e4 2160{
945a1113
AM
2161 struct page *page;
2162
2163 /*
2164 * __get_free_pages() returns a 32-bit address, which cannot represent
2165 * a highmem page
2166 */
2167 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
2168
1da177e4
LT
2169 page = alloc_pages(gfp_mask, order);
2170 if (!page)
2171 return 0;
2172 return (unsigned long) page_address(page);
2173}
1da177e4
LT
2174EXPORT_SYMBOL(__get_free_pages);
2175
920c7a5d 2176unsigned long get_zeroed_page(gfp_t gfp_mask)
1da177e4 2177{
945a1113 2178 return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
1da177e4 2179}
1da177e4
LT
2180EXPORT_SYMBOL(get_zeroed_page);
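/*
 * Typical usage of the helpers above (illustrative sketch only):
 *
 *	unsigned long scratch = get_zeroed_page(GFP_KERNEL);
 *	if (!scratch)
 *		return -ENOMEM;
 *	...
 *	free_page(scratch);
 *
 * The returned value is a kernel virtual address, which is why __GFP_HIGHMEM
 * is rejected by the VM_BUG_ON() in __get_free_pages() above.
 */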
2181
2182void __pagevec_free(struct pagevec *pvec)
2183{
2184 int i = pagevec_count(pvec);
2185
4b4f278c
MG
2186 while (--i >= 0) {
2187 trace_mm_pagevec_free(pvec->pages[i], pvec->cold);
1da177e4 2188 free_hot_cold_page(pvec->pages[i], pvec->cold);
4b4f278c 2189 }
1da177e4
LT
2190}
2191
920c7a5d 2192void __free_pages(struct page *page, unsigned int order)
1da177e4 2193{
b5810039 2194 if (put_page_testzero(page)) {
1da177e4 2195 if (order == 0)
fc91668e 2196 free_hot_cold_page(page, 0);
1da177e4
LT
2197 else
2198 __free_pages_ok(page, order);
2199 }
2200}
2201
2202EXPORT_SYMBOL(__free_pages);
2203
920c7a5d 2204void free_pages(unsigned long addr, unsigned int order)
1da177e4
LT
2205{
2206 if (addr != 0) {
725d704e 2207 VM_BUG_ON(!virt_addr_valid((void *)addr));
1da177e4
LT
2208 __free_pages(virt_to_page((void *)addr), order);
2209 }
2210}
2211
2212EXPORT_SYMBOL(free_pages);
2213
2be0ffe2
TT
2214/**
 2215 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
2216 * @size: the number of bytes to allocate
2217 * @gfp_mask: GFP flags for the allocation
2218 *
2219 * This function is similar to alloc_pages(), except that it allocates the
2220 * minimum number of pages to satisfy the request. alloc_pages() can only
2221 * allocate memory in power-of-two pages.
2222 *
2223 * This function is also limited by MAX_ORDER.
2224 *
2225 * Memory allocated by this function must be released by free_pages_exact().
2226 */
2227void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
2228{
2229 unsigned int order = get_order(size);
2230 unsigned long addr;
2231
2232 addr = __get_free_pages(gfp_mask, order);
2233 if (addr) {
2234 unsigned long alloc_end = addr + (PAGE_SIZE << order);
2235 unsigned long used = addr + PAGE_ALIGN(size);
2236
5bfd7560 2237 split_page(virt_to_page((void *)addr), order);
2be0ffe2
TT
2238 while (used < alloc_end) {
2239 free_page(used);
2240 used += PAGE_SIZE;
2241 }
2242 }
2243
2244 return (void *)addr;
2245}
2246EXPORT_SYMBOL(alloc_pages_exact);
2247
2248/**
2249 * free_pages_exact - release memory allocated via alloc_pages_exact()
2250 * @virt: the value returned by alloc_pages_exact.
2251 * @size: size of allocation, same value as passed to alloc_pages_exact().
2252 *
2253 * Release the memory allocated by a previous call to alloc_pages_exact.
2254 */
2255void free_pages_exact(void *virt, size_t size)
2256{
2257 unsigned long addr = (unsigned long)virt;
2258 unsigned long end = addr + PAGE_ALIGN(size);
2259
2260 while (addr < end) {
2261 free_page(addr);
2262 addr += PAGE_SIZE;
2263 }
2264}
2265EXPORT_SYMBOL(free_pages_exact);
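/*
 * Worked example for the pair above (an illustrative sketch, assuming 4 KiB
 * pages): a request for 3 * PAGE_SIZE = 12288 bytes rounds up to order 2
 * (16 KiB); split_page() then lets alloc_pages_exact() return the unused
 * fourth page immediately, so only three pages stay allocated:
 *
 *	void *buf = alloc_pages_exact(3 * PAGE_SIZE, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free_pages_exact(buf, 3 * PAGE_SIZE);
 */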
2266
1da177e4
LT
2267static unsigned int nr_free_zone_pages(int offset)
2268{
dd1a239f 2269 struct zoneref *z;
54a6eb5c
MG
2270 struct zone *zone;
2271
e310fd43 2272 /* Just pick one node, since fallback list is circular */
1da177e4
LT
2273 unsigned int sum = 0;
2274
0e88460d 2275 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
1da177e4 2276
54a6eb5c 2277 for_each_zone_zonelist(zone, z, zonelist, offset) {
e310fd43 2278 unsigned long size = zone->present_pages;
41858966 2279 unsigned long high = high_wmark_pages(zone);
e310fd43
MB
2280 if (size > high)
2281 sum += size - high;
1da177e4
LT
2282 }
2283
2284 return sum;
2285}
2286
2287/*
2288 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
2289 */
2290unsigned int nr_free_buffer_pages(void)
2291{
af4ca457 2292 return nr_free_zone_pages(gfp_zone(GFP_USER));
1da177e4 2293}
c2f1a551 2294EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
1da177e4
LT
2295
2296/*
2297 * Amount of free RAM allocatable within all zones
2298 */
2299unsigned int nr_free_pagecache_pages(void)
2300{
2a1e274a 2301 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
1da177e4 2302}
08e0f6a9
CL
2303
2304static inline void show_node(struct zone *zone)
1da177e4 2305{
08e0f6a9 2306 if (NUMA_BUILD)
25ba77c1 2307 printk("Node %d ", zone_to_nid(zone));
1da177e4 2308}
1da177e4 2309
1da177e4
LT
2310void si_meminfo(struct sysinfo *val)
2311{
2312 val->totalram = totalram_pages;
2313 val->sharedram = 0;
d23ad423 2314 val->freeram = global_page_state(NR_FREE_PAGES);
1da177e4 2315 val->bufferram = nr_blockdev_pages();
1da177e4
LT
2316 val->totalhigh = totalhigh_pages;
2317 val->freehigh = nr_free_highpages();
1da177e4
LT
2318 val->mem_unit = PAGE_SIZE;
2319}
2320
2321EXPORT_SYMBOL(si_meminfo);
2322
2323#ifdef CONFIG_NUMA
2324void si_meminfo_node(struct sysinfo *val, int nid)
2325{
2326 pg_data_t *pgdat = NODE_DATA(nid);
2327
2328 val->totalram = pgdat->node_present_pages;
d23ad423 2329 val->freeram = node_page_state(nid, NR_FREE_PAGES);
98d2b0eb 2330#ifdef CONFIG_HIGHMEM
1da177e4 2331 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
d23ad423
CL
2332 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
2333 NR_FREE_PAGES);
98d2b0eb
CL
2334#else
2335 val->totalhigh = 0;
2336 val->freehigh = 0;
2337#endif
1da177e4
LT
2338 val->mem_unit = PAGE_SIZE;
2339}
2340#endif
2341
2342#define K(x) ((x) << (PAGE_SHIFT-10))
2343
2344/*
2345 * Show free area list (used inside shift_scroll-lock stuff)
2346 * We also calculate the percentage fragmentation. We do this by counting the
2347 * memory on each free list with the exception of the first item on the list.
2348 */
2349void show_free_areas(void)
2350{
c7241913 2351 int cpu;
1da177e4
LT
2352 struct zone *zone;
2353
ee99c71c 2354 for_each_populated_zone(zone) {
c7241913
JS
2355 show_node(zone);
2356 printk("%s per-cpu:\n", zone->name);
1da177e4 2357
6b482c67 2358 for_each_online_cpu(cpu) {
1da177e4
LT
2359 struct per_cpu_pageset *pageset;
2360
99dcc3e5 2361 pageset = per_cpu_ptr(zone->pageset, cpu);
1da177e4 2362
3dfa5721
CL
2363 printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
2364 cpu, pageset->pcp.high,
2365 pageset->pcp.batch, pageset->pcp.count);
1da177e4
LT
2366 }
2367 }
2368
a731286d
KM
2369 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
2370 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
7b854121 2371 " unevictable:%lu"
b76146ed 2372 " dirty:%lu writeback:%lu unstable:%lu\n"
3701b033 2373 " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
4b02108a 2374 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
4f98a2fe 2375 global_page_state(NR_ACTIVE_ANON),
4f98a2fe 2376 global_page_state(NR_INACTIVE_ANON),
a731286d
KM
2377 global_page_state(NR_ISOLATED_ANON),
2378 global_page_state(NR_ACTIVE_FILE),
4f98a2fe 2379 global_page_state(NR_INACTIVE_FILE),
a731286d 2380 global_page_state(NR_ISOLATED_FILE),
7b854121 2381 global_page_state(NR_UNEVICTABLE),
b1e7a8fd 2382 global_page_state(NR_FILE_DIRTY),
ce866b34 2383 global_page_state(NR_WRITEBACK),
fd39fc85 2384 global_page_state(NR_UNSTABLE_NFS),
d23ad423 2385 global_page_state(NR_FREE_PAGES),
3701b033
KM
2386 global_page_state(NR_SLAB_RECLAIMABLE),
2387 global_page_state(NR_SLAB_UNRECLAIMABLE),
65ba55f5 2388 global_page_state(NR_FILE_MAPPED),
4b02108a 2389 global_page_state(NR_SHMEM),
a25700a5
AM
2390 global_page_state(NR_PAGETABLE),
2391 global_page_state(NR_BOUNCE));
1da177e4 2392
ee99c71c 2393 for_each_populated_zone(zone) {
1da177e4
LT
2394 int i;
2395
2396 show_node(zone);
2397 printk("%s"
2398 " free:%lukB"
2399 " min:%lukB"
2400 " low:%lukB"
2401 " high:%lukB"
4f98a2fe
RR
2402 " active_anon:%lukB"
2403 " inactive_anon:%lukB"
2404 " active_file:%lukB"
2405 " inactive_file:%lukB"
7b854121 2406 " unevictable:%lukB"
a731286d
KM
2407 " isolated(anon):%lukB"
2408 " isolated(file):%lukB"
1da177e4 2409 " present:%lukB"
4a0aa73f
KM
2410 " mlocked:%lukB"
2411 " dirty:%lukB"
2412 " writeback:%lukB"
2413 " mapped:%lukB"
4b02108a 2414 " shmem:%lukB"
4a0aa73f
KM
2415 " slab_reclaimable:%lukB"
2416 " slab_unreclaimable:%lukB"
c6a7f572 2417 " kernel_stack:%lukB"
4a0aa73f
KM
2418 " pagetables:%lukB"
2419 " unstable:%lukB"
2420 " bounce:%lukB"
2421 " writeback_tmp:%lukB"
1da177e4
LT
2422 " pages_scanned:%lu"
2423 " all_unreclaimable? %s"
2424 "\n",
2425 zone->name,
d23ad423 2426 K(zone_page_state(zone, NR_FREE_PAGES)),
41858966
MG
2427 K(min_wmark_pages(zone)),
2428 K(low_wmark_pages(zone)),
2429 K(high_wmark_pages(zone)),
4f98a2fe
RR
2430 K(zone_page_state(zone, NR_ACTIVE_ANON)),
2431 K(zone_page_state(zone, NR_INACTIVE_ANON)),
2432 K(zone_page_state(zone, NR_ACTIVE_FILE)),
2433 K(zone_page_state(zone, NR_INACTIVE_FILE)),
7b854121 2434 K(zone_page_state(zone, NR_UNEVICTABLE)),
a731286d
KM
2435 K(zone_page_state(zone, NR_ISOLATED_ANON)),
2436 K(zone_page_state(zone, NR_ISOLATED_FILE)),
1da177e4 2437 K(zone->present_pages),
4a0aa73f
KM
2438 K(zone_page_state(zone, NR_MLOCK)),
2439 K(zone_page_state(zone, NR_FILE_DIRTY)),
2440 K(zone_page_state(zone, NR_WRITEBACK)),
2441 K(zone_page_state(zone, NR_FILE_MAPPED)),
4b02108a 2442 K(zone_page_state(zone, NR_SHMEM)),
4a0aa73f
KM
2443 K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
2444 K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
c6a7f572
KM
2445 zone_page_state(zone, NR_KERNEL_STACK) *
2446 THREAD_SIZE / 1024,
4a0aa73f
KM
2447 K(zone_page_state(zone, NR_PAGETABLE)),
2448 K(zone_page_state(zone, NR_UNSTABLE_NFS)),
2449 K(zone_page_state(zone, NR_BOUNCE)),
2450 K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
1da177e4 2451 zone->pages_scanned,
93e4a89a 2452 (zone->all_unreclaimable ? "yes" : "no")
1da177e4
LT
2453 );
2454 printk("lowmem_reserve[]:");
2455 for (i = 0; i < MAX_NR_ZONES; i++)
2456 printk(" %lu", zone->lowmem_reserve[i]);
2457 printk("\n");
2458 }
2459
ee99c71c 2460 for_each_populated_zone(zone) {
8f9de51a 2461 unsigned long nr[MAX_ORDER], flags, order, total = 0;
1da177e4
LT
2462
2463 show_node(zone);
2464 printk("%s: ", zone->name);
1da177e4
LT
2465
2466 spin_lock_irqsave(&zone->lock, flags);
2467 for (order = 0; order < MAX_ORDER; order++) {
8f9de51a
KK
2468 nr[order] = zone->free_area[order].nr_free;
2469 total += nr[order] << order;
1da177e4
LT
2470 }
2471 spin_unlock_irqrestore(&zone->lock, flags);
8f9de51a
KK
2472 for (order = 0; order < MAX_ORDER; order++)
2473 printk("%lu*%lukB ", nr[order], K(1UL) << order);
1da177e4
LT
2474 printk("= %lukB\n", K(total));
2475 }
2476
e6f3602d
LW
2477 printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
2478
1da177e4
LT
2479 show_swap_cache_info();
2480}
2481
19770b32
MG
2482static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
2483{
2484 zoneref->zone = zone;
2485 zoneref->zone_idx = zone_idx(zone);
2486}
2487
1da177e4
LT
2488/*
2489 * Builds allocation fallback zone lists.
1a93205b
CL
2490 *
2491 * Add all populated zones of a node to the zonelist.
1da177e4 2492 */
f0c0b2b8
KH
2493static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
2494 int nr_zones, enum zone_type zone_type)
1da177e4 2495{
1a93205b
CL
2496 struct zone *zone;
2497
98d2b0eb 2498 BUG_ON(zone_type >= MAX_NR_ZONES);
2f6726e5 2499 zone_type++;
02a68a5e
CL
2500
2501 do {
2f6726e5 2502 zone_type--;
070f8032 2503 zone = pgdat->node_zones + zone_type;
1a93205b 2504 if (populated_zone(zone)) {
dd1a239f
MG
2505 zoneref_set_zone(zone,
2506 &zonelist->_zonerefs[nr_zones++]);
070f8032 2507 check_highest_zone(zone_type);
1da177e4 2508 }
02a68a5e 2509
2f6726e5 2510 } while (zone_type);
070f8032 2511 return nr_zones;
1da177e4
LT
2512}
2513
f0c0b2b8
KH
2514
2515/*
2516 * zonelist_order:
2517 * 0 = automatic detection of better ordering.
2518 * 1 = order by ([node] distance, -zonetype)
2519 * 2 = order by (-zonetype, [node] distance)
2520 *
2521 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
2522 * the same zonelist. So only NUMA can configure this param.
2523 */
2524#define ZONELIST_ORDER_DEFAULT 0
2525#define ZONELIST_ORDER_NODE 1
2526#define ZONELIST_ORDER_ZONE 2
2527
2528/* zonelist order in the kernel.
2529 * set_zonelist_order() will set this to NODE or ZONE.
2530 */
2531static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
2532static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
2533
2534
1da177e4 2535#ifdef CONFIG_NUMA
f0c0b2b8
KH
 2536/* The value the user specified; may be changed by config */
2537static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2538/* string for sysctl */
2539#define NUMA_ZONELIST_ORDER_LEN 16
2540char numa_zonelist_order[16] = "default";
2541
2542/*
 2543 * interface for configuring zonelist ordering.
 2544 * command line option "numa_zonelist_order"
 2545 *	= "[dD]efault"	- default, automatic configuration.
 2546 *	= "[nN]ode"	- order by node locality, then by zone within node
 2547 *	= "[zZ]one"	- order by zone, then by locality within zone
2548 */
2549
2550static int __parse_numa_zonelist_order(char *s)
2551{
2552 if (*s == 'd' || *s == 'D') {
2553 user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2554 } else if (*s == 'n' || *s == 'N') {
2555 user_zonelist_order = ZONELIST_ORDER_NODE;
2556 } else if (*s == 'z' || *s == 'Z') {
2557 user_zonelist_order = ZONELIST_ORDER_ZONE;
2558 } else {
2559 printk(KERN_WARNING
2560 "Ignoring invalid numa_zonelist_order value: "
2561 "%s\n", s);
2562 return -EINVAL;
2563 }
2564 return 0;
2565}
2566
2567static __init int setup_numa_zonelist_order(char *s)
2568{
2569 if (s)
2570 return __parse_numa_zonelist_order(s);
2571 return 0;
2572}
2573early_param("numa_zonelist_order", setup_numa_zonelist_order);
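/*
 * Usage example (boot time): passing "numa_zonelist_order=zone" on the kernel
 * command line selects ZONE ordering, "numa_zonelist_order=node" selects NODE
 * ordering, and "numa_zonelist_order=default" leaves the choice to
 * default_zonelist_order().  The same strings can be written to the
 * numa_zonelist_order sysctl at run time via the handler below.
 */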
2574
2575/*
2576 * sysctl handler for numa_zonelist_order
2577 */
2578int numa_zonelist_order_handler(ctl_table *table, int write,
8d65af78 2579 void __user *buffer, size_t *length,
f0c0b2b8
KH
2580 loff_t *ppos)
2581{
2582 char saved_string[NUMA_ZONELIST_ORDER_LEN];
2583 int ret;
443c6f14 2584 static DEFINE_MUTEX(zl_order_mutex);
f0c0b2b8 2585
443c6f14 2586 mutex_lock(&zl_order_mutex);
f0c0b2b8 2587 if (write)
443c6f14 2588 strcpy(saved_string, (char*)table->data);
8d65af78 2589 ret = proc_dostring(table, write, buffer, length, ppos);
f0c0b2b8 2590 if (ret)
443c6f14 2591 goto out;
f0c0b2b8
KH
2592 if (write) {
2593 int oldval = user_zonelist_order;
2594 if (__parse_numa_zonelist_order((char*)table->data)) {
2595 /*
2596 * bogus value. restore saved string
2597 */
2598 strncpy((char*)table->data, saved_string,
2599 NUMA_ZONELIST_ORDER_LEN);
2600 user_zonelist_order = oldval;
4eaf3f64
HL
2601 } else if (oldval != user_zonelist_order) {
2602 mutex_lock(&zonelists_mutex);
1f522509 2603 build_all_zonelists(NULL);
4eaf3f64
HL
2604 mutex_unlock(&zonelists_mutex);
2605 }
f0c0b2b8 2606 }
443c6f14
AK
2607out:
2608 mutex_unlock(&zl_order_mutex);
2609 return ret;
f0c0b2b8
KH
2610}
2611
2612
62bc62a8 2613#define MAX_NODE_LOAD (nr_online_nodes)
f0c0b2b8
KH
2614static int node_load[MAX_NUMNODES];
2615
1da177e4 2616/**
4dc3b16b 2617 * find_next_best_node - find the next node that should appear in a given node's fallback list
1da177e4
LT
2618 * @node: node whose fallback list we're appending
2619 * @used_node_mask: nodemask_t of already used nodes
2620 *
2621 * We use a number of factors to determine which is the next node that should
2622 * appear on a given node's fallback list. The node should not have appeared
2623 * already in @node's fallback list, and it should be the next closest node
2624 * according to the distance array (which contains arbitrary distance values
2625 * from each node to each node in the system), and should also prefer nodes
2626 * with no CPUs, since presumably they'll have very little allocation pressure
2627 * on them otherwise.
2628 * It returns -1 if no node is found.
2629 */
f0c0b2b8 2630static int find_next_best_node(int node, nodemask_t *used_node_mask)
1da177e4 2631{
4cf808eb 2632 int n, val;
1da177e4
LT
2633 int min_val = INT_MAX;
2634 int best_node = -1;
a70f7302 2635 const struct cpumask *tmp = cpumask_of_node(0);
1da177e4 2636
4cf808eb
LT
2637 /* Use the local node if we haven't already */
2638 if (!node_isset(node, *used_node_mask)) {
2639 node_set(node, *used_node_mask);
2640 return node;
2641 }
1da177e4 2642
37b07e41 2643 for_each_node_state(n, N_HIGH_MEMORY) {
1da177e4
LT
2644
2645 /* Don't want a node to appear more than once */
2646 if (node_isset(n, *used_node_mask))
2647 continue;
2648
1da177e4
LT
2649 /* Use the distance array to find the distance */
2650 val = node_distance(node, n);
2651
4cf808eb
LT
2652 /* Penalize nodes under us ("prefer the next node") */
2653 val += (n < node);
2654
1da177e4 2655 /* Give preference to headless and unused nodes */
a70f7302
RR
2656 tmp = cpumask_of_node(n);
2657 if (!cpumask_empty(tmp))
1da177e4
LT
2658 val += PENALTY_FOR_NODE_WITH_CPUS;
2659
2660 /* Slight preference for less loaded node */
2661 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
2662 val += node_load[n];
2663
2664 if (val < min_val) {
2665 min_val = val;
2666 best_node = n;
2667 }
2668 }
2669
2670 if (best_node >= 0)
2671 node_set(best_node, *used_node_mask);
2672
2673 return best_node;
2674}
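/*
 * Scoring sketch (derived from the loop above): for a candidate node n the
 * value compared is
 *
 *	val = node_distance(node, n)
 *	      + (n < node)                        (slight "next node" penalty)
 *	      + PENALTY_FOR_NODE_WITH_CPUS, if n has CPUs;
 *	val = val * MAX_NODE_LOAD * MAX_NUMNODES + node_load[n];
 *
 * so distance dominates, CPU-less nodes win ties, and node_load[] only breaks
 * ties among otherwise equivalent nodes.
 */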
2675
f0c0b2b8
KH
2676
2677/*
2678 * Build zonelists ordered by node and zones within node.
2679 * This results in maximum locality--normal zone overflows into local
2680 * DMA zone, if any--but risks exhausting DMA zone.
2681 */
2682static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
1da177e4 2683{
f0c0b2b8 2684 int j;
1da177e4 2685 struct zonelist *zonelist;
f0c0b2b8 2686
54a6eb5c 2687 zonelist = &pgdat->node_zonelists[0];
dd1a239f 2688 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
54a6eb5c
MG
2689 ;
2690 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2691 MAX_NR_ZONES - 1);
dd1a239f
MG
2692 zonelist->_zonerefs[j].zone = NULL;
2693 zonelist->_zonerefs[j].zone_idx = 0;
f0c0b2b8
KH
2694}
2695
523b9458
CL
2696/*
2697 * Build gfp_thisnode zonelists
2698 */
2699static void build_thisnode_zonelists(pg_data_t *pgdat)
2700{
523b9458
CL
2701 int j;
2702 struct zonelist *zonelist;
2703
54a6eb5c
MG
2704 zonelist = &pgdat->node_zonelists[1];
2705 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
dd1a239f
MG
2706 zonelist->_zonerefs[j].zone = NULL;
2707 zonelist->_zonerefs[j].zone_idx = 0;
523b9458
CL
2708}
2709
f0c0b2b8
KH
2710/*
2711 * Build zonelists ordered by zone and nodes within zones.
2712 * This results in conserving DMA zone[s] until all Normal memory is
2713 * exhausted, but results in overflowing to remote node while memory
2714 * may still exist in local DMA zone.
2715 */
2716static int node_order[MAX_NUMNODES];
2717
2718static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
2719{
f0c0b2b8
KH
2720 int pos, j, node;
2721 int zone_type; /* needs to be signed */
2722 struct zone *z;
2723 struct zonelist *zonelist;
2724
54a6eb5c
MG
2725 zonelist = &pgdat->node_zonelists[0];
2726 pos = 0;
2727 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
2728 for (j = 0; j < nr_nodes; j++) {
2729 node = node_order[j];
2730 z = &NODE_DATA(node)->node_zones[zone_type];
2731 if (populated_zone(z)) {
dd1a239f
MG
2732 zoneref_set_zone(z,
2733 &zonelist->_zonerefs[pos++]);
54a6eb5c 2734 check_highest_zone(zone_type);
f0c0b2b8
KH
2735 }
2736 }
f0c0b2b8 2737 }
dd1a239f
MG
2738 zonelist->_zonerefs[pos].zone = NULL;
2739 zonelist->_zonerefs[pos].zone_idx = 0;
f0c0b2b8
KH
2740}
2741
2742static int default_zonelist_order(void)
2743{
2744 int nid, zone_type;
 2745	unsigned long low_kmem_size, total_size;
2746 struct zone *z;
2747 int average_size;
2748 /*
88393161 2749	 * ZONE_DMA and ZONE_DMA32 can be a very small area of the system.
f0c0b2b8
KH
2750 * If they are really small and used heavily, the system can fall
2751 * into OOM very easily.
e325c90f 2752	 * This function detects ZONE_DMA/DMA32 size and configures zone order.
f0c0b2b8
KH
2753 */
 2754	/* Is there ZONE_NORMAL? (e.g. ppc has only a DMA zone.) */
2755 low_kmem_size = 0;
2756 total_size = 0;
2757 for_each_online_node(nid) {
2758 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2759 z = &NODE_DATA(nid)->node_zones[zone_type];
2760 if (populated_zone(z)) {
2761 if (zone_type < ZONE_NORMAL)
2762 low_kmem_size += z->present_pages;
2763 total_size += z->present_pages;
e325c90f
DR
2764 } else if (zone_type == ZONE_NORMAL) {
2765 /*
2766 * If any node has only lowmem, then node order
2767 * is preferred to allow kernel allocations
2768 * locally; otherwise, they can easily infringe
2769 * on other nodes when there is an abundance of
2770 * lowmem available to allocate from.
2771 */
2772 return ZONELIST_ORDER_NODE;
f0c0b2b8
KH
2773 }
2774 }
2775 }
 2776	if (!low_kmem_size ||  /* there is no DMA area. */
2777 low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
2778 return ZONELIST_ORDER_NODE;
2779 /*
2780 * look into each node's config.
 2781	 * If there is a node whose DMA/DMA32 memory makes up a very large
 2782	 * share of its local memory, NODE_ORDER may be suitable.
2783 */
37b07e41
LS
2784 average_size = total_size /
2785 (nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
f0c0b2b8
KH
2786 for_each_online_node(nid) {
2787 low_kmem_size = 0;
2788 total_size = 0;
2789 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2790 z = &NODE_DATA(nid)->node_zones[zone_type];
2791 if (populated_zone(z)) {
2792 if (zone_type < ZONE_NORMAL)
2793 low_kmem_size += z->present_pages;
2794 total_size += z->present_pages;
2795 }
2796 }
2797 if (low_kmem_size &&
2798 total_size > average_size && /* ignore small node */
2799 low_kmem_size > total_size * 70/100)
2800 return ZONELIST_ORDER_NODE;
2801 }
2802 return ZONELIST_ORDER_ZONE;
2803}
2804
2805static void set_zonelist_order(void)
2806{
2807 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
2808 current_zonelist_order = default_zonelist_order();
2809 else
2810 current_zonelist_order = user_zonelist_order;
2811}
2812
2813static void build_zonelists(pg_data_t *pgdat)
2814{
2815 int j, node, load;
2816 enum zone_type i;
1da177e4 2817 nodemask_t used_mask;
f0c0b2b8
KH
2818 int local_node, prev_node;
2819 struct zonelist *zonelist;
2820 int order = current_zonelist_order;
1da177e4
LT
2821
2822 /* initialize zonelists */
523b9458 2823 for (i = 0; i < MAX_ZONELISTS; i++) {
1da177e4 2824 zonelist = pgdat->node_zonelists + i;
dd1a239f
MG
2825 zonelist->_zonerefs[0].zone = NULL;
2826 zonelist->_zonerefs[0].zone_idx = 0;
1da177e4
LT
2827 }
2828
2829 /* NUMA-aware ordering of nodes */
2830 local_node = pgdat->node_id;
62bc62a8 2831 load = nr_online_nodes;
1da177e4
LT
2832 prev_node = local_node;
2833 nodes_clear(used_mask);
f0c0b2b8 2834
f0c0b2b8
KH
2835 memset(node_order, 0, sizeof(node_order));
2836 j = 0;
2837
1da177e4 2838 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
9eeff239
CL
2839 int distance = node_distance(local_node, node);
2840
2841 /*
2842 * If another node is sufficiently far away then it is better
2843 * to reclaim pages in a zone before going off node.
2844 */
2845 if (distance > RECLAIM_DISTANCE)
2846 zone_reclaim_mode = 1;
2847
1da177e4
LT
2848 /*
2849 * We don't want to pressure a particular node.
2850 * So adding penalty to the first node in same
2851 * distance group to make it round-robin.
2852 */
9eeff239 2853 if (distance != node_distance(local_node, prev_node))
f0c0b2b8
KH
2854 node_load[node] = load;
2855
1da177e4
LT
2856 prev_node = node;
2857 load--;
f0c0b2b8
KH
2858 if (order == ZONELIST_ORDER_NODE)
2859 build_zonelists_in_node_order(pgdat, node);
2860 else
2861 node_order[j++] = node; /* remember order */
2862 }
1da177e4 2863
f0c0b2b8
KH
2864 if (order == ZONELIST_ORDER_ZONE) {
2865 /* calculate node order -- i.e., DMA last! */
2866 build_zonelists_in_zone_order(pgdat, j);
1da177e4 2867 }
523b9458
CL
2868
2869 build_thisnode_zonelists(pgdat);
1da177e4
LT
2870}
2871
9276b1bc 2872/* Construct the zonelist performance cache - see further mmzone.h */
f0c0b2b8 2873static void build_zonelist_cache(pg_data_t *pgdat)
9276b1bc 2874{
54a6eb5c
MG
2875 struct zonelist *zonelist;
2876 struct zonelist_cache *zlc;
dd1a239f 2877 struct zoneref *z;
9276b1bc 2878
54a6eb5c
MG
2879 zonelist = &pgdat->node_zonelists[0];
2880 zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
2881 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
dd1a239f
MG
2882 for (z = zonelist->_zonerefs; z->zone; z++)
2883 zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
9276b1bc
PJ
2884}
2885
7aac7898
LS
2886#ifdef CONFIG_HAVE_MEMORYLESS_NODES
2887/*
2888 * Return node id of node used for "local" allocations.
2889 * I.e., first node id of first zone in arg node's generic zonelist.
2890 * Used for initializing percpu 'numa_mem', which is used primarily
2891 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
2892 */
2893int local_memory_node(int node)
2894{
2895 struct zone *zone;
2896
2897 (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
2898 gfp_zone(GFP_KERNEL),
2899 NULL,
2900 &zone);
2901 return zone->node;
2902}
2903#endif
f0c0b2b8 2904
1da177e4
LT
2905#else /* CONFIG_NUMA */
2906
f0c0b2b8
KH
2907static void set_zonelist_order(void)
2908{
2909 current_zonelist_order = ZONELIST_ORDER_ZONE;
2910}
2911
2912static void build_zonelists(pg_data_t *pgdat)
1da177e4 2913{
19655d34 2914 int node, local_node;
54a6eb5c
MG
2915 enum zone_type j;
2916 struct zonelist *zonelist;
1da177e4
LT
2917
2918 local_node = pgdat->node_id;
1da177e4 2919
54a6eb5c
MG
2920 zonelist = &pgdat->node_zonelists[0];
2921 j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
1da177e4 2922
54a6eb5c
MG
2923 /*
2924 * Now we build the zonelist so that it contains the zones
2925 * of all the other nodes.
2926 * We don't want to pressure a particular node, so when
2927 * building the zones for node N, we make sure that the
2928 * zones coming right after the local ones are those from
2929 * node N+1 (modulo N)
2930 */
2931 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
2932 if (!node_online(node))
2933 continue;
2934 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2935 MAX_NR_ZONES - 1);
1da177e4 2936 }
54a6eb5c
MG
2937 for (node = 0; node < local_node; node++) {
2938 if (!node_online(node))
2939 continue;
2940 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2941 MAX_NR_ZONES - 1);
2942 }
2943
dd1a239f
MG
2944 zonelist->_zonerefs[j].zone = NULL;
2945 zonelist->_zonerefs[j].zone_idx = 0;
1da177e4
LT
2946}
2947
9276b1bc 2948/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
f0c0b2b8 2949static void build_zonelist_cache(pg_data_t *pgdat)
9276b1bc 2950{
54a6eb5c 2951 pgdat->node_zonelists[0].zlcache_ptr = NULL;
9276b1bc
PJ
2952}
2953
1da177e4
LT
2954#endif /* CONFIG_NUMA */
2955
99dcc3e5
CL
2956/*
2957 * Boot pageset table. One per cpu which is going to be used for all
2958 * zones and all nodes. The parameters will be set in such a way
2959 * that an item put on a list will immediately be handed over to
2960 * the buddy list. This is safe since pageset manipulation is done
2961 * with interrupts disabled.
2962 *
2963 * The boot_pagesets must be kept even after bootup is complete for
2964 * unused processors and/or zones. They do play a role for bootstrapping
2965 * hotplugged processors.
2966 *
2967 * zoneinfo_show() and maybe other functions do
2968 * not check if the processor is online before following the pageset pointer.
2969 * Other parts of the kernel may not check if the zone is available.
2970 */
2971static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
2972static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
1f522509 2973static void setup_zone_pageset(struct zone *zone);
99dcc3e5 2974
4eaf3f64
HL
2975/*
2976 * Global mutex to protect against size modification of zonelists
2977 * as well as to serialize pageset setup for the new populated zone.
2978 */
2979DEFINE_MUTEX(zonelists_mutex);
2980
9b1a4d38 2981/* Return type is int just to match what stop_machine() expects */
1f522509 2982static __init_refok int __build_all_zonelists(void *data)
1da177e4 2983{
6811378e 2984 int nid;
99dcc3e5 2985 int cpu;
9276b1bc 2986
7f9cfb31
BL
2987#ifdef CONFIG_NUMA
2988 memset(node_load, 0, sizeof(node_load));
2989#endif
9276b1bc 2990 for_each_online_node(nid) {
7ea1530a
CL
2991 pg_data_t *pgdat = NODE_DATA(nid);
2992
2993 build_zonelists(pgdat);
2994 build_zonelist_cache(pgdat);
9276b1bc 2995 }
99dcc3e5 2996
1f522509
HL
2997#ifdef CONFIG_MEMORY_HOTPLUG
2998 /* Setup real pagesets for the new zone */
2999 if (data) {
3000 struct zone *zone = data;
3001 setup_zone_pageset(zone);
3002 }
3003#endif
3004
99dcc3e5
CL
3005 /*
3006 * Initialize the boot_pagesets that are going to be used
3007 * for bootstrapping processors. The real pagesets for
3008 * each zone will be allocated later when the per cpu
3009 * allocator is available.
3010 *
3011 * boot_pagesets are used also for bootstrapping offline
3012 * cpus if the system is already booted because the pagesets
3013 * are needed to initialize allocators on a specific cpu too.
3014 * F.e. the percpu allocator needs the page allocator which
3015 * needs the percpu allocator in order to allocate its pagesets
3016 * (a chicken-egg dilemma).
3017 */
7aac7898 3018 for_each_possible_cpu(cpu) {
99dcc3e5
CL
3019 setup_pageset(&per_cpu(boot_pageset, cpu), 0);
3020
7aac7898
LS
3021#ifdef CONFIG_HAVE_MEMORYLESS_NODES
3022 /*
3023 * We now know the "local memory node" for each node--
3024 * i.e., the node of the first zone in the generic zonelist.
3025 * Set up numa_mem percpu variable for on-line cpus. During
3026 * boot, only the boot cpu should be on-line; we'll init the
3027 * secondary cpus' numa_mem as they come on-line. During
3028 * node/memory hotplug, we'll fixup all on-line cpus.
3029 */
3030 if (cpu_online(cpu))
3031 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
3032#endif
3033 }
3034
6811378e
YG
3035 return 0;
3036}
3037
4eaf3f64
HL
3038/*
3039 * Called with zonelists_mutex held always
3040 * unless system_state == SYSTEM_BOOTING.
3041 */
1f522509 3042void build_all_zonelists(void *data)
6811378e 3043{
f0c0b2b8
KH
3044 set_zonelist_order();
3045
6811378e 3046 if (system_state == SYSTEM_BOOTING) {
423b41d7 3047 __build_all_zonelists(NULL);
68ad8df4 3048 mminit_verify_zonelist();
6811378e
YG
3049 cpuset_init_current_mems_allowed();
3050 } else {
183ff22b 3051 /* we have to stop all cpus to guarantee there is no user
6811378e 3052 of zonelist */
1f522509 3053 stop_machine(__build_all_zonelists, data, NULL);
6811378e
YG
3054 /* cpuset refresh routine should be here */
3055 }
bd1e22b8 3056 vm_total_pages = nr_free_pagecache_pages();
9ef9acb0
MG
3057 /*
3058 * Disable grouping by mobility if the number of pages in the
3059 * system is too low to allow the mechanism to work. It would be
3060 * more accurate, but expensive to check per-zone. This check is
3061 * made on memory-hotadd so a system can start with mobility
3062 * disabled and enable it later
3063 */
d9c23400 3064 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
9ef9acb0
MG
3065 page_group_by_mobility_disabled = 1;
3066 else
3067 page_group_by_mobility_disabled = 0;
3068
3069 printk("Built %i zonelists in %s order, mobility grouping %s. "
3070 "Total pages: %ld\n",
62bc62a8 3071 nr_online_nodes,
f0c0b2b8 3072 zonelist_order_name[current_zonelist_order],
9ef9acb0 3073 page_group_by_mobility_disabled ? "off" : "on",
f0c0b2b8
KH
3074 vm_total_pages);
3075#ifdef CONFIG_NUMA
3076 printk("Policy zone: %s\n", zone_names[policy_zone]);
3077#endif
1da177e4
LT
3078}
3079
3080/*
3081 * Helper functions to size the waitqueue hash table.
3082 * Essentially these want to choose hash table sizes sufficiently
3083 * large so that collisions trying to wait on pages are rare.
3084 * But in fact, the number of active page waitqueues on typical
3085 * systems is ridiculously low, less than 200. So this is even
3086 * conservative, even though it seems large.
3087 *
3088 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
3089 * waitqueues, i.e. the size of the waitq table given the number of pages.
3090 */
3091#define PAGES_PER_WAITQUEUE 256
3092
cca448fe 3093#ifndef CONFIG_MEMORY_HOTPLUG
02b694de 3094static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
1da177e4
LT
3095{
3096 unsigned long size = 1;
3097
3098 pages /= PAGES_PER_WAITQUEUE;
3099
3100 while (size < pages)
3101 size <<= 1;
3102
3103 /*
3104 * Once we have dozens or even hundreds of threads sleeping
3105 * on IO we've got bigger problems than wait queue collision.
3106 * Limit the size of the wait table to a reasonable size.
3107 */
3108 size = min(size, 4096UL);
3109
3110 return max(size, 4UL);
3111}
cca448fe
YG
3112#else
3113/*
3114 * A zone's size might be changed by hot-add, so it is not possible to determine
3115 * a suitable size for its wait_table. So we use the maximum size now.
3116 *
3117 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie:
3118 *
3119 * i386 (preemption config) : 4096 x 16 = 64Kbyte.
3120 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
3121 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.
3122 *
3123 * The maximum entries are prepared when a zone's memory is (512K + 256) pages
3124 * or more by the traditional way. (See above). It equals:
3125 *
3126 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
3127 * ia64(16K page size) : = ( 8G + 4M)byte.
3128 * powerpc (64K page size) : = (32G +16M)byte.
3129 */
3130static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3131{
3132 return 4096UL;
3133}
3134#endif
1da177e4
LT
3135
3136/*
3137 * This is an integer logarithm so that shifts can be used later
3138 * to extract the more random high bits from the multiplicative
3139 * hash function before the remainder is taken.
3140 */
3141static inline unsigned long wait_table_bits(unsigned long size)
3142{
3143 return ffz(~size);
3144}
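/*
 * Worked example for the two helpers above (non-hotplug sizing, 4 KiB pages):
 * a 1 GiB zone has 262144 pages; 262144 / PAGES_PER_WAITQUEUE = 1024, which
 * is already a power of two, so wait_table_hash_nr_entries() returns 1024
 * (well under the 4096 cap) and wait_table_bits(1024) = 10.
 */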
3145
3146#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
3147
56fd56b8 3148/*
d9c23400 3149 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
41858966
MG
3150 * of blocks reserved is based on min_wmark_pages(zone). The memory within
3151 * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
56fd56b8
MG
3152 * higher will lead to a bigger reserve which will get freed as contiguous
3153 * blocks as reclaim kicks in
3154 */
3155static void setup_zone_migrate_reserve(struct zone *zone)
3156{
3157 unsigned long start_pfn, pfn, end_pfn;
3158 struct page *page;
78986a67
MG
3159 unsigned long block_migratetype;
3160 int reserve;
56fd56b8
MG
3161
3162 /* Get the start pfn, end pfn and the number of blocks to reserve */
3163 start_pfn = zone->zone_start_pfn;
3164 end_pfn = start_pfn + zone->spanned_pages;
41858966 3165 reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
d9c23400 3166 pageblock_order;
56fd56b8 3167
78986a67
MG
3168 /*
3169 * Reserve blocks are generally in place to help high-order atomic
3170 * allocations that are short-lived. A min_free_kbytes value that
3171 * would result in more than 2 reserve blocks for atomic allocations
3172 * is assumed to be in place to help anti-fragmentation for the
3173 * future allocation of hugepages at runtime.
3174 */
3175 reserve = min(2, reserve);
3176
d9c23400 3177 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
56fd56b8
MG
3178 if (!pfn_valid(pfn))
3179 continue;
3180 page = pfn_to_page(pfn);
3181
344c790e
AL
3182 /* Watch out for overlapping nodes */
3183 if (page_to_nid(page) != zone_to_nid(zone))
3184 continue;
3185
56fd56b8
MG
3186 /* Blocks with reserved pages will never free, skip them. */
3187 if (PageReserved(page))
3188 continue;
3189
3190 block_migratetype = get_pageblock_migratetype(page);
3191
3192 /* If this block is reserved, account for it */
3193 if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
3194 reserve--;
3195 continue;
3196 }
3197
3198 /* Suitable for reserving if this block is movable */
3199 if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
3200 set_pageblock_migratetype(page, MIGRATE_RESERVE);
3201 move_freepages_block(zone, page, MIGRATE_RESERVE);
3202 reserve--;
3203 continue;
3204 }
3205
3206 /*
3207 * If the reserve is met and this is a previous reserved block,
3208 * take it back
3209 */
3210 if (block_migratetype == MIGRATE_RESERVE) {
3211 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3212 move_freepages_block(zone, page, MIGRATE_MOVABLE);
3213 }
3214 }
3215}
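/*
 * Worked example of the reserve sizing above (assuming 4 KiB pages and 2 MiB
 * pageblocks, i.e. pageblock_nr_pages = 512 and pageblock_order = 9): a zone
 * with min_wmark_pages() = 1024 yields roundup(1024, 512) >> 9 = 2 blocks,
 * which is also the cap applied by min(2, reserve); a small zone with a
 * 64-page minimum watermark gets roundup(64, 512) >> 9 = 1 block.
 */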
ac0e5b7a 3216
1da177e4
LT
3217/*
3218 * Initially all pages are reserved - free ones are freed
3219 * up by free_all_bootmem() once the early boot process is
3220 * done. Non-atomic initialization, single-pass.
3221 */
c09b4240 3222void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
a2f3aa02 3223 unsigned long start_pfn, enum memmap_context context)
1da177e4 3224{
1da177e4 3225 struct page *page;
29751f69
AW
3226 unsigned long end_pfn = start_pfn + size;
3227 unsigned long pfn;
86051ca5 3228 struct zone *z;
1da177e4 3229
22b31eec
HD
3230 if (highest_memmap_pfn < end_pfn - 1)
3231 highest_memmap_pfn = end_pfn - 1;
3232
86051ca5 3233 z = &NODE_DATA(nid)->node_zones[zone];
cbe8dd4a 3234 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
a2f3aa02
DH
3235 /*
3236 * There can be holes in boot-time mem_map[]s
3237 * handed to this function. They do not
3238 * exist on hotplugged memory.
3239 */
3240 if (context == MEMMAP_EARLY) {
3241 if (!early_pfn_valid(pfn))
3242 continue;
3243 if (!early_pfn_in_nid(pfn, nid))
3244 continue;
3245 }
d41dee36
AW
3246 page = pfn_to_page(pfn);
3247 set_page_links(page, zone, nid, pfn);
708614e6 3248 mminit_verify_page_links(page, zone, nid, pfn);
7835e98b 3249 init_page_count(page);
1da177e4
LT
3250 reset_page_mapcount(page);
3251 SetPageReserved(page);
b2a0ac88
MG
3252 /*
3253 * Mark the block movable so that blocks are reserved for
3254 * movable at startup. This will force kernel allocations
3255 * to reserve their blocks rather than leaking throughout
3256 * the address space during boot when many long-lived
56fd56b8
MG
3257 * kernel allocations are made. Later some blocks near
3258 * the start are marked MIGRATE_RESERVE by
3259 * setup_zone_migrate_reserve()
86051ca5
KH
3260 *
 3261		 * The bitmap is created for the zone's valid pfn range, but the
 3262		 * memmap can be created for invalid pages (for alignment), so
 3263		 * check here that we do not call set_pageblock_migratetype()
 3264		 * on a pfn outside the zone.
b2a0ac88 3265 */
86051ca5
KH
3266 if ((z->zone_start_pfn <= pfn)
3267 && (pfn < z->zone_start_pfn + z->spanned_pages)
3268 && !(pfn & (pageblock_nr_pages - 1)))
56fd56b8 3269 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
b2a0ac88 3270
1da177e4
LT
3271 INIT_LIST_HEAD(&page->lru);
3272#ifdef WANT_PAGE_VIRTUAL
3273 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
3274 if (!is_highmem_idx(zone))
3212c6be 3275 set_page_address(page, __va(pfn << PAGE_SHIFT));
1da177e4 3276#endif
1da177e4
LT
3277 }
3278}
3279
1e548deb 3280static void __meminit zone_init_free_lists(struct zone *zone)
1da177e4 3281{
b2a0ac88
MG
3282 int order, t;
3283 for_each_migratetype_order(order, t) {
3284 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
1da177e4
LT
3285 zone->free_area[order].nr_free = 0;
3286 }
3287}
3288
3289#ifndef __HAVE_ARCH_MEMMAP_INIT
3290#define memmap_init(size, nid, zone, start_pfn) \
a2f3aa02 3291 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
1da177e4
LT
3292#endif
3293
1d6f4e60 3294static int zone_batchsize(struct zone *zone)
e7c8d5c9 3295{
3a6be87f 3296#ifdef CONFIG_MMU
e7c8d5c9
CL
3297 int batch;
3298
3299 /*
 3300 * The per-cpu-pages pools are set to around 1/1000th of the
ba56e91c 3301 * size of the zone. But no more than 1/2 of a meg.
e7c8d5c9
CL
3302 *
3303 * OK, so we don't know how big the cache is. So guess.
3304 */
3305 batch = zone->present_pages / 1024;
ba56e91c
SR
3306 if (batch * PAGE_SIZE > 512 * 1024)
3307 batch = (512 * 1024) / PAGE_SIZE;
e7c8d5c9
CL
3308 batch /= 4; /* We effectively *= 4 below */
3309 if (batch < 1)
3310 batch = 1;
3311
3312 /*
0ceaacc9
NP
3313 * Clamp the batch to a 2^n - 1 value. Having a power
3314 * of 2 value was found to be more likely to have
3315 * suboptimal cache aliasing properties in some cases.
e7c8d5c9 3316 *
0ceaacc9
NP
3317 * For example if 2 tasks are alternately allocating
3318 * batches of pages, one task can end up with a lot
3319 * of pages of one half of the possible page colors
3320 * and the other with pages of the other colors.
e7c8d5c9 3321 */
9155203a 3322 batch = rounddown_pow_of_two(batch + batch/2) - 1;
ba56e91c 3323
e7c8d5c9 3324 return batch;
3a6be87f
DH
3325
3326#else
3327 /* The deferral and batching of frees should be suppressed under NOMMU
3328 * conditions.
3329 *
3330 * The problem is that NOMMU needs to be able to allocate large chunks
3331 * of contiguous memory as there's no hardware page translation to
3332 * assemble apparent contiguous memory from discontiguous pages.
3333 *
3334 * Queueing large contiguous runs of pages for batching, however,
3335 * causes the pages to actually be freed in smaller chunks. As there
3336 * can be a significant delay between the individual batches being
3337 * recycled, this leads to the once large chunks of space being
3338 * fragmented and becoming unavailable for high-order allocations.
3339 */
3340 return 0;
3341#endif
e7c8d5c9
CL
3342}
3343
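/*
 * Worked example (illustrative; assumes 4KiB pages): for a zone with
 * 262144 present pages (1GiB), batch = 262144 / 1024 = 256.  Since
 * 256 pages (1MiB) exceeds the 512KiB cap, batch is clamped to 128,
 * then divided by 4 to give 32, and rounddown_pow_of_two(32 + 16) - 1
 * yields 31, so the per-cpu lists on such a zone move 31 pages at a time.
 */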
b69a7288 3344static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
2caaad41
CL
3345{
3346 struct per_cpu_pages *pcp;
5f8dcc21 3347 int migratetype;
2caaad41 3348
1c6fe946
MD
3349 memset(p, 0, sizeof(*p));
3350
3dfa5721 3351 pcp = &p->pcp;
2caaad41 3352 pcp->count = 0;
2caaad41
CL
3353 pcp->high = 6 * batch;
3354 pcp->batch = max(1UL, 1 * batch);
5f8dcc21
MG
3355 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
3356 INIT_LIST_HEAD(&pcp->lists[migratetype]);
2caaad41
CL
3357}
3358
8ad4b1fb
RS
3359/*
 3360 * setup_pagelist_highmark() sets the high water mark of the hot
 3361 * per_cpu_pagelist of pageset p to the value high.
3362 */
3363
3364static void setup_pagelist_highmark(struct per_cpu_pageset *p,
3365 unsigned long high)
3366{
3367 struct per_cpu_pages *pcp;
3368
3dfa5721 3369 pcp = &p->pcp;
8ad4b1fb
RS
3370 pcp->high = high;
3371 pcp->batch = max(1UL, high/4);
3372 if ((high/4) > (PAGE_SHIFT * 8))
3373 pcp->batch = PAGE_SHIFT * 8;
3374}
3375
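/*
 * Worked example (illustrative; assumes PAGE_SHIFT == 12): a call such as
 * setup_pagelist_highmark(p, 1000) sets pcp->high = 1000 and a batch of
 * max(1, 1000 / 4) = 250, which is then clamped to PAGE_SHIFT * 8 = 96.
 */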
319774e2
WF
3376static __meminit void setup_zone_pageset(struct zone *zone)
3377{
3378 int cpu;
3379
3380 zone->pageset = alloc_percpu(struct per_cpu_pageset);
3381
3382 for_each_possible_cpu(cpu) {
3383 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
3384
3385 setup_pageset(pcp, zone_batchsize(zone));
3386
3387 if (percpu_pagelist_fraction)
3388 setup_pagelist_highmark(pcp,
3389 (zone->present_pages /
3390 percpu_pagelist_fraction));
3391 }
3392}
3393
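/*
 * Worked example (illustrative): with percpu_pagelist_fraction set to 8
 * via sysctl, a zone of 262144 present pages gives every CPU a high water
 * mark of 262144 / 8 = 32768 pages on its per-cpu list, with the batch
 * derived from that value by setup_pagelist_highmark() above.
 */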
2caaad41 3394/*
99dcc3e5
CL
3395 * Allocate per cpu pagesets and initialize them.
3396 * Before this call only boot pagesets were available.
e7c8d5c9 3397 */
99dcc3e5 3398void __init setup_per_cpu_pageset(void)
e7c8d5c9 3399{
99dcc3e5 3400 struct zone *zone;
e7c8d5c9 3401
319774e2
WF
3402 for_each_populated_zone(zone)
3403 setup_zone_pageset(zone);
e7c8d5c9
CL
3404}
3405
577a32f6 3406static noinline __init_refok
cca448fe 3407int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
ed8ece2e
DH
3408{
3409 int i;
3410 struct pglist_data *pgdat = zone->zone_pgdat;
cca448fe 3411 size_t alloc_size;
ed8ece2e
DH
3412
3413 /*
3414 * The per-page waitqueue mechanism uses hashed waitqueues
3415 * per zone.
3416 */
02b694de
YG
3417 zone->wait_table_hash_nr_entries =
3418 wait_table_hash_nr_entries(zone_size_pages);
3419 zone->wait_table_bits =
3420 wait_table_bits(zone->wait_table_hash_nr_entries);
cca448fe
YG
3421 alloc_size = zone->wait_table_hash_nr_entries
3422 * sizeof(wait_queue_head_t);
3423
cd94b9db 3424 if (!slab_is_available()) {
cca448fe
YG
3425 zone->wait_table = (wait_queue_head_t *)
3426 alloc_bootmem_node(pgdat, alloc_size);
3427 } else {
3428 /*
3429 * This case means that a zone whose size was 0 gets new memory
3430 * via memory hot-add.
 3431 * But it may be the case that a new node was hot-added. In
 3432 * that case vmalloc() will not be able to use the new node's
 3433 * memory yet, even though this wait_table should ideally be
 3434 * placed on the new node itself.
 3435 * Making use of the new node's memory here will need further
 3436 * work.
3437 */
8691f3a7 3438 zone->wait_table = vmalloc(alloc_size);
cca448fe
YG
3439 }
3440 if (!zone->wait_table)
3441 return -ENOMEM;
ed8ece2e 3442
02b694de 3443 for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
ed8ece2e 3444 init_waitqueue_head(zone->wait_table + i);
cca448fe
YG
3445
3446 return 0;
ed8ece2e
DH
3447}
3448
112067f0
SL
3449static int __zone_pcp_update(void *data)
3450{
3451 struct zone *zone = data;
3452 int cpu;
3453 unsigned long batch = zone_batchsize(zone), flags;
3454
2d30a1f6 3455 for_each_possible_cpu(cpu) {
112067f0
SL
3456 struct per_cpu_pageset *pset;
3457 struct per_cpu_pages *pcp;
3458
99dcc3e5 3459 pset = per_cpu_ptr(zone->pageset, cpu);
112067f0
SL
3460 pcp = &pset->pcp;
3461
3462 local_irq_save(flags);
5f8dcc21 3463 free_pcppages_bulk(zone, pcp->count, pcp);
112067f0
SL
3464 setup_pageset(pset, batch);
3465 local_irq_restore(flags);
3466 }
3467 return 0;
3468}
3469
3470void zone_pcp_update(struct zone *zone)
3471{
3472 stop_machine(__zone_pcp_update, zone, NULL);
3473}
3474
c09b4240 3475static __meminit void zone_pcp_init(struct zone *zone)
ed8ece2e 3476{
99dcc3e5
CL
3477 /*
3478 * per cpu subsystem is not up at this point. The following code
3479 * relies on the ability of the linker to provide the
3480 * offset of a (static) per cpu variable into the per cpu area.
3481 */
3482 zone->pageset = &boot_pageset;
ed8ece2e 3483
f5335c0f 3484 if (zone->present_pages)
99dcc3e5
CL
3485 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
3486 zone->name, zone->present_pages,
3487 zone_batchsize(zone));
ed8ece2e
DH
3488}
3489
718127cc
YG
3490__meminit int init_currently_empty_zone(struct zone *zone,
3491 unsigned long zone_start_pfn,
a2f3aa02
DH
3492 unsigned long size,
3493 enum memmap_context context)
ed8ece2e
DH
3494{
3495 struct pglist_data *pgdat = zone->zone_pgdat;
cca448fe
YG
3496 int ret;
3497 ret = zone_wait_table_init(zone, size);
3498 if (ret)
3499 return ret;
ed8ece2e
DH
3500 pgdat->nr_zones = zone_idx(zone) + 1;
3501
ed8ece2e
DH
3502 zone->zone_start_pfn = zone_start_pfn;
3503
708614e6
MG
3504 mminit_dprintk(MMINIT_TRACE, "memmap_init",
3505 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
3506 pgdat->node_id,
3507 (unsigned long)zone_idx(zone),
3508 zone_start_pfn, (zone_start_pfn + size));
3509
1e548deb 3510 zone_init_free_lists(zone);
718127cc
YG
3511
3512 return 0;
ed8ece2e
DH
3513}
3514
c713216d
MG
3515#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3516/*
3517 * Basic iterator support. Return the first range of PFNs for a node
3518 * Note: nid == MAX_NUMNODES returns first region regardless of node
3519 */
a3142c8e 3520static int __meminit first_active_region_index_in_nid(int nid)
c713216d
MG
3521{
3522 int i;
3523
3524 for (i = 0; i < nr_nodemap_entries; i++)
3525 if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
3526 return i;
3527
3528 return -1;
3529}
3530
3531/*
3532 * Basic iterator support. Return the next active range of PFNs for a node
183ff22b 3533 * Note: nid == MAX_NUMNODES returns next region regardless of node
c713216d 3534 */
a3142c8e 3535static int __meminit next_active_region_index_in_nid(int index, int nid)
c713216d
MG
3536{
3537 for (index = index + 1; index < nr_nodemap_entries; index++)
3538 if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
3539 return index;
3540
3541 return -1;
3542}
3543
3544#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
3545/*
3546 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
3547 * Architectures may implement their own version but if add_active_range()
3548 * was used and there are no special requirements, this is a convenient
3549 * alternative
3550 */
f2dbcfa7 3551int __meminit __early_pfn_to_nid(unsigned long pfn)
c713216d
MG
3552{
3553 int i;
3554
3555 for (i = 0; i < nr_nodemap_entries; i++) {
3556 unsigned long start_pfn = early_node_map[i].start_pfn;
3557 unsigned long end_pfn = early_node_map[i].end_pfn;
3558
3559 if (start_pfn <= pfn && pfn < end_pfn)
3560 return early_node_map[i].nid;
3561 }
cc2559bc
KH
3562 /* This is a memory hole */
3563 return -1;
c713216d
MG
3564}
3565#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
3566
f2dbcfa7
KH
3567int __meminit early_pfn_to_nid(unsigned long pfn)
3568{
cc2559bc
KH
3569 int nid;
3570
3571 nid = __early_pfn_to_nid(pfn);
3572 if (nid >= 0)
3573 return nid;
3574 /* just returns 0 */
3575 return 0;
f2dbcfa7
KH
3576}
3577
cc2559bc
KH
3578#ifdef CONFIG_NODES_SPAN_OTHER_NODES
3579bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
3580{
3581 int nid;
3582
3583 nid = __early_pfn_to_nid(pfn);
3584 if (nid >= 0 && nid != node)
3585 return false;
3586 return true;
3587}
3588#endif
f2dbcfa7 3589
c713216d
MG
3590/* Basic iterator support to walk early_node_map[] */
3591#define for_each_active_range_index_in_nid(i, nid) \
3592 for (i = first_active_region_index_in_nid(nid); i != -1; \
3593 i = next_active_region_index_in_nid(i, nid))
3594
3595/**
3596 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
88ca3b94
RD
3597 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
3598 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
c713216d
MG
3599 *
3600 * If an architecture guarantees that all ranges registered with
 3601 * add_active_ranges() contain no holes and may be freed, this
 3602 * function may be used instead of calling free_bootmem() manually.
3603 */
3604void __init free_bootmem_with_active_regions(int nid,
3605 unsigned long max_low_pfn)
3606{
3607 int i;
3608
3609 for_each_active_range_index_in_nid(i, nid) {
3610 unsigned long size_pages = 0;
3611 unsigned long end_pfn = early_node_map[i].end_pfn;
3612
3613 if (early_node_map[i].start_pfn >= max_low_pfn)
3614 continue;
3615
3616 if (end_pfn > max_low_pfn)
3617 end_pfn = max_low_pfn;
3618
3619 size_pages = end_pfn - early_node_map[i].start_pfn;
3620 free_bootmem_node(NODE_DATA(early_node_map[i].nid),
3621 PFN_PHYS(early_node_map[i].start_pfn),
3622 size_pages << PAGE_SHIFT);
3623 }
3624}
3625
08677214
YL
3626int __init add_from_early_node_map(struct range *range, int az,
3627 int nr_range, int nid)
3628{
3629 int i;
3630 u64 start, end;
3631
 3632 /* need to go over early_node_map to find a good range for the node */
3633 for_each_active_range_index_in_nid(i, nid) {
3634 start = early_node_map[i].start_pfn;
3635 end = early_node_map[i].end_pfn;
3636 nr_range = add_range(range, az, nr_range, start, end);
3637 }
3638 return nr_range;
3639}
3640
2ee78f7b 3641#ifdef CONFIG_NO_BOOTMEM
08677214
YL
3642void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
3643 u64 goal, u64 limit)
3644{
3645 int i;
3646 void *ptr;
3647
b8ab9f82
YL
3648 if (limit > get_max_mapped())
3649 limit = get_max_mapped();
3650
08677214
YL
 3651 /* need to go over early_node_map to find a good range for the node */
3652 for_each_active_range_index_in_nid(i, nid) {
3653 u64 addr;
3654 u64 ei_start, ei_last;
3655
3656 ei_last = early_node_map[i].end_pfn;
3657 ei_last <<= PAGE_SHIFT;
3658 ei_start = early_node_map[i].start_pfn;
3659 ei_start <<= PAGE_SHIFT;
3660 addr = find_early_area(ei_start, ei_last,
3661 goal, limit, size, align);
3662
3663 if (addr == -1ULL)
3664 continue;
3665
3666#if 0
3667 printk(KERN_DEBUG "alloc (nid=%d %llx - %llx) (%llx - %llx) %llx %llx => %llx\n",
3668 nid,
3669 ei_start, ei_last, goal, limit, size,
3670 align, addr);
3671#endif
3672
3673 ptr = phys_to_virt(addr);
3674 memset(ptr, 0, size);
3675 reserve_early_without_check(addr, addr + size, "BOOTMEM");
9078370c
CM
3676 /*
3677 * The min_count is set to 0 so that bootmem allocated blocks
3678 * are never reported as leaks.
3679 */
3680 kmemleak_alloc(ptr, size, 0, 0);
08677214
YL
3681 return ptr;
3682 }
3683
3684 return NULL;
3685}
2ee78f7b 3686#endif
08677214
YL
3687
3688
b5bc6c0e
YL
3689void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
3690{
3691 int i;
d52d53b8 3692 int ret;
b5bc6c0e 3693
d52d53b8
YL
3694 for_each_active_range_index_in_nid(i, nid) {
3695 ret = work_fn(early_node_map[i].start_pfn,
3696 early_node_map[i].end_pfn, data);
3697 if (ret)
3698 break;
3699 }
b5bc6c0e 3700}
c713216d
MG
3701/**
3702 * sparse_memory_present_with_active_regions - Call memory_present for each active range
88ca3b94 3703 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
c713216d
MG
3704 *
3705 * If an architecture guarantees that all ranges registered with
3706 * add_active_ranges() contain no holes and may be freed, this
88ca3b94 3707 * function may be used instead of calling memory_present() manually.
c713216d
MG
3708 */
3709void __init sparse_memory_present_with_active_regions(int nid)
3710{
3711 int i;
3712
3713 for_each_active_range_index_in_nid(i, nid)
3714 memory_present(early_node_map[i].nid,
3715 early_node_map[i].start_pfn,
3716 early_node_map[i].end_pfn);
3717}
3718
3719/**
3720 * get_pfn_range_for_nid - Return the start and end page frames for a node
88ca3b94
RD
3721 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
3722 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
3723 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
c713216d
MG
3724 *
3725 * It returns the start and end page frame of a node based on information
3726 * provided by an arch calling add_active_range(). If called for a node
3727 * with no available memory, a warning is printed and the start and end
88ca3b94 3728 * PFNs will be 0.
c713216d 3729 */
a3142c8e 3730void __meminit get_pfn_range_for_nid(unsigned int nid,
c713216d
MG
3731 unsigned long *start_pfn, unsigned long *end_pfn)
3732{
3733 int i;
3734 *start_pfn = -1UL;
3735 *end_pfn = 0;
3736
3737 for_each_active_range_index_in_nid(i, nid) {
3738 *start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
3739 *end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
3740 }
3741
633c0666 3742 if (*start_pfn == -1UL)
c713216d 3743 *start_pfn = 0;
c713216d
MG
3744}
3745
2a1e274a
MG
3746/*
3747 * This finds a zone that can be used for ZONE_MOVABLE pages. The
3748 * assumption is made that zones within a node are ordered in monotonic
3749 * increasing memory addresses so that the "highest" populated zone is used
3750 */
b69a7288 3751static void __init find_usable_zone_for_movable(void)
2a1e274a
MG
3752{
3753 int zone_index;
3754 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3755 if (zone_index == ZONE_MOVABLE)
3756 continue;
3757
3758 if (arch_zone_highest_possible_pfn[zone_index] >
3759 arch_zone_lowest_possible_pfn[zone_index])
3760 break;
3761 }
3762
3763 VM_BUG_ON(zone_index == -1);
3764 movable_zone = zone_index;
3765}
3766
3767/*
3768 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
 3769 * because it is sized independently of the architecture. Unlike the other zones,
3770 * the starting point for ZONE_MOVABLE is not fixed. It may be different
3771 * in each node depending on the size of each node and how evenly kernelcore
3772 * is distributed. This helper function adjusts the zone ranges
3773 * provided by the architecture for a given node by using the end of the
3774 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 3775 * zones within a node are in order of monotonically increasing memory addresses.
3776 */
b69a7288 3777static void __meminit adjust_zone_range_for_zone_movable(int nid,
2a1e274a
MG
3778 unsigned long zone_type,
3779 unsigned long node_start_pfn,
3780 unsigned long node_end_pfn,
3781 unsigned long *zone_start_pfn,
3782 unsigned long *zone_end_pfn)
3783{
3784 /* Only adjust if ZONE_MOVABLE is on this node */
3785 if (zone_movable_pfn[nid]) {
3786 /* Size ZONE_MOVABLE */
3787 if (zone_type == ZONE_MOVABLE) {
3788 *zone_start_pfn = zone_movable_pfn[nid];
3789 *zone_end_pfn = min(node_end_pfn,
3790 arch_zone_highest_possible_pfn[movable_zone]);
3791
3792 /* Adjust for ZONE_MOVABLE starting within this range */
3793 } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
3794 *zone_end_pfn > zone_movable_pfn[nid]) {
3795 *zone_end_pfn = zone_movable_pfn[nid];
3796
3797 /* Check if this whole range is within ZONE_MOVABLE */
3798 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
3799 *zone_start_pfn = *zone_end_pfn;
3800 }
3801}
3802
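/*
 * Worked example (illustrative): suppose a node spans pfns 0-1048576 and
 * zone_movable_pfn[nid] == 786432.  A zone covering pfns 262144-1048576
 * straddles that boundary, so its end is pulled back to 786432; a zone
 * that starts at or above 786432 collapses to an empty range; and
 * ZONE_MOVABLE itself runs from 786432 up to the node end, capped by the
 * top of the highest usable zone.
 */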
c713216d
MG
3803/*
3804 * Return the number of pages a zone spans in a node, including holes
3805 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
3806 */
6ea6e688 3807static unsigned long __meminit zone_spanned_pages_in_node(int nid,
c713216d
MG
3808 unsigned long zone_type,
3809 unsigned long *ignored)
3810{
3811 unsigned long node_start_pfn, node_end_pfn;
3812 unsigned long zone_start_pfn, zone_end_pfn;
3813
3814 /* Get the start and end of the node and zone */
3815 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3816 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
3817 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
2a1e274a
MG
3818 adjust_zone_range_for_zone_movable(nid, zone_type,
3819 node_start_pfn, node_end_pfn,
3820 &zone_start_pfn, &zone_end_pfn);
c713216d
MG
3821
3822 /* Check that this node has pages within the zone's required range */
3823 if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
3824 return 0;
3825
3826 /* Move the zone boundaries inside the node if necessary */
3827 zone_end_pfn = min(zone_end_pfn, node_end_pfn);
3828 zone_start_pfn = max(zone_start_pfn, node_start_pfn);
3829
3830 /* Return the spanned pages */
3831 return zone_end_pfn - zone_start_pfn;
3832}
3833
3834/*
3835 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
88ca3b94 3836 * then all holes in the requested range will be accounted for.
c713216d 3837 */
32996250 3838unsigned long __meminit __absent_pages_in_range(int nid,
c713216d
MG
3839 unsigned long range_start_pfn,
3840 unsigned long range_end_pfn)
3841{
3842 int i = 0;
3843 unsigned long prev_end_pfn = 0, hole_pages = 0;
3844 unsigned long start_pfn;
3845
3846 /* Find the end_pfn of the first active range of pfns in the node */
3847 i = first_active_region_index_in_nid(nid);
3848 if (i == -1)
3849 return 0;
3850
b5445f95
MG
3851 prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3852
9c7cd687
MG
3853 /* Account for ranges before physical memory on this node */
3854 if (early_node_map[i].start_pfn > range_start_pfn)
b5445f95 3855 hole_pages = prev_end_pfn - range_start_pfn;
c713216d
MG
3856
3857 /* Find all holes for the zone within the node */
3858 for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
3859
3860 /* No need to continue if prev_end_pfn is outside the zone */
3861 if (prev_end_pfn >= range_end_pfn)
3862 break;
3863
3864 /* Make sure the end of the zone is not within the hole */
3865 start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3866 prev_end_pfn = max(prev_end_pfn, range_start_pfn);
3867
 3868 /* Update the hole size count and move on */
3869 if (start_pfn > range_start_pfn) {
3870 BUG_ON(prev_end_pfn > start_pfn);
3871 hole_pages += start_pfn - prev_end_pfn;
3872 }
3873 prev_end_pfn = early_node_map[i].end_pfn;
3874 }
3875
9c7cd687
MG
3876 /* Account for ranges past physical memory on this node */
3877 if (range_end_pfn > prev_end_pfn)
0c6cb974 3878 hole_pages += range_end_pfn -
9c7cd687
MG
3879 max(range_start_pfn, prev_end_pfn);
3880
c713216d
MG
3881 return hole_pages;
3882}
3883
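/*
 * Worked example (illustrative): with active ranges [100, 200) and
 * [300, 400) registered for a node, __absent_pages_in_range(nid, 0, 500)
 * counts 100 pages of hole before the first range, 100 pages between the
 * two ranges and 100 pages after the last one, returning 300.
 */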
3884/**
3885 * absent_pages_in_range - Return number of page frames in holes within a range
3886 * @start_pfn: The start PFN to start searching for holes
3887 * @end_pfn: The end PFN to stop searching for holes
3888 *
88ca3b94 3889 * It returns the number of page frames in memory holes within a range.
c713216d
MG
3890 */
3891unsigned long __init absent_pages_in_range(unsigned long start_pfn,
3892 unsigned long end_pfn)
3893{
3894 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
3895}
3896
3897/* Return the number of page frames in holes in a zone on a node */
6ea6e688 3898static unsigned long __meminit zone_absent_pages_in_node(int nid,
c713216d
MG
3899 unsigned long zone_type,
3900 unsigned long *ignored)
3901{
9c7cd687
MG
3902 unsigned long node_start_pfn, node_end_pfn;
3903 unsigned long zone_start_pfn, zone_end_pfn;
3904
3905 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3906 zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
3907 node_start_pfn);
3908 zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
3909 node_end_pfn);
3910
2a1e274a
MG
3911 adjust_zone_range_for_zone_movable(nid, zone_type,
3912 node_start_pfn, node_end_pfn,
3913 &zone_start_pfn, &zone_end_pfn);
9c7cd687 3914 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
c713216d 3915}
0e0b864e 3916
c713216d 3917#else
6ea6e688 3918static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
c713216d
MG
3919 unsigned long zone_type,
3920 unsigned long *zones_size)
3921{
3922 return zones_size[zone_type];
3923}
3924
6ea6e688 3925static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
c713216d
MG
3926 unsigned long zone_type,
3927 unsigned long *zholes_size)
3928{
3929 if (!zholes_size)
3930 return 0;
3931
3932 return zholes_size[zone_type];
3933}
0e0b864e 3934
c713216d
MG
3935#endif
3936
a3142c8e 3937static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
c713216d
MG
3938 unsigned long *zones_size, unsigned long *zholes_size)
3939{
3940 unsigned long realtotalpages, totalpages = 0;
3941 enum zone_type i;
3942
3943 for (i = 0; i < MAX_NR_ZONES; i++)
3944 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
3945 zones_size);
3946 pgdat->node_spanned_pages = totalpages;
3947
3948 realtotalpages = totalpages;
3949 for (i = 0; i < MAX_NR_ZONES; i++)
3950 realtotalpages -=
3951 zone_absent_pages_in_node(pgdat->node_id, i,
3952 zholes_size);
3953 pgdat->node_present_pages = realtotalpages;
3954 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
3955 realtotalpages);
3956}
3957
835c134e
MG
3958#ifndef CONFIG_SPARSEMEM
3959/*
 3960 * Calculate the size of the zone->blockflags rounded to an unsigned long.
d9c23400
MG
3961 * Start by making sure zonesize is a multiple of pageblock_order by rounding
3962 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
835c134e
MG
3963 * round what is now in bits to nearest long in bits, then return it in
3964 * bytes.
3965 */
3966static unsigned long __init usemap_size(unsigned long zonesize)
3967{
3968 unsigned long usemapsize;
3969
d9c23400
MG
3970 usemapsize = roundup(zonesize, pageblock_nr_pages);
3971 usemapsize = usemapsize >> pageblock_order;
835c134e
MG
3972 usemapsize *= NR_PAGEBLOCK_BITS;
3973 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
3974
3975 return usemapsize / 8;
3976}
3977
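/*
 * Worked example (illustrative; assumes pageblock_order == 9): a zone of
 * 262144 pages rounds up to 512 pageblocks, so the bitmap needs
 * 512 * NR_PAGEBLOCK_BITS bits, rounded up to a whole number of unsigned
 * longs and returned as a byte count.
 */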
3978static void __init setup_usemap(struct pglist_data *pgdat,
3979 struct zone *zone, unsigned long zonesize)
3980{
3981 unsigned long usemapsize = usemap_size(zonesize);
3982 zone->pageblock_flags = NULL;
58a01a45 3983 if (usemapsize)
835c134e 3984 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
835c134e
MG
3985}
3986#else
3987static void inline setup_usemap(struct pglist_data *pgdat,
3988 struct zone *zone, unsigned long zonesize) {}
3989#endif /* CONFIG_SPARSEMEM */
3990
d9c23400 3991#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
ba72cb8c
MG
3992
3993/* Return a sensible default order for the pageblock size. */
3994static inline int pageblock_default_order(void)
3995{
3996 if (HPAGE_SHIFT > PAGE_SHIFT)
3997 return HUGETLB_PAGE_ORDER;
3998
3999 return MAX_ORDER-1;
4000}
4001
d9c23400
MG
4002/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
4003static inline void __init set_pageblock_order(unsigned int order)
4004{
4005 /* Check that pageblock_nr_pages has not already been setup */
4006 if (pageblock_order)
4007 return;
4008
4009 /*
4010 * Assume the largest contiguous order of interest is a huge page.
4011 * This value may be variable depending on boot parameters on IA64
4012 */
4013 pageblock_order = order;
4014}
4015#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4016
ba72cb8c
MG
4017/*
4018 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
4019 * and pageblock_default_order() are unused as pageblock_order is set
4020 * at compile-time. See include/linux/pageblock-flags.h for the values of
4021 * pageblock_order based on the kernel config
4022 */
4023static inline int pageblock_default_order(unsigned int order)
4024{
4025 return MAX_ORDER-1;
4026}
d9c23400
MG
4027#define set_pageblock_order(x) do {} while (0)
4028
4029#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4030
1da177e4
LT
4031/*
4032 * Set up the zone data structures:
4033 * - mark all pages reserved
4034 * - mark all memory queues empty
4035 * - clear the memory bitmaps
4036 */
b5a0e011 4037static void __paginginit free_area_init_core(struct pglist_data *pgdat,
1da177e4
LT
4038 unsigned long *zones_size, unsigned long *zholes_size)
4039{
2f1b6248 4040 enum zone_type j;
ed8ece2e 4041 int nid = pgdat->node_id;
1da177e4 4042 unsigned long zone_start_pfn = pgdat->node_start_pfn;
718127cc 4043 int ret;
1da177e4 4044
208d54e5 4045 pgdat_resize_init(pgdat);
1da177e4
LT
4046 pgdat->nr_zones = 0;
4047 init_waitqueue_head(&pgdat->kswapd_wait);
4048 pgdat->kswapd_max_order = 0;
52d4b9ac 4049 pgdat_page_cgroup_init(pgdat);
1da177e4
LT
4050
4051 for (j = 0; j < MAX_NR_ZONES; j++) {
4052 struct zone *zone = pgdat->node_zones + j;
0e0b864e 4053 unsigned long size, realsize, memmap_pages;
b69408e8 4054 enum lru_list l;
1da177e4 4055
c713216d
MG
4056 size = zone_spanned_pages_in_node(nid, j, zones_size);
4057 realsize = size - zone_absent_pages_in_node(nid, j,
4058 zholes_size);
1da177e4 4059
0e0b864e
MG
4060 /*
4061 * Adjust realsize so that it accounts for how much memory
4062 * is used by this zone for memmap. This affects the watermark
4063 * and per-cpu initialisations
4064 */
f7232154
JW
4065 memmap_pages =
4066 PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
0e0b864e
MG
4067 if (realsize >= memmap_pages) {
4068 realsize -= memmap_pages;
5594c8c8
YL
4069 if (memmap_pages)
4070 printk(KERN_DEBUG
4071 " %s zone: %lu pages used for memmap\n",
4072 zone_names[j], memmap_pages);
0e0b864e
MG
4073 } else
4074 printk(KERN_WARNING
4075 " %s zone: %lu pages exceeds realsize %lu\n",
4076 zone_names[j], memmap_pages, realsize);
4077
6267276f
CL
4078 /* Account for reserved pages */
4079 if (j == 0 && realsize > dma_reserve) {
0e0b864e 4080 realsize -= dma_reserve;
d903ef9f 4081 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
6267276f 4082 zone_names[0], dma_reserve);
0e0b864e
MG
4083 }
4084
98d2b0eb 4085 if (!is_highmem_idx(j))
1da177e4
LT
4086 nr_kernel_pages += realsize;
4087 nr_all_pages += realsize;
4088
4089 zone->spanned_pages = size;
4090 zone->present_pages = realsize;
9614634f 4091#ifdef CONFIG_NUMA
d5f541ed 4092 zone->node = nid;
8417bba4 4093 zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
9614634f 4094 / 100;
0ff38490 4095 zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
9614634f 4096#endif
1da177e4
LT
4097 zone->name = zone_names[j];
4098 spin_lock_init(&zone->lock);
4099 spin_lock_init(&zone->lru_lock);
bdc8cb98 4100 zone_seqlock_init(zone);
1da177e4 4101 zone->zone_pgdat = pgdat;
1da177e4 4102
ed8ece2e 4103 zone_pcp_init(zone);
b69408e8
CL
4104 for_each_lru(l) {
4105 INIT_LIST_HEAD(&zone->lru[l].list);
f8629631 4106 zone->reclaim_stat.nr_saved_scan[l] = 0;
b69408e8 4107 }
6e901571
KM
4108 zone->reclaim_stat.recent_rotated[0] = 0;
4109 zone->reclaim_stat.recent_rotated[1] = 0;
4110 zone->reclaim_stat.recent_scanned[0] = 0;
4111 zone->reclaim_stat.recent_scanned[1] = 0;
2244b95a 4112 zap_zone_vm_stats(zone);
e815af95 4113 zone->flags = 0;
1da177e4
LT
4114 if (!size)
4115 continue;
4116
ba72cb8c 4117 set_pageblock_order(pageblock_default_order());
835c134e 4118 setup_usemap(pgdat, zone, size);
a2f3aa02
DH
4119 ret = init_currently_empty_zone(zone, zone_start_pfn,
4120 size, MEMMAP_EARLY);
718127cc 4121 BUG_ON(ret);
76cdd58e 4122 memmap_init(size, nid, j, zone_start_pfn);
1da177e4 4123 zone_start_pfn += size;
1da177e4
LT
4124 }
4125}
4126
577a32f6 4127static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
1da177e4 4128{
1da177e4
LT
4129 /* Skip empty nodes */
4130 if (!pgdat->node_spanned_pages)
4131 return;
4132
d41dee36 4133#ifdef CONFIG_FLAT_NODE_MEM_MAP
1da177e4
LT
4134 /* ia64 gets its own node_mem_map, before this, without bootmem */
4135 if (!pgdat->node_mem_map) {
e984bb43 4136 unsigned long size, start, end;
d41dee36
AW
4137 struct page *map;
4138
e984bb43
BP
4139 /*
4140 * The zone's endpoints aren't required to be MAX_ORDER
4141 * aligned but the node_mem_map endpoints must be in order
4142 * for the buddy allocator to function correctly.
4143 */
4144 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
4145 end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
4146 end = ALIGN(end, MAX_ORDER_NR_PAGES);
4147 size = (end - start) * sizeof(struct page);
6f167ec7
DH
4148 map = alloc_remap(pgdat->node_id, size);
4149 if (!map)
4150 map = alloc_bootmem_node(pgdat, size);
e984bb43 4151 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
1da177e4 4152 }
12d810c1 4153#ifndef CONFIG_NEED_MULTIPLE_NODES
1da177e4
LT
4154 /*
4155 * With no DISCONTIG, the global mem_map is just set as node 0's
4156 */
c713216d 4157 if (pgdat == NODE_DATA(0)) {
1da177e4 4158 mem_map = NODE_DATA(0)->node_mem_map;
c713216d
MG
4159#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
4160 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
467bc461 4161 mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
c713216d
MG
4162#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
4163 }
1da177e4 4164#endif
d41dee36 4165#endif /* CONFIG_FLAT_NODE_MEM_MAP */
1da177e4
LT
4166}
4167
9109fb7b
JW
4168void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
4169 unsigned long node_start_pfn, unsigned long *zholes_size)
1da177e4 4170{
9109fb7b
JW
4171 pg_data_t *pgdat = NODE_DATA(nid);
4172
1da177e4
LT
4173 pgdat->node_id = nid;
4174 pgdat->node_start_pfn = node_start_pfn;
c713216d 4175 calculate_node_totalpages(pgdat, zones_size, zholes_size);
1da177e4
LT
4176
4177 alloc_node_mem_map(pgdat);
e8c27ac9
YL
4178#ifdef CONFIG_FLAT_NODE_MEM_MAP
4179 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
4180 nid, (unsigned long)pgdat,
4181 (unsigned long)pgdat->node_mem_map);
4182#endif
1da177e4
LT
4183
4184 free_area_init_core(pgdat, zones_size, zholes_size);
4185}
4186
c713216d 4187#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
418508c1
MS
4188
4189#if MAX_NUMNODES > 1
4190/*
4191 * Figure out the number of possible node ids.
4192 */
4193static void __init setup_nr_node_ids(void)
4194{
4195 unsigned int node;
4196 unsigned int highest = 0;
4197
4198 for_each_node_mask(node, node_possible_map)
4199 highest = node;
4200 nr_node_ids = highest + 1;
4201}
4202#else
4203static inline void setup_nr_node_ids(void)
4204{
4205}
4206#endif
4207
c713216d
MG
4208/**
4209 * add_active_range - Register a range of PFNs backed by physical memory
4210 * @nid: The node ID the range resides on
4211 * @start_pfn: The start PFN of the available physical memory
4212 * @end_pfn: The end PFN of the available physical memory
4213 *
4214 * These ranges are stored in an early_node_map[] and later used by
4215 * free_area_init_nodes() to calculate zone sizes and holes. If the
4216 * range spans a memory hole, it is up to the architecture to ensure
4217 * the memory is not freed by the bootmem allocator. If possible
4218 * the range being registered will be merged with existing ranges.
4219 */
4220void __init add_active_range(unsigned int nid, unsigned long start_pfn,
4221 unsigned long end_pfn)
4222{
4223 int i;
4224
6b74ab97
MG
4225 mminit_dprintk(MMINIT_TRACE, "memory_register",
4226 "Entering add_active_range(%d, %#lx, %#lx) "
4227 "%d entries of %d used\n",
4228 nid, start_pfn, end_pfn,
4229 nr_nodemap_entries, MAX_ACTIVE_REGIONS);
c713216d 4230
2dbb51c4
MG
4231 mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
4232
c713216d
MG
4233 /* Merge with existing active regions if possible */
4234 for (i = 0; i < nr_nodemap_entries; i++) {
4235 if (early_node_map[i].nid != nid)
4236 continue;
4237
4238 /* Skip if an existing region covers this new one */
4239 if (start_pfn >= early_node_map[i].start_pfn &&
4240 end_pfn <= early_node_map[i].end_pfn)
4241 return;
4242
4243 /* Merge forward if suitable */
4244 if (start_pfn <= early_node_map[i].end_pfn &&
4245 end_pfn > early_node_map[i].end_pfn) {
4246 early_node_map[i].end_pfn = end_pfn;
4247 return;
4248 }
4249
4250 /* Merge backward if suitable */
d2dbe08d 4251 if (start_pfn < early_node_map[i].start_pfn &&
c713216d
MG
4252 end_pfn >= early_node_map[i].start_pfn) {
4253 early_node_map[i].start_pfn = start_pfn;
4254 return;
4255 }
4256 }
4257
4258 /* Check that early_node_map is large enough */
4259 if (i >= MAX_ACTIVE_REGIONS) {
4260 printk(KERN_CRIT "More than %d memory regions, truncating\n",
4261 MAX_ACTIVE_REGIONS);
4262 return;
4263 }
4264
4265 early_node_map[i].nid = nid;
4266 early_node_map[i].start_pfn = start_pfn;
4267 early_node_map[i].end_pfn = end_pfn;
4268 nr_nodemap_entries = i + 1;
4269}
4270
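/*
 * Worked example (illustrative): add_active_range(0, 0x1000, 0x2000)
 * followed by add_active_range(0, 0x2000, 0x3000) leaves a single merged
 * entry covering 0x1000-0x3000, and a later call for 0x1800-0x1c00 is
 * ignored because an existing entry already covers it.
 */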
4271/**
cc1050ba 4272 * remove_active_range - Shrink an existing registered range of PFNs
c713216d 4273 * @nid: The node id that the range to be shrunk resides on
cc1050ba
YL
 4274 * @start_pfn: The start PFN of the range to remove
 4275 * @end_pfn: The end PFN of the range to remove
c713216d
MG
4276 *
 4277 * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node.
cc1a9d86
YL
 4278 * The map is kept near the end of the physical page range that has already been
4279 * registered. This function allows an arch to shrink an existing registered
4280 * range.
c713216d 4281 */
cc1050ba
YL
4282void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
4283 unsigned long end_pfn)
c713216d 4284{
cc1a9d86
YL
4285 int i, j;
4286 int removed = 0;
c713216d 4287
cc1050ba
YL
4288 printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
4289 nid, start_pfn, end_pfn);
4290
c713216d 4291 /* Find the old active region end and shrink */
cc1a9d86 4292 for_each_active_range_index_in_nid(i, nid) {
cc1050ba
YL
4293 if (early_node_map[i].start_pfn >= start_pfn &&
4294 early_node_map[i].end_pfn <= end_pfn) {
cc1a9d86 4295 /* clear it */
cc1050ba 4296 early_node_map[i].start_pfn = 0;
cc1a9d86
YL
4297 early_node_map[i].end_pfn = 0;
4298 removed = 1;
4299 continue;
4300 }
cc1050ba
YL
4301 if (early_node_map[i].start_pfn < start_pfn &&
4302 early_node_map[i].end_pfn > start_pfn) {
4303 unsigned long temp_end_pfn = early_node_map[i].end_pfn;
4304 early_node_map[i].end_pfn = start_pfn;
4305 if (temp_end_pfn > end_pfn)
4306 add_active_range(nid, end_pfn, temp_end_pfn);
4307 continue;
4308 }
4309 if (early_node_map[i].start_pfn >= start_pfn &&
4310 early_node_map[i].end_pfn > end_pfn &&
4311 early_node_map[i].start_pfn < end_pfn) {
4312 early_node_map[i].start_pfn = end_pfn;
cc1a9d86 4313 continue;
c713216d 4314 }
cc1a9d86
YL
4315 }
4316
4317 if (!removed)
4318 return;
4319
4320 /* remove the blank ones */
4321 for (i = nr_nodemap_entries - 1; i > 0; i--) {
4322 if (early_node_map[i].nid != nid)
4323 continue;
4324 if (early_node_map[i].end_pfn)
4325 continue;
4326 /* we found it, get rid of it */
4327 for (j = i; j < nr_nodemap_entries - 1; j++)
4328 memcpy(&early_node_map[j], &early_node_map[j+1],
4329 sizeof(early_node_map[j]));
4330 j = nr_nodemap_entries - 1;
4331 memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
4332 nr_nodemap_entries--;
4333 }
c713216d
MG
4334}
4335
4336/**
4337 * remove_all_active_ranges - Remove all currently registered regions
88ca3b94 4338 *
c713216d
MG
4339 * During discovery, it may be found that a table like SRAT is invalid
4340 * and an alternative discovery method must be used. This function removes
4341 * all currently registered regions.
4342 */
88ca3b94 4343void __init remove_all_active_ranges(void)
c713216d
MG
4344{
4345 memset(early_node_map, 0, sizeof(early_node_map));
4346 nr_nodemap_entries = 0;
4347}
4348
4349/* Compare two active node_active_regions */
4350static int __init cmp_node_active_region(const void *a, const void *b)
4351{
4352 struct node_active_region *arange = (struct node_active_region *)a;
4353 struct node_active_region *brange = (struct node_active_region *)b;
4354
4355 /* Done this way to avoid overflows */
4356 if (arange->start_pfn > brange->start_pfn)
4357 return 1;
4358 if (arange->start_pfn < brange->start_pfn)
4359 return -1;
4360
4361 return 0;
4362}
4363
4364/* sort the node_map by start_pfn */
32996250 4365void __init sort_node_map(void)
c713216d
MG
4366{
4367 sort(early_node_map, (size_t)nr_nodemap_entries,
4368 sizeof(struct node_active_region),
4369 cmp_node_active_region, NULL);
4370}
4371
a6af2bc3 4372/* Find the lowest pfn for a node */
b69a7288 4373static unsigned long __init find_min_pfn_for_node(int nid)
c713216d
MG
4374{
4375 int i;
a6af2bc3 4376 unsigned long min_pfn = ULONG_MAX;
1abbfb41 4377
c713216d
MG
4378 /* Assuming a sorted map, the first range found has the starting pfn */
4379 for_each_active_range_index_in_nid(i, nid)
a6af2bc3 4380 min_pfn = min(min_pfn, early_node_map[i].start_pfn);
c713216d 4381
a6af2bc3
MG
4382 if (min_pfn == ULONG_MAX) {
4383 printk(KERN_WARNING
2bc0d261 4384 "Could not find start_pfn for node %d\n", nid);
a6af2bc3
MG
4385 return 0;
4386 }
4387
4388 return min_pfn;
c713216d
MG
4389}
4390
4391/**
4392 * find_min_pfn_with_active_regions - Find the minimum PFN registered
4393 *
4394 * It returns the minimum PFN based on information provided via
88ca3b94 4395 * add_active_range().
c713216d
MG
4396 */
4397unsigned long __init find_min_pfn_with_active_regions(void)
4398{
4399 return find_min_pfn_for_node(MAX_NUMNODES);
4400}
4401
37b07e41
LS
4402/*
4403 * early_calculate_totalpages()
4404 * Sum pages in active regions for movable zone.
4405 * Populate N_HIGH_MEMORY for calculating usable_nodes.
4406 */
484f51f8 4407static unsigned long __init early_calculate_totalpages(void)
7e63efef
MG
4408{
4409 int i;
4410 unsigned long totalpages = 0;
4411
37b07e41
LS
4412 for (i = 0; i < nr_nodemap_entries; i++) {
4413 unsigned long pages = early_node_map[i].end_pfn -
7e63efef 4414 early_node_map[i].start_pfn;
37b07e41
LS
4415 totalpages += pages;
4416 if (pages)
4417 node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
4418 }
4419 return totalpages;
7e63efef
MG
4420}
4421
2a1e274a
MG
4422/*
4423 * Find the PFN the Movable zone begins in each node. Kernel memory
4424 * is spread evenly between nodes as long as the nodes have enough
4425 * memory. When they don't, some nodes will have more kernelcore than
4426 * others
4427 */
b69a7288 4428static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
2a1e274a
MG
4429{
4430 int i, nid;
4431 unsigned long usable_startpfn;
4432 unsigned long kernelcore_node, kernelcore_remaining;
66918dcd
YL
 4433 /* save the state before borrowing the nodemask */
4434 nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
37b07e41
LS
4435 unsigned long totalpages = early_calculate_totalpages();
4436 int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
2a1e274a 4437
7e63efef
MG
4438 /*
4439 * If movablecore was specified, calculate what size of
4440 * kernelcore that corresponds so that memory usable for
4441 * any allocation type is evenly spread. If both kernelcore
4442 * and movablecore are specified, then the value of kernelcore
4443 * will be used for required_kernelcore if it's greater than
4444 * what movablecore would have allowed.
4445 */
4446 if (required_movablecore) {
7e63efef
MG
4447 unsigned long corepages;
4448
4449 /*
4450 * Round-up so that ZONE_MOVABLE is at least as large as what
4451 * was requested by the user
4452 */
4453 required_movablecore =
4454 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
4455 corepages = totalpages - required_movablecore;
4456
4457 required_kernelcore = max(required_kernelcore, corepages);
4458 }
4459
2a1e274a
MG
4460 /* If kernelcore was not specified, there is no ZONE_MOVABLE */
4461 if (!required_kernelcore)
66918dcd 4462 goto out;
2a1e274a
MG
4463
4464 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
4465 find_usable_zone_for_movable();
4466 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
4467
4468restart:
4469 /* Spread kernelcore memory as evenly as possible throughout nodes */
4470 kernelcore_node = required_kernelcore / usable_nodes;
37b07e41 4471 for_each_node_state(nid, N_HIGH_MEMORY) {
2a1e274a
MG
4472 /*
4473 * Recalculate kernelcore_node if the division per node
4474 * now exceeds what is necessary to satisfy the requested
4475 * amount of memory for the kernel
4476 */
4477 if (required_kernelcore < kernelcore_node)
4478 kernelcore_node = required_kernelcore / usable_nodes;
4479
4480 /*
4481 * As the map is walked, we track how much memory is usable
4482 * by the kernel using kernelcore_remaining. When it is
4483 * 0, the rest of the node is usable by ZONE_MOVABLE
4484 */
4485 kernelcore_remaining = kernelcore_node;
4486
4487 /* Go through each range of PFNs within this node */
4488 for_each_active_range_index_in_nid(i, nid) {
4489 unsigned long start_pfn, end_pfn;
4490 unsigned long size_pages;
4491
4492 start_pfn = max(early_node_map[i].start_pfn,
4493 zone_movable_pfn[nid]);
4494 end_pfn = early_node_map[i].end_pfn;
4495 if (start_pfn >= end_pfn)
4496 continue;
4497
4498 /* Account for what is only usable for kernelcore */
4499 if (start_pfn < usable_startpfn) {
4500 unsigned long kernel_pages;
4501 kernel_pages = min(end_pfn, usable_startpfn)
4502 - start_pfn;
4503
4504 kernelcore_remaining -= min(kernel_pages,
4505 kernelcore_remaining);
4506 required_kernelcore -= min(kernel_pages,
4507 required_kernelcore);
4508
4509 /* Continue if range is now fully accounted */
4510 if (end_pfn <= usable_startpfn) {
4511
4512 /*
4513 * Push zone_movable_pfn to the end so
4514 * that if we have to rebalance
4515 * kernelcore across nodes, we will
4516 * not double account here
4517 */
4518 zone_movable_pfn[nid] = end_pfn;
4519 continue;
4520 }
4521 start_pfn = usable_startpfn;
4522 }
4523
4524 /*
4525 * The usable PFN range for ZONE_MOVABLE is from
4526 * start_pfn->end_pfn. Calculate size_pages as the
4527 * number of pages used as kernelcore
4528 */
4529 size_pages = end_pfn - start_pfn;
4530 if (size_pages > kernelcore_remaining)
4531 size_pages = kernelcore_remaining;
4532 zone_movable_pfn[nid] = start_pfn + size_pages;
4533
4534 /*
4535 * Some kernelcore has been met, update counts and
4536 * break if the kernelcore for this node has been
 4537 * satisfied
4538 */
4539 required_kernelcore -= min(required_kernelcore,
4540 size_pages);
4541 kernelcore_remaining -= size_pages;
4542 if (!kernelcore_remaining)
4543 break;
4544 }
4545 }
4546
4547 /*
4548 * If there is still required_kernelcore, we do another pass with one
4549 * less node in the count. This will push zone_movable_pfn[nid] further
4550 * along on the nodes that still have memory until kernelcore is
 4551 * satisfied
4552 */
4553 usable_nodes--;
4554 if (usable_nodes && required_kernelcore > usable_nodes)
4555 goto restart;
4556
4557 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
4558 for (nid = 0; nid < MAX_NUMNODES; nid++)
4559 zone_movable_pfn[nid] =
4560 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
66918dcd
YL
4561
4562out:
4563 /* restore the node_state */
4564 node_states[N_HIGH_MEMORY] = saved_node_state;
2a1e274a
MG
4565}
4566
37b07e41
LS
4567/* Any regular memory on that node ? */
4568static void check_for_regular_memory(pg_data_t *pgdat)
4569{
4570#ifdef CONFIG_HIGHMEM
4571 enum zone_type zone_type;
4572
4573 for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
4574 struct zone *zone = &pgdat->node_zones[zone_type];
4575 if (zone->present_pages)
4576 node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
4577 }
4578#endif
4579}
4580
c713216d
MG
4581/**
4582 * free_area_init_nodes - Initialise all pg_data_t and zone data
88ca3b94 4583 * @max_zone_pfn: an array of max PFNs for each zone
c713216d
MG
4584 *
4585 * This will call free_area_init_node() for each active node in the system.
4586 * Using the page ranges provided by add_active_range(), the size of each
4587 * zone in each node and their holes is calculated. If the maximum PFN
4588 * between two adjacent zones match, it is assumed that the zone is empty.
4589 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
4590 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
4591 * starts where the previous one ended. For example, ZONE_DMA32 starts
4592 * at arch_max_dma_pfn.
4593 */
4594void __init free_area_init_nodes(unsigned long *max_zone_pfn)
4595{
4596 unsigned long nid;
db99100d 4597 int i;
c713216d 4598
a6af2bc3
MG
4599 /* Sort early_node_map as initialisation assumes it is sorted */
4600 sort_node_map();
4601
c713216d
MG
4602 /* Record where the zone boundaries are */
4603 memset(arch_zone_lowest_possible_pfn, 0,
4604 sizeof(arch_zone_lowest_possible_pfn));
4605 memset(arch_zone_highest_possible_pfn, 0,
4606 sizeof(arch_zone_highest_possible_pfn));
4607 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
4608 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
4609 for (i = 1; i < MAX_NR_ZONES; i++) {
2a1e274a
MG
4610 if (i == ZONE_MOVABLE)
4611 continue;
c713216d
MG
4612 arch_zone_lowest_possible_pfn[i] =
4613 arch_zone_highest_possible_pfn[i-1];
4614 arch_zone_highest_possible_pfn[i] =
4615 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
4616 }
2a1e274a
MG
4617 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
4618 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
4619
4620 /* Find the PFNs that ZONE_MOVABLE begins at in each node */
4621 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
4622 find_zone_movable_pfns_for_nodes(zone_movable_pfn);
c713216d 4623
c713216d
MG
4624 /* Print out the zone ranges */
4625 printk("Zone PFN ranges:\n");
2a1e274a
MG
4626 for (i = 0; i < MAX_NR_ZONES; i++) {
4627 if (i == ZONE_MOVABLE)
4628 continue;
72f0ba02
DR
4629 printk(" %-8s ", zone_names[i]);
4630 if (arch_zone_lowest_possible_pfn[i] ==
4631 arch_zone_highest_possible_pfn[i])
4632 printk("empty\n");
4633 else
4634 printk("%0#10lx -> %0#10lx\n",
c713216d
MG
4635 arch_zone_lowest_possible_pfn[i],
4636 arch_zone_highest_possible_pfn[i]);
2a1e274a
MG
4637 }
4638
4639 /* Print out the PFNs ZONE_MOVABLE begins at in each node */
4640 printk("Movable zone start PFN for each node\n");
4641 for (i = 0; i < MAX_NUMNODES; i++) {
4642 if (zone_movable_pfn[i])
4643 printk(" Node %d: %lu\n", i, zone_movable_pfn[i]);
4644 }
c713216d
MG
4645
4646 /* Print out the early_node_map[] */
4647 printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
4648 for (i = 0; i < nr_nodemap_entries; i++)
5dab8ec1 4649 printk(" %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
c713216d
MG
4650 early_node_map[i].start_pfn,
4651 early_node_map[i].end_pfn);
4652
4653 /* Initialise every node */
708614e6 4654 mminit_verify_pageflags_layout();
8ef82866 4655 setup_nr_node_ids();
c713216d
MG
4656 for_each_online_node(nid) {
4657 pg_data_t *pgdat = NODE_DATA(nid);
9109fb7b 4658 free_area_init_node(nid, NULL,
c713216d 4659 find_min_pfn_for_node(nid), NULL);
37b07e41
LS
4660
4661 /* Any memory on that node */
4662 if (pgdat->node_present_pages)
4663 node_set_state(nid, N_HIGH_MEMORY);
4664 check_for_regular_memory(pgdat);
c713216d
MG
4665 }
4666}
2a1e274a 4667
7e63efef 4668static int __init cmdline_parse_core(char *p, unsigned long *core)
2a1e274a
MG
4669{
4670 unsigned long long coremem;
4671 if (!p)
4672 return -EINVAL;
4673
4674 coremem = memparse(p, &p);
7e63efef 4675 *core = coremem >> PAGE_SHIFT;
2a1e274a 4676
7e63efef 4677 /* Paranoid check that UL is enough for the coremem value */
2a1e274a
MG
4678 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
4679
4680 return 0;
4681}
ed7ed365 4682
7e63efef
MG
4683/*
 4684 * kernelcore=size sets the amount of memory to be used for allocations that
4685 * cannot be reclaimed or migrated.
4686 */
4687static int __init cmdline_parse_kernelcore(char *p)
4688{
4689 return cmdline_parse_core(p, &required_kernelcore);
4690}
4691
4692/*
 4693 * movablecore=size sets the amount of memory to be used for allocations that
4694 * can be reclaimed or migrated.
4695 */
4696static int __init cmdline_parse_movablecore(char *p)
4697{
4698 return cmdline_parse_core(p, &required_movablecore);
4699}
4700
ed7ed365 4701early_param("kernelcore", cmdline_parse_kernelcore);
7e63efef 4702early_param("movablecore", cmdline_parse_movablecore);
ed7ed365 4703
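/*
 * Worked example (illustrative; assumes 4KiB pages): booting with
 * "kernelcore=512M" makes memparse() return 512 << 20, so
 * required_kernelcore becomes (512 << 20) >> PAGE_SHIFT = 131072 pages,
 * which find_zone_movable_pfns_for_nodes() then spreads across the nodes
 * that have memory.
 */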
c713216d
MG
4704#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
4705
0e0b864e 4706/**
88ca3b94
RD
4707 * set_dma_reserve - set the specified number of pages reserved in the first zone
4708 * @new_dma_reserve: The number of pages to mark reserved
0e0b864e
MG
4709 *
4710 * The per-cpu batchsize and zone watermarks are determined by present_pages.
4711 * In the DMA zone, a significant percentage may be consumed by kernel image
4712 * and other unfreeable allocations which can skew the watermarks badly. This
88ca3b94
RD
4713 * function may optionally be used to account for unfreeable pages in the
4714 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
4715 * smaller per-cpu batchsize.
0e0b864e
MG
4716 */
4717void __init set_dma_reserve(unsigned long new_dma_reserve)
4718{
4719 dma_reserve = new_dma_reserve;
4720}
4721
93b7504e 4722#ifndef CONFIG_NEED_MULTIPLE_NODES
08677214
YL
4723struct pglist_data __refdata contig_page_data = {
4724#ifndef CONFIG_NO_BOOTMEM
4725 .bdata = &bootmem_node_data[0]
4726#endif
4727 };
1da177e4 4728EXPORT_SYMBOL(contig_page_data);
93b7504e 4729#endif
1da177e4
LT
4730
4731void __init free_area_init(unsigned long *zones_size)
4732{
9109fb7b 4733 free_area_init_node(0, zones_size,
1da177e4
LT
4734 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
4735}
1da177e4 4736
1da177e4
LT
4737static int page_alloc_cpu_notify(struct notifier_block *self,
4738 unsigned long action, void *hcpu)
4739{
4740 int cpu = (unsigned long)hcpu;
1da177e4 4741
8bb78442 4742 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
9f8f2172
CL
4743 drain_pages(cpu);
4744
4745 /*
4746 * Spill the event counters of the dead processor
 4747 * into the current processor's event counters.
4748 * This artificially elevates the count of the current
4749 * processor.
4750 */
f8891e5e 4751 vm_events_fold_cpu(cpu);
9f8f2172
CL
4752
4753 /*
4754 * Zero the differential counters of the dead processor
4755 * so that the vm statistics are consistent.
4756 *
4757 * This is only okay since the processor is dead and cannot
4758 * race with what we are doing.
4759 */
2244b95a 4760 refresh_cpu_vm_stats(cpu);
1da177e4
LT
4761 }
4762 return NOTIFY_OK;
4763}
1da177e4
LT
4764
4765void __init page_alloc_init(void)
4766{
4767 hotcpu_notifier(page_alloc_cpu_notify, 0);
4768}
4769
cb45b0e9
HA
4770/*
 4771 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
4772 * or min_free_kbytes changes.
4773 */
4774static void calculate_totalreserve_pages(void)
4775{
4776 struct pglist_data *pgdat;
4777 unsigned long reserve_pages = 0;
2f6726e5 4778 enum zone_type i, j;
cb45b0e9
HA
4779
4780 for_each_online_pgdat(pgdat) {
4781 for (i = 0; i < MAX_NR_ZONES; i++) {
4782 struct zone *zone = pgdat->node_zones + i;
4783 unsigned long max = 0;
4784
4785 /* Find valid and maximum lowmem_reserve in the zone */
4786 for (j = i; j < MAX_NR_ZONES; j++) {
4787 if (zone->lowmem_reserve[j] > max)
4788 max = zone->lowmem_reserve[j];
4789 }
4790
41858966
MG
4791 /* we treat the high watermark as reserved pages. */
4792 max += high_wmark_pages(zone);
cb45b0e9
HA
4793
4794 if (max > zone->present_pages)
4795 max = zone->present_pages;
4796 reserve_pages += max;
4797 }
4798 }
4799 totalreserve_pages = reserve_pages;
4800}
4801
1da177e4
LT
4802/*
4803 * setup_per_zone_lowmem_reserve - called whenever
 4804 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
4805 * has a correct pages reserved value, so an adequate number of
4806 * pages are left in the zone after a successful __alloc_pages().
4807 */
4808static void setup_per_zone_lowmem_reserve(void)
4809{
4810 struct pglist_data *pgdat;
2f6726e5 4811 enum zone_type j, idx;
1da177e4 4812
ec936fc5 4813 for_each_online_pgdat(pgdat) {
1da177e4
LT
4814 for (j = 0; j < MAX_NR_ZONES; j++) {
4815 struct zone *zone = pgdat->node_zones + j;
4816 unsigned long present_pages = zone->present_pages;
4817
4818 zone->lowmem_reserve[j] = 0;
4819
2f6726e5
CL
4820 idx = j;
4821 while (idx) {
1da177e4
LT
4822 struct zone *lower_zone;
4823
2f6726e5
CL
4824 idx--;
4825
1da177e4
LT
4826 if (sysctl_lowmem_reserve_ratio[idx] < 1)
4827 sysctl_lowmem_reserve_ratio[idx] = 1;
4828
4829 lower_zone = pgdat->node_zones + idx;
4830 lower_zone->lowmem_reserve[j] = present_pages /
4831 sysctl_lowmem_reserve_ratio[idx];
4832 present_pages += lower_zone->present_pages;
4833 }
4834 }
4835 }
cb45b0e9
HA
4836
4837 /* update totalreserve_pages */
4838 calculate_totalreserve_pages();
1da177e4
LT
4839}
4840
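/*
 * Worked example (illustrative; assumes a simple two-zone layout and the
 * default ratio of 256 for ZONE_DMA): if ZONE_NORMAL has 1000000 present
 * pages above a small ZONE_DMA, then ZONE_DMA's lowmem_reserve for
 * ZONE_NORMAL allocations becomes 1000000 / 256 = 3906 pages that such
 * allocations must leave free in the DMA zone.
 */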
88ca3b94 4841/**
bc75d33f 4842 * setup_per_zone_wmarks - called when min_free_kbytes changes
bce7394a 4843 * or when memory is hot-{added|removed}
88ca3b94 4844 *
bc75d33f
MK
4845 * Ensures that the watermark[min,low,high] values for each zone are set
4846 * correctly with respect to min_free_kbytes.
1da177e4 4847 */
bc75d33f 4848void setup_per_zone_wmarks(void)
1da177e4
LT
4849{
4850 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
4851 unsigned long lowmem_pages = 0;
4852 struct zone *zone;
4853 unsigned long flags;
4854
4855 /* Calculate total number of !ZONE_HIGHMEM pages */
4856 for_each_zone(zone) {
4857 if (!is_highmem(zone))
4858 lowmem_pages += zone->present_pages;
4859 }
4860
4861 for_each_zone(zone) {
ac924c60
AM
4862 u64 tmp;
4863
1125b4e3 4864 spin_lock_irqsave(&zone->lock, flags);
ac924c60
AM
4865 tmp = (u64)pages_min * zone->present_pages;
4866 do_div(tmp, lowmem_pages);
1da177e4
LT
4867 if (is_highmem(zone)) {
4868 /*
669ed175
NP
4869 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
4870 * need highmem pages, so cap pages_min to a small
4871 * value here.
4872 *
41858966 4873 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
669ed175
NP
 4874 * deltas control async page reclaim, and so should
4875 * not be capped for highmem.
1da177e4
LT
4876 */
4877 int min_pages;
4878
4879 min_pages = zone->present_pages / 1024;
4880 if (min_pages < SWAP_CLUSTER_MAX)
4881 min_pages = SWAP_CLUSTER_MAX;
4882 if (min_pages > 128)
4883 min_pages = 128;
41858966 4884 zone->watermark[WMARK_MIN] = min_pages;
1da177e4 4885 } else {
669ed175
NP
4886 /*
4887 * If it's a lowmem zone, reserve a number of pages
1da177e4
LT
4888 * proportionate to the zone's size.
4889 */
41858966 4890 zone->watermark[WMARK_MIN] = tmp;
1da177e4
LT
4891 }
4892
41858966
MG
4893 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2);
4894 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
56fd56b8 4895 setup_zone_migrate_reserve(zone);
1125b4e3 4896 spin_unlock_irqrestore(&zone->lock, flags);
1da177e4 4897 }
cb45b0e9
HA
4898
4899 /* update totalreserve_pages */
4900 calculate_totalreserve_pages();
1da177e4
LT
4901}
4902
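/*
 * Worked example (illustrative; assumes 4KiB pages and min_free_kbytes ==
 * 4096): pages_min = 4096 >> (PAGE_SHIFT - 10) = 1024.  A lowmem zone
 * holding half of all lowmem pages then gets WMARK_MIN = 512 pages,
 * WMARK_LOW = 512 + 512/4 = 640 and WMARK_HIGH = 512 + 512/2 = 768.
 */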
55a4462a 4903/*
556adecb
RR
4904 * The inactive anon list should be small enough that the VM never has to
4905 * do too much work, but large enough that each inactive page has a chance
4906 * to be referenced again before it is swapped out.
4907 *
4908 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
4909 * INACTIVE_ANON pages on this zone's LRU, maintained by the
4910 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
4911 * the anonymous pages are kept on the inactive list.
4912 *
4913 * total target max
4914 * memory ratio inactive anon
4915 * -------------------------------------
4916 * 10MB 1 5MB
4917 * 100MB 1 50MB
4918 * 1GB 3 250MB
4919 * 10GB 10 0.9GB
4920 * 100GB 31 3GB
4921 * 1TB 101 10GB
4922 * 10TB 320 32GB
4923 */
96cb4df5 4924void calculate_zone_inactive_ratio(struct zone *zone)
556adecb 4925{
96cb4df5 4926 unsigned int gb, ratio;
556adecb 4927
96cb4df5
MK
4928 /* Zone size in gigabytes */
4929 gb = zone->present_pages >> (30 - PAGE_SHIFT);
4930 if (gb)
556adecb 4931 ratio = int_sqrt(10 * gb);
96cb4df5
MK
4932 else
4933 ratio = 1;
556adecb 4934
96cb4df5
MK
4935 zone->inactive_ratio = ratio;
4936}
556adecb 4937
96cb4df5
MK
4938static void __init setup_per_zone_inactive_ratio(void)
4939{
4940 struct zone *zone;
4941
4942 for_each_zone(zone)
4943 calculate_zone_inactive_ratio(zone);
556adecb
RR
4944}
4945
1da177e4
LT
4946/*
4947 * Initialise min_free_kbytes.
4948 *
4949 * For small machines we want it small (128k min). For large machines
4950 * we want it large (64MB max). But it is not linear, because network
4951 * bandwidth does not increase linearly with machine size. We use
4952 *
4953 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
4954 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
4955 *
4956 * which yields
4957 *
4958 * 16MB: 512k
4959 * 32MB: 724k
4960 * 64MB: 1024k
4961 * 128MB: 1448k
4962 * 256MB: 2048k
4963 * 512MB: 2896k
4964 * 1024MB: 4096k
4965 * 2048MB: 5792k
4966 * 4096MB: 8192k
4967 * 8192MB: 11584k
4968 * 16384MB: 16384k
4969 */
bc75d33f 4970static int __init init_per_zone_wmark_min(void)
1da177e4
LT
4971{
4972 unsigned long lowmem_kbytes;
4973
4974 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
4975
4976 min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
4977 if (min_free_kbytes < 128)
4978 min_free_kbytes = 128;
4979 if (min_free_kbytes > 65536)
4980 min_free_kbytes = 65536;
bc75d33f 4981 setup_per_zone_wmarks();
1da177e4 4982 setup_per_zone_lowmem_reserve();
556adecb 4983 setup_per_zone_inactive_ratio();
1da177e4
LT
4984 return 0;
4985}
bc75d33f 4986module_init(init_per_zone_wmark_min)
1da177e4
LT
4987
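/*
 * For instance (matching the 128MB row of the table above): with
 * lowmem_kbytes = 131072, int_sqrt(131072 * 16) = int_sqrt(2097152) = 1448,
 * which sits between the 128k floor and the 65536k ceiling, so
 * min_free_kbytes ends up as 1448 before the watermarks are derived from it.
 */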
4988/*
 4989 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
 4990 * that we can call setup_per_zone_wmarks() whenever min_free_kbytes
 4991 * changes.
4992 */
4993int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
8d65af78 4994 void __user *buffer, size_t *length, loff_t *ppos)
1da177e4 4995{
8d65af78 4996 proc_dointvec(table, write, buffer, length, ppos);
3b1d92c5 4997 if (write)
bc75d33f 4998 setup_per_zone_wmarks();
1da177e4
LT
4999 return 0;
5000}
5001
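/*
 * This is the handler behind /proc/sys/vm/min_free_kbytes; for example
 *	echo 65536 > /proc/sys/vm/min_free_kbytes
 * stores the new value via proc_dointvec() and, because write != 0,
 * immediately recomputes every zone's min/low/high watermarks.
 */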
9614634f
CL
5002#ifdef CONFIG_NUMA
5003int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
8d65af78 5004 void __user *buffer, size_t *length, loff_t *ppos)
9614634f
CL
5005{
5006 struct zone *zone;
5007 int rc;
5008
8d65af78 5009 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
9614634f
CL
5010 if (rc)
5011 return rc;
5012
5013 for_each_zone(zone)
8417bba4 5014 zone->min_unmapped_pages = (zone->present_pages *
9614634f
CL
5015 sysctl_min_unmapped_ratio) / 100;
5016 return 0;
5017}
0ff38490
CL
5018
5019int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
8d65af78 5020 void __user *buffer, size_t *length, loff_t *ppos)
0ff38490
CL
5021{
5022 struct zone *zone;
5023 int rc;
5024
8d65af78 5025 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
0ff38490
CL
5026 if (rc)
5027 return rc;
5028
5029 for_each_zone(zone)
5030 zone->min_slab_pages = (zone->present_pages *
5031 sysctl_min_slab_ratio) / 100;
5032 return 0;
5033}
9614634f
CL
5034#endif
5035
1da177e4
LT
5036/*
5037 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
5038 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
5039 * whenever sysctl_lowmem_reserve_ratio changes.
5040 *
 5041 * The reserve ratio obviously has absolutely no relation to the
41858966 5042 * minimum watermarks. The lowmem reserve ratio only makes sense
1da177e4
LT
 5043 * when viewed as a function of the boot-time zone sizes.
5044 */
5045int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
8d65af78 5046 void __user *buffer, size_t *length, loff_t *ppos)
1da177e4 5047{
8d65af78 5048 proc_dointvec_minmax(table, write, buffer, length, ppos);
1da177e4
LT
5049 setup_per_zone_lowmem_reserve();
5050 return 0;
5051}
5052
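/*
 * Example (values are illustrative): on a kernel with DMA, DMA32 and Normal
 * zones,
 *	echo "256 256 32" > /proc/sys/vm/lowmem_reserve_ratio
 * updates sysctl_lowmem_reserve_ratio[] and the handler above then calls
 * setup_per_zone_lowmem_reserve() to rebuild every zone->lowmem_reserve[]
 * array from the current zone sizes.
 */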
8ad4b1fb
RS
5053/*
 5054 * percpu_pagelist_fraction - changes pcp->high for each zone on each
 5055 * cpu: a hot per-cpu pagelist may hold at most 1/percpu_pagelist_fraction
 5056 * of the zone's pages before it gets flushed back to the buddy allocator.
5057 */
5058
5059int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
8d65af78 5060 void __user *buffer, size_t *length, loff_t *ppos)
8ad4b1fb
RS
5061{
5062 struct zone *zone;
5063 unsigned int cpu;
5064 int ret;
5065
8d65af78 5066 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
8ad4b1fb
RS
5067 if (!write || (ret == -EINVAL))
5068 return ret;
364df0eb 5069 for_each_populated_zone(zone) {
99dcc3e5 5070 for_each_possible_cpu(cpu) {
8ad4b1fb
RS
5071 unsigned long high;
5072 high = zone->present_pages / percpu_pagelist_fraction;
99dcc3e5
CL
5073 setup_pagelist_highmark(
5074 per_cpu_ptr(zone->pageset, cpu), high);
8ad4b1fb
RS
5075 }
5076 }
5077 return 0;
5078}
5079
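/*
 * Worked example (illustrative figures): for a zone with 262144 present
 * pages, writing 1024 to /proc/sys/vm/percpu_pagelist_fraction makes the
 * loop above set each CPU's pcp->high for that zone to 262144 / 1024 = 256
 * pages; setup_pagelist_highmark() derives the batch size from that value.
 */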
f034b5d4 5080int hashdist = HASHDIST_DEFAULT;
1da177e4
LT
5081
5082#ifdef CONFIG_NUMA
5083static int __init set_hashdist(char *str)
5084{
5085 if (!str)
5086 return 0;
5087 hashdist = simple_strtoul(str, &str, 0);
5088 return 1;
5089}
5090__setup("hashdist=", set_hashdist);
5091#endif
5092
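/*
 * For example, booting a NUMA kernel with "hashdist=0" makes
 * alloc_large_system_hash() below use bootmem or alloc_pages_exact()
 * instead of __vmalloc(), while "hashdist=1" requests the vmalloc path so
 * the large boot-time hash tables end up spread across nodes.
 */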
5093/*
5094 * allocate a large system hash table from bootmem
5095 * - it is assumed that the hash table must contain an exact power-of-2
5096 * quantity of entries
5097 * - limit is the number of hash buckets, not the total allocation size
5098 */
5099void *__init alloc_large_system_hash(const char *tablename,
5100 unsigned long bucketsize,
5101 unsigned long numentries,
5102 int scale,
5103 int flags,
5104 unsigned int *_hash_shift,
5105 unsigned int *_hash_mask,
5106 unsigned long limit)
5107{
5108 unsigned long long max = limit;
5109 unsigned long log2qty, size;
5110 void *table = NULL;
5111
5112 /* allow the kernel cmdline to have a say */
5113 if (!numentries) {
5114 /* round applicable memory size up to nearest megabyte */
04903664 5115 numentries = nr_kernel_pages;
1da177e4
LT
5116 numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
5117 numentries >>= 20 - PAGE_SHIFT;
5118 numentries <<= 20 - PAGE_SHIFT;
5119
5120 /* limit to 1 bucket per 2^scale bytes of low memory */
5121 if (scale > PAGE_SHIFT)
5122 numentries >>= (scale - PAGE_SHIFT);
5123 else
5124 numentries <<= (PAGE_SHIFT - scale);
9ab37b8f
PM
5125
5126 /* Make sure we've got at least a 0-order allocation.. */
2c85f51d
JB
5127 if (unlikely(flags & HASH_SMALL)) {
5128 /* Makes no sense without HASH_EARLY */
5129 WARN_ON(!(flags & HASH_EARLY));
5130 if (!(numentries >> *_hash_shift)) {
5131 numentries = 1UL << *_hash_shift;
5132 BUG_ON(!numentries);
5133 }
5134 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
9ab37b8f 5135 numentries = PAGE_SIZE / bucketsize;
1da177e4 5136 }
6e692ed3 5137 numentries = roundup_pow_of_two(numentries);
1da177e4
LT
5138
5139 /* limit allocation size to 1/16 total memory by default */
5140 if (max == 0) {
5141 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
5142 do_div(max, bucketsize);
5143 }
5144
5145 if (numentries > max)
5146 numentries = max;
5147
f0d1b0b3 5148 log2qty = ilog2(numentries);
1da177e4
LT
5149
5150 do {
5151 size = bucketsize << log2qty;
5152 if (flags & HASH_EARLY)
74768ed8 5153 table = alloc_bootmem_nopanic(size);
1da177e4
LT
5154 else if (hashdist)
5155 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
5156 else {
1037b83b
ED
5157 /*
5158 * If bucketsize is not a power-of-two, we may free
a1dd268c
MG
5159 * some pages at the end of hash table which
5160 * alloc_pages_exact() automatically does
1037b83b 5161 */
264ef8a9 5162 if (get_order(size) < MAX_ORDER) {
a1dd268c 5163 table = alloc_pages_exact(size, GFP_ATOMIC);
264ef8a9
CM
5164 kmemleak_alloc(table, size, 1, GFP_ATOMIC);
5165 }
1da177e4
LT
5166 }
5167 } while (!table && size > PAGE_SIZE && --log2qty);
5168
5169 if (!table)
5170 panic("Failed to allocate %s hash table\n", tablename);
5171
b49ad484 5172 printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n",
1da177e4
LT
5173 tablename,
5174 (1U << log2qty),
f0d1b0b3 5175 ilog2(size) - PAGE_SHIFT,
1da177e4
LT
5176 size);
5177
5178 if (_hash_shift)
5179 *_hash_shift = log2qty;
5180 if (_hash_mask)
5181 *_hash_mask = (1 << log2qty) - 1;
5182
5183 return table;
5184}
a117e66e 5185
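/*
 * Worked sizing example (illustrative figures): with numentries == 0,
 * nr_kernel_pages = 262144 (1 GiB of 4 KiB pages), scale = 17 and
 * bucketsize = 64, the code above derives 262144 >> (17 - 12) = 8192
 * entries.  That is already a power of two and well below the default
 * 1/16-of-memory cap, so log2qty = 13 and the table takes
 * 64 << 13 = 512 KiB.
 */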
835c134e
MG
5186/* Return a pointer to the bitmap storing bits affecting a block of pages */
5187static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
5188 unsigned long pfn)
5189{
5190#ifdef CONFIG_SPARSEMEM
5191 return __pfn_to_section(pfn)->pageblock_flags;
5192#else
5193 return zone->pageblock_flags;
5194#endif /* CONFIG_SPARSEMEM */
5195}
5196
5197static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
5198{
5199#ifdef CONFIG_SPARSEMEM
5200 pfn &= (PAGES_PER_SECTION-1);
d9c23400 5201 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
835c134e
MG
5202#else
5203 pfn = pfn - zone->zone_start_pfn;
d9c23400 5204 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
835c134e
MG
5205#endif /* CONFIG_SPARSEMEM */
5206}
5207
5208/**
d9c23400 5209 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
835c134e
MG
5210 * @page: The page within the block of interest
5211 * @start_bitidx: The first bit of interest to retrieve
5212 * @end_bitidx: The last bit of interest
5213 * returns pageblock_bits flags
5214 */
5215unsigned long get_pageblock_flags_group(struct page *page,
5216 int start_bitidx, int end_bitidx)
5217{
5218 struct zone *zone;
5219 unsigned long *bitmap;
5220 unsigned long pfn, bitidx;
5221 unsigned long flags = 0;
5222 unsigned long value = 1;
5223
5224 zone = page_zone(page);
5225 pfn = page_to_pfn(page);
5226 bitmap = get_pageblock_bitmap(zone, pfn);
5227 bitidx = pfn_to_bitidx(zone, pfn);
5228
5229 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
5230 if (test_bit(bitidx + start_bitidx, bitmap))
5231 flags |= value;
6220ec78 5232
835c134e
MG
5233 return flags;
5234}
5235
5236/**
d9c23400 5237 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
835c134e
MG
5238 * @page: The page within the block of interest
5239 * @start_bitidx: The first bit of interest
5240 * @end_bitidx: The last bit of interest
5241 * @flags: The flags to set
5242 */
5243void set_pageblock_flags_group(struct page *page, unsigned long flags,
5244 int start_bitidx, int end_bitidx)
5245{
5246 struct zone *zone;
5247 unsigned long *bitmap;
5248 unsigned long pfn, bitidx;
5249 unsigned long value = 1;
5250
5251 zone = page_zone(page);
5252 pfn = page_to_pfn(page);
5253 bitmap = get_pageblock_bitmap(zone, pfn);
5254 bitidx = pfn_to_bitidx(zone, pfn);
86051ca5
KH
5255 VM_BUG_ON(pfn < zone->zone_start_pfn);
5256 VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
835c134e
MG
5257
5258 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
5259 if (flags & value)
5260 __set_bit(bitidx + start_bitidx, bitmap);
5261 else
5262 __clear_bit(bitidx + start_bitidx, bitmap);
5263}
a5d76b54
KH
5264
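/*
 * The usual consumers of the two helpers above are the migratetype
 * accessors: get_pageblock_migratetype() in <linux/mmzone.h> is essentially
 *	get_pageblock_flags_group(page, PB_migrate, PB_migrate_end)
 * and set_pageblock_migratetype() stores a type the same way, so each
 * pageblock's type costs a few bits in the zone/section bitmap instead of a
 * field in every struct page.
 */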
5265/*
 5266 * This is designed as a helper function; please see page_isolation.c as well.
 5267 * It sets/clears a pageblock's migratetype to/from ISOLATE.
 5268 * The page allocator never allocates memory from an ISOLATE block.
5269 */
5270
5271int set_migratetype_isolate(struct page *page)
5272{
5273 struct zone *zone;
925cc71e
RJ
5274 struct page *curr_page;
5275 unsigned long flags, pfn, iter;
5276 unsigned long immobile = 0;
5277 struct memory_isolate_notify arg;
5278 int notifier_ret;
a5d76b54 5279 int ret = -EBUSY;
8e7e40d9 5280 int zone_idx;
a5d76b54
KH
5281
5282 zone = page_zone(page);
8e7e40d9 5283 zone_idx = zone_idx(zone);
925cc71e 5284
a5d76b54 5285 spin_lock_irqsave(&zone->lock, flags);
925cc71e
RJ
5286 if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE ||
5287 zone_idx == ZONE_MOVABLE) {
5288 ret = 0;
5289 goto out;
5290 }
5291
5292 pfn = page_to_pfn(page);
5293 arg.start_pfn = pfn;
5294 arg.nr_pages = pageblock_nr_pages;
5295 arg.pages_found = 0;
5296
a5d76b54 5297 /*
925cc71e
RJ
5298 * It may be possible to isolate a pageblock even if the
5299 * migratetype is not MIGRATE_MOVABLE. The memory isolation
5300 * notifier chain is used by balloon drivers to return the
5301 * number of pages in a range that are held by the balloon
5302 * driver to shrink memory. If all the pages are accounted for
5303 * by balloons, are free, or on the LRU, isolation can continue.
5304 * Later, for example, when memory hotplug notifier runs, these
5305 * pages reported as "can be isolated" should be isolated(freed)
5306 * by the balloon driver through the memory notifier chain.
a5d76b54 5307 */
925cc71e
RJ
5308 notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
5309 notifier_ret = notifier_to_errno(notifier_ret);
5310 if (notifier_ret || !arg.pages_found)
a5d76b54 5311 goto out;
925cc71e
RJ
5312
5313 for (iter = pfn; iter < (pfn + pageblock_nr_pages); iter++) {
 5314 if (!pfn_valid_within(iter))
5315 continue;
5316
5317 curr_page = pfn_to_page(iter);
5318 if (!page_count(curr_page) || PageLRU(curr_page))
5319 continue;
5320
5321 immobile++;
5322 }
5323
5324 if (arg.pages_found == immobile)
5325 ret = 0;
5326
a5d76b54 5327out:
925cc71e
RJ
5328 if (!ret) {
5329 set_pageblock_migratetype(page, MIGRATE_ISOLATE);
5330 move_freepages_block(zone, page, MIGRATE_ISOLATE);
5331 }
5332
a5d76b54
KH
5333 spin_unlock_irqrestore(&zone->lock, flags);
5334 if (!ret)
9f8f2172 5335 drain_all_pages();
a5d76b54
KH
5336 return ret;
5337}
5338
5339void unset_migratetype_isolate(struct page *page)
5340{
5341 struct zone *zone;
5342 unsigned long flags;
5343 zone = page_zone(page);
5344 spin_lock_irqsave(&zone->lock, flags);
5345 if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
5346 goto out;
5347 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
5348 move_freepages_block(zone, page, MIGRATE_MOVABLE);
5349out:
5350 spin_unlock_irqrestore(&zone->lock, flags);
5351}
0c0e6195
KH
5352
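/*
 * The pair above is normally driven from mm/page_isolation.c:
 * start_isolate_page_range() marks every pageblock in a range ISOLATE
 * (memory hot-remove uses this before migrating pages away), and
 * undo_isolate_page_range() calls unset_migratetype_isolate() to back out
 * when offlining fails.
 */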
5353#ifdef CONFIG_MEMORY_HOTREMOVE
5354/*
5355 * All pages in the range must be isolated before calling this.
5356 */
5357void
5358__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
5359{
5360 struct page *page;
5361 struct zone *zone;
5362 int order, i;
5363 unsigned long pfn;
5364 unsigned long flags;
5365 /* find the first valid pfn */
5366 for (pfn = start_pfn; pfn < end_pfn; pfn++)
5367 if (pfn_valid(pfn))
5368 break;
5369 if (pfn == end_pfn)
5370 return;
5371 zone = page_zone(pfn_to_page(pfn));
5372 spin_lock_irqsave(&zone->lock, flags);
5373 pfn = start_pfn;
5374 while (pfn < end_pfn) {
5375 if (!pfn_valid(pfn)) {
5376 pfn++;
5377 continue;
5378 }
5379 page = pfn_to_page(pfn);
5380 BUG_ON(page_count(page));
5381 BUG_ON(!PageBuddy(page));
5382 order = page_order(page);
5383#ifdef CONFIG_DEBUG_VM
5384 printk(KERN_INFO "remove from free list %lx %d %lx\n",
5385 pfn, 1 << order, end_pfn);
5386#endif
5387 list_del(&page->lru);
5388 rmv_page_order(page);
5389 zone->free_area[order].nr_free--;
5390 __mod_zone_page_state(zone, NR_FREE_PAGES,
5391 - (1UL << order));
5392 for (i = 0; i < (1 << order); i++)
5393 SetPageReserved((page+i));
5394 pfn += (1 << order);
5395 }
5396 spin_unlock_irqrestore(&zone->lock, flags);
5397}
5398#endif
8d22ba1b
WF
5399
5400#ifdef CONFIG_MEMORY_FAILURE
5401bool is_free_buddy_page(struct page *page)
5402{
5403 struct zone *zone = page_zone(page);
5404 unsigned long pfn = page_to_pfn(page);
5405 unsigned long flags;
5406 int order;
5407
5408 spin_lock_irqsave(&zone->lock, flags);
5409 for (order = 0; order < MAX_ORDER; order++) {
5410 struct page *page_head = page - (pfn & ((1 << order) - 1));
5411
5412 if (PageBuddy(page_head) && page_order(page_head) >= order)
5413 break;
5414 }
5415 spin_unlock_irqrestore(&zone->lock, flags);
5416
5417 return order < MAX_ORDER;
5418}
5419#endif
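/*
 * Worked example (illustrative pfn): for a page at pfn 0x1003 lying inside a
 * free order-2 buddy block that starts at pfn 0x1000, the loop above masks
 * off the low (1 << 2) - 1 bits at order == 2, finds PageBuddy set on the
 * head page with page_order() == 2 >= order, breaks, and the function
 * returns true.
 */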
718a3821
WF
5420
5421static struct trace_print_flags pageflag_names[] = {
5422 {1UL << PG_locked, "locked" },
5423 {1UL << PG_error, "error" },
5424 {1UL << PG_referenced, "referenced" },
5425 {1UL << PG_uptodate, "uptodate" },
5426 {1UL << PG_dirty, "dirty" },
5427 {1UL << PG_lru, "lru" },
5428 {1UL << PG_active, "active" },
5429 {1UL << PG_slab, "slab" },
5430 {1UL << PG_owner_priv_1, "owner_priv_1" },
5431 {1UL << PG_arch_1, "arch_1" },
5432 {1UL << PG_reserved, "reserved" },
5433 {1UL << PG_private, "private" },
5434 {1UL << PG_private_2, "private_2" },
5435 {1UL << PG_writeback, "writeback" },
5436#ifdef CONFIG_PAGEFLAGS_EXTENDED
5437 {1UL << PG_head, "head" },
5438 {1UL << PG_tail, "tail" },
5439#else
5440 {1UL << PG_compound, "compound" },
5441#endif
5442 {1UL << PG_swapcache, "swapcache" },
5443 {1UL << PG_mappedtodisk, "mappedtodisk" },
5444 {1UL << PG_reclaim, "reclaim" },
5445 {1UL << PG_buddy, "buddy" },
5446 {1UL << PG_swapbacked, "swapbacked" },
5447 {1UL << PG_unevictable, "unevictable" },
5448#ifdef CONFIG_MMU
5449 {1UL << PG_mlocked, "mlocked" },
5450#endif
5451#ifdef CONFIG_ARCH_USES_PG_UNCACHED
5452 {1UL << PG_uncached, "uncached" },
5453#endif
5454#ifdef CONFIG_MEMORY_FAILURE
5455 {1UL << PG_hwpoison, "hwpoison" },
5456#endif
5457 {-1UL, NULL },
5458};
5459
5460static void dump_page_flags(unsigned long flags)
5461{
5462 const char *delim = "";
5463 unsigned long mask;
5464 int i;
5465
5466 printk(KERN_ALERT "page flags: %#lx(", flags);
5467
 5468 /* remove the zone/node/section id bits above NR_PAGEFLAGS */
5469 flags &= (1UL << NR_PAGEFLAGS) - 1;
5470
5471 for (i = 0; pageflag_names[i].name && flags; i++) {
5472
5473 mask = pageflag_names[i].mask;
5474 if ((flags & mask) != mask)
5475 continue;
5476
5477 flags &= ~mask;
5478 printk("%s%s", delim, pageflag_names[i].name);
5479 delim = "|";
5480 }
5481
5482 /* check for left over flags */
5483 if (flags)
5484 printk("%s%#lx", delim, flags);
5485
5486 printk(")\n");
5487}
5488
5489void dump_page(struct page *page)
5490{
5491 printk(KERN_ALERT
5492 "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
5493 page, page_count(page), page_mapcount(page),
5494 page->mapping, page->index);
5495 dump_page_flags(page->flags);
5496}