mm/mempolicy.c
1/*
2 * Simple NUMA memory policy for the Linux kernel.
3 *
4 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
5 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
6 * Subject to the GNU Public License, version 2.
7 *
8 * NUMA policy allows the user to give hints in which node(s) memory should
9 * be allocated.
10 *
11 * Support four policies per VMA and per process:
12 *
13 * The VMA policy has priority over the process policy for a page fault.
14 *
15 * interleave Allocate memory interleaved over a set of nodes,
16 * with normal fallback if it fails.
17 * For VMA based allocations this interleaves based on the
18 * offset into the backing object or offset into the mapping
19 * for anonymous memory. For process policy a process counter
20 * is used.
21 *
22 * bind Only allocate memory on a specific set of nodes,
23 * no fallback.
24 * FIXME: memory is allocated starting with the first node
25 * to the last. It would be better if bind would truly restrict
26 * the allocation to memory nodes instead
27 *
28 * preferred Try a specific node first before normal fallback.
29 * As a special case node -1 here means do the allocation
30 * on the local CPU. This is normally identical to default,
31 * but useful to set in a VMA when you have a non default
32 * process policy.
33 *
34 * default Allocate on the local node first, or when on a VMA
35 * use the process policy. This is what Linux always did
36 * in a NUMA aware kernel and still does by, ahem, default.
37 *
38 * The process policy is applied for most non interrupt memory allocations
39 * in that process' context. Interrupts ignore the policies and always
40 * try to allocate on the local CPU. The VMA policy is only applied for memory
41 * allocations for a VMA in the VM.
42 *
43 * Currently there are a few corner cases in swapping where the policy
44 * is not applied, but the majority should be handled. When process policy
45 * is used it is not remembered over swap outs/swap ins.
46 *
47 * Only the highest zone in the zone hierarchy gets policied. Allocations
48 * requesting a lower zone just use default policy. This implies that
49 * on systems with highmem kernel lowmem allocations don't get policied.
50 * Same with GFP_DMA allocations.
51 *
52 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
53 * all users and remembered even when nobody has memory mapped.
54 */
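The policies documented above are driven from user space through the set_mempolicy(2) and mbind(2) system calls implemented later in this file. A minimal, hedged sketch of a caller (not part of mempolicy.c; assumes libnuma's <numaif.h> wrappers, a NUMA-enabled kernel, 4K pages and at least two memory nodes):

    #include <numaif.h>      /* mbind(), set_mempolicy(), MPOL_*; link with -lnuma */
    #include <sys/mman.h>
    #include <stdio.h>

    int main(void)
    {
            /* Interleave this process's future anonymous allocations over nodes 0 and 1. */
            unsigned long interleave_nodes = (1UL << 0) | (1UL << 1);

            if (set_mempolicy(MPOL_INTERLEAVE, &interleave_nodes,
                              8 * sizeof(interleave_nodes)))
                    perror("set_mempolicy");

            /* Give one specific mapping a stricter VMA policy: node 0 only. */
            size_t len = 4 << 20;
            void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            unsigned long node0 = 1UL << 0;

            if (buf != MAP_FAILED &&
                mbind(buf, len, MPOL_BIND, &node0, 8 * sizeof(node0), 0))
                    perror("mbind");
            return 0;
    }

set_mempolicy() installs the per-process policy consulted by alloc_pages_current(), while mbind() attaches a VMA policy of the kind handled by do_mbind() below.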
55
56/* Notebook:
57 fix mmap readahead to honour policy and enable policy for any page cache
58 object
59 statistics for bigpages
60 global policy for page cache? currently it uses process policy. Requires
61 first item above.
62 handle mremap for shared memory (currently ignored for the policy)
63 grows down?
64 make bind policy root only? It can trigger oom much faster and the
65 kernel is not always grateful with that.
66 could replace all the switch()es with a mempolicy_ops structure.
67*/
68
69#include <linux/mempolicy.h>
70#include <linux/mm.h>
71#include <linux/highmem.h>
72#include <linux/hugetlb.h>
73#include <linux/kernel.h>
74#include <linux/sched.h>
75#include <linux/nodemask.h>
76#include <linux/cpuset.h>
77#include <linux/gfp.h>
78#include <linux/slab.h>
79#include <linux/string.h>
80#include <linux/module.h>
81#include <linux/nsproxy.h>
82#include <linux/interrupt.h>
83#include <linux/init.h>
84#include <linux/compat.h>
85#include <linux/swap.h>
86#include <linux/seq_file.h>
87#include <linux/proc_fs.h>
88#include <linux/migrate.h>
89#include <linux/rmap.h>
90#include <linux/security.h>
91#include <linux/syscalls.h>
92
93#include <asm/tlbflush.h>
94#include <asm/uaccess.h>
95
96/* Internal flags */
97#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */
98#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
99#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2) /* Gather statistics */
100
101static struct kmem_cache *policy_cache;
102static struct kmem_cache *sn_cache;
103
104/* Highest zone. An specific allocation for a zone below that is not
105 policied. */
106enum zone_type policy_zone = 0;
107
108struct mempolicy default_policy = {
109 .refcnt = ATOMIC_INIT(1), /* never free it */
110 .policy = MPOL_DEFAULT,
111};
112
113static void mpol_rebind_policy(struct mempolicy *pol,
114 const nodemask_t *newmask);
115
116/* Do sanity checking on a policy */
117static int mpol_check_policy(unsigned short mode, nodemask_t *nodes)
118{
119 int was_empty, is_empty;
120
121 if (!nodes)
122 return 0;
123
124 /*
125 * "Contextualize" the in-coming nodemast for cpusets:
126 * Remember whether in-coming nodemask was empty, If not,
127 * restrict the nodes to the allowed nodes in the cpuset.
128 * This is guaranteed to be a subset of nodes with memory.
129 */
130 cpuset_update_task_memory_state();
131 is_empty = was_empty = nodes_empty(*nodes);
132 if (!was_empty) {
133 nodes_and(*nodes, *nodes, cpuset_current_mems_allowed);
134 is_empty = nodes_empty(*nodes); /* after "contextualization" */
135 }
136
137 switch (mode) {
138 case MPOL_DEFAULT:
139 /*
140 * require caller to specify an empty nodemask
141 * before "contextualization"
142 */
143 if (!was_empty)
144 return -EINVAL;
145 break;
146 case MPOL_BIND:
147 case MPOL_INTERLEAVE:
148 /*
149 * require at least 1 valid node after "contextualization"
150 */
151 if (is_empty)
152 return -EINVAL;
153 break;
154 case MPOL_PREFERRED:
155 /*
156 * Did caller specify invalid nodes?
157 * Don't silently accept this as "local allocation".
158 */
159 if (!was_empty && is_empty)
160 return -EINVAL;
161 break;
162 default:
163 BUG();
164 }
165 return 0;
166}
167
168/* Check that the nodemask contains at least one populated zone */
169static int is_valid_nodemask(nodemask_t *nodemask)
170{
171 int nd, k;
172
173 /* Check that there is something useful in this mask */
174 k = policy_zone;
175
176 for_each_node_mask(nd, *nodemask) {
177 struct zone *z;
178
179 for (k = 0; k <= policy_zone; k++) {
180 z = &NODE_DATA(nd)->node_zones[k];
181 if (z->present_pages > 0)
182 return 1;
183 }
184 }
185
186 return 0;
187}
188
189/* Create a new policy */
190static struct mempolicy *mpol_new(unsigned short mode, nodemask_t *nodes)
191{
192 struct mempolicy *policy;
193
194 pr_debug("setting mode %d nodes[0] %lx\n",
195 mode, nodes ? nodes_addr(*nodes)[0] : -1);
196
197 if (mode == MPOL_DEFAULT)
198 return NULL;
199 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
200 if (!policy)
201 return ERR_PTR(-ENOMEM);
202 atomic_set(&policy->refcnt, 1);
203 switch (mode) {
204 case MPOL_INTERLEAVE:
205 policy->v.nodes = *nodes;
206 if (nodes_weight(policy->v.nodes) == 0) {
207 kmem_cache_free(policy_cache, policy);
208 return ERR_PTR(-EINVAL);
209 }
210 break;
211 case MPOL_PREFERRED:
212 policy->v.preferred_node = first_node(*nodes);
213 if (policy->v.preferred_node >= MAX_NUMNODES)
214 policy->v.preferred_node = -1;
215 break;
216 case MPOL_BIND:
217 if (!is_valid_nodemask(nodes)) {
218 kmem_cache_free(policy_cache, policy);
219 return ERR_PTR(-EINVAL);
220 }
221 policy->v.nodes = *nodes;
222 break;
223 default:
224 BUG();
225 }
226 policy->policy = mode;
227 policy->cpuset_mems_allowed = cpuset_mems_allowed(current);
228 return policy;
229}
230
231static void gather_stats(struct page *, void *, int pte_dirty);
232static void migrate_page_add(struct page *page, struct list_head *pagelist,
233 unsigned long flags);
234
235/* Scan through pages checking if pages follow certain conditions. */
236static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
237 unsigned long addr, unsigned long end,
238 const nodemask_t *nodes, unsigned long flags,
239 void *private)
240{
241 pte_t *orig_pte;
242 pte_t *pte;
243 spinlock_t *ptl;
244
245 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
246 do {
247 struct page *page;
248 int nid;
249
250 if (!pte_present(*pte))
251 continue;
252 page = vm_normal_page(vma, addr, *pte);
253 if (!page)
254 continue;
255 /*
256 * The check for PageReserved here is important to avoid
257 * handling zero pages and other pages that may have been
258 * marked special by the system.
259 *
260 * If the PageReserved would not be checked here then f.e.
261 * the location of the zero page could have an influence
262 * on MPOL_MF_STRICT, zero pages would be counted for
263 * the per node stats, and there would be useless attempts
264 * to put zero pages on the migration list.
265 */
266 if (PageReserved(page))
267 continue;
268 nid = page_to_nid(page);
269 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
270 continue;
271
272 if (flags & MPOL_MF_STATS)
273 gather_stats(page, private, pte_dirty(*pte));
274 else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
275 migrate_page_add(page, private, flags);
276 else
277 break;
278 } while (pte++, addr += PAGE_SIZE, addr != end);
279 pte_unmap_unlock(orig_pte, ptl);
280 return addr != end;
281}
282
283static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
284 unsigned long addr, unsigned long end,
285 const nodemask_t *nodes, unsigned long flags,
286 void *private)
287{
288 pmd_t *pmd;
289 unsigned long next;
290
291 pmd = pmd_offset(pud, addr);
292 do {
293 next = pmd_addr_end(addr, end);
294 if (pmd_none_or_clear_bad(pmd))
295 continue;
296 if (check_pte_range(vma, pmd, addr, next, nodes,
297 flags, private))
298 return -EIO;
299 } while (pmd++, addr = next, addr != end);
300 return 0;
301}
302
303static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
304 unsigned long addr, unsigned long end,
305 const nodemask_t *nodes, unsigned long flags,
306 void *private)
307{
308 pud_t *pud;
309 unsigned long next;
310
311 pud = pud_offset(pgd, addr);
312 do {
313 next = pud_addr_end(addr, end);
314 if (pud_none_or_clear_bad(pud))
315 continue;
316 if (check_pmd_range(vma, pud, addr, next, nodes,
317 flags, private))
318 return -EIO;
319 } while (pud++, addr = next, addr != end);
320 return 0;
321}
322
323static inline int check_pgd_range(struct vm_area_struct *vma,
324 unsigned long addr, unsigned long end,
325 const nodemask_t *nodes, unsigned long flags,
326 void *private)
327{
328 pgd_t *pgd;
329 unsigned long next;
330
331 pgd = pgd_offset(vma->vm_mm, addr);
332 do {
333 next = pgd_addr_end(addr, end);
334 if (pgd_none_or_clear_bad(pgd))
335 continue;
336 if (check_pud_range(vma, pgd, addr, next, nodes,
337 flags, private))
338 return -EIO;
339 } while (pgd++, addr = next, addr != end);
340 return 0;
341}
342
343/*
344 * Check if all pages in a range are on a set of nodes.
345 * If pagelist != NULL then isolate pages from the LRU and
346 * put them on the pagelist.
347 */
348static struct vm_area_struct *
349check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
350 const nodemask_t *nodes, unsigned long flags, void *private)
351{
352 int err;
353 struct vm_area_struct *first, *vma, *prev;
354
355 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
356
357 err = migrate_prep();
358 if (err)
359 return ERR_PTR(err);
360 }
361
362 first = find_vma(mm, start);
363 if (!first)
364 return ERR_PTR(-EFAULT);
365 prev = NULL;
366 for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
367 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
368 if (!vma->vm_next && vma->vm_end < end)
369 return ERR_PTR(-EFAULT);
370 if (prev && prev->vm_end < vma->vm_start)
371 return ERR_PTR(-EFAULT);
372 }
373 if (!is_vm_hugetlb_page(vma) &&
374 ((flags & MPOL_MF_STRICT) ||
375 ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
376 vma_migratable(vma)))) {
377 unsigned long endvma = vma->vm_end;
378
379 if (endvma > end)
380 endvma = end;
381 if (vma->vm_start > start)
382 start = vma->vm_start;
383 err = check_pgd_range(vma, start, endvma, nodes,
384 flags, private);
385 if (err) {
386 first = ERR_PTR(err);
387 break;
388 }
389 }
390 prev = vma;
391 }
392 return first;
393}
394
395/* Apply policy to a single VMA */
396static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
397{
398 int err = 0;
399 struct mempolicy *old = vma->vm_policy;
400
401 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
402 vma->vm_start, vma->vm_end, vma->vm_pgoff,
403 vma->vm_ops, vma->vm_file,
404 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
405
406 if (vma->vm_ops && vma->vm_ops->set_policy)
407 err = vma->vm_ops->set_policy(vma, new);
408 if (!err) {
409 mpol_get(new);
410 vma->vm_policy = new;
411 mpol_free(old);
412 }
413 return err;
414}
415
416/* Step 2: apply policy to a range and do splits. */
417static int mbind_range(struct vm_area_struct *vma, unsigned long start,
418 unsigned long end, struct mempolicy *new)
419{
420 struct vm_area_struct *next;
421 int err;
422
423 err = 0;
424 for (; vma && vma->vm_start < end; vma = next) {
425 next = vma->vm_next;
426 if (vma->vm_start < start)
427 err = split_vma(vma->vm_mm, vma, start, 1);
428 if (!err && vma->vm_end > end)
429 err = split_vma(vma->vm_mm, vma, end, 0);
430 if (!err)
431 err = policy_vma(vma, new);
432 if (err)
433 break;
434 }
435 return err;
436}
437
438/*
439 * Update task->flags PF_MEMPOLICY bit: set iff non-default
440 * mempolicy. Allows more rapid checking of this (combined perhaps
441 * with other PF_* flag bits) on memory allocation hot code paths.
442 *
443 * If called from outside this file, the task 'p' should -only- be
444 * a newly forked child not yet visible on the task list, because
445 * manipulating the task flags of a visible task is not safe.
446 *
447 * The above limitation is why this routine has the funny name
448 * mpol_fix_fork_child_flag().
449 *
450 * It is also safe to call this with a task pointer of current,
451 * which the static wrapper mpol_set_task_struct_flag() does,
452 * for use within this file.
453 */
454
455void mpol_fix_fork_child_flag(struct task_struct *p)
456{
457 if (p->mempolicy)
458 p->flags |= PF_MEMPOLICY;
459 else
460 p->flags &= ~PF_MEMPOLICY;
461}
462
463static void mpol_set_task_struct_flag(void)
464{
465 mpol_fix_fork_child_flag(current);
466}
467
468/* Set the process memory policy */
469static long do_set_mempolicy(unsigned short mode, nodemask_t *nodes)
470{
471 struct mempolicy *new;
472
473 if (mpol_check_policy(mode, nodes))
474 return -EINVAL;
475 new = mpol_new(mode, nodes);
476 if (IS_ERR(new))
477 return PTR_ERR(new);
478 mpol_free(current->mempolicy);
479 current->mempolicy = new;
480 mpol_set_task_struct_flag();
481 if (new && new->policy == MPOL_INTERLEAVE)
482 current->il_next = first_node(new->v.nodes);
483 return 0;
484}
485
486/* Fill a zone bitmap for a policy */
487static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
488{
489 nodes_clear(*nodes);
490 switch (p->policy) {
491 case MPOL_DEFAULT:
492 break;
493 case MPOL_BIND:
494 /* Fall through */
495 case MPOL_INTERLEAVE:
496 *nodes = p->v.nodes;
497 break;
498 case MPOL_PREFERRED:
499 /* or use current node instead of memory_map? */
500 if (p->v.preferred_node < 0)
501 *nodes = node_states[N_HIGH_MEMORY];
502 else
503 node_set(p->v.preferred_node, *nodes);
504 break;
505 default:
506 BUG();
507 }
508}
509
510static int lookup_node(struct mm_struct *mm, unsigned long addr)
511{
512 struct page *p;
513 int err;
514
515 err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
516 if (err >= 0) {
517 err = page_to_nid(p);
518 put_page(p);
519 }
520 return err;
521}
522
523/* Retrieve NUMA policy */
524static long do_get_mempolicy(int *policy, nodemask_t *nmask,
525 unsigned long addr, unsigned long flags)
526{
527 int err;
528 struct mm_struct *mm = current->mm;
529 struct vm_area_struct *vma = NULL;
530 struct mempolicy *pol = current->mempolicy;
531
532 cpuset_update_task_memory_state();
533 if (flags &
534 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
535 return -EINVAL;
536
537 if (flags & MPOL_F_MEMS_ALLOWED) {
538 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
539 return -EINVAL;
540 *policy = 0; /* just so it's initialized */
541 *nmask = cpuset_current_mems_allowed;
542 return 0;
543 }
544
545 if (flags & MPOL_F_ADDR) {
546 down_read(&mm->mmap_sem);
547 vma = find_vma_intersection(mm, addr, addr+1);
548 if (!vma) {
549 up_read(&mm->mmap_sem);
550 return -EFAULT;
551 }
552 if (vma->vm_ops && vma->vm_ops->get_policy)
553 pol = vma->vm_ops->get_policy(vma, addr);
554 else
555 pol = vma->vm_policy;
556 } else if (addr)
557 return -EINVAL;
558
559 if (!pol)
560 pol = &default_policy;
561
562 if (flags & MPOL_F_NODE) {
563 if (flags & MPOL_F_ADDR) {
564 err = lookup_node(mm, addr);
565 if (err < 0)
566 goto out;
567 *policy = err;
568 } else if (pol == current->mempolicy &&
569 pol->policy == MPOL_INTERLEAVE) {
570 *policy = current->il_next;
571 } else {
572 err = -EINVAL;
573 goto out;
574 }
575 } else
576 *policy = pol->policy;
577
578 if (vma) {
579 up_read(&current->mm->mmap_sem);
580 vma = NULL;
581 }
582
583 err = 0;
584 if (nmask)
585 get_zonemask(pol, nmask);
586
587 out:
588 if (vma)
589 up_read(&current->mm->mmap_sem);
590 return err;
591}
592
593#ifdef CONFIG_MIGRATION
594/*
595 * page migration
596 */
597static void migrate_page_add(struct page *page, struct list_head *pagelist,
598 unsigned long flags)
599{
600 /*
601 * Avoid migrating a page that is shared with others.
602 */
603 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1)
604 isolate_lru_page(page, pagelist);
605}
606
607static struct page *new_node_page(struct page *page, unsigned long node, int **x)
608{
609 return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
610}
611
612/*
613 * Migrate pages from one node to a target node.
614 * Returns error or the number of pages not migrated.
615 */
616static int migrate_to_node(struct mm_struct *mm, int source, int dest,
617 int flags)
618{
619 nodemask_t nmask;
620 LIST_HEAD(pagelist);
621 int err = 0;
622
623 nodes_clear(nmask);
624 node_set(source, nmask);
625
626 check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
627 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
628
629 if (!list_empty(&pagelist))
630 err = migrate_pages(&pagelist, new_node_page, dest);
631
632 return err;
633}
634
635/*
636 * Move pages between the two nodesets so as to preserve the physical
637 * layout as much as possible.
638 *
639 * Returns the number of page that could not be moved.
640 */
641int do_migrate_pages(struct mm_struct *mm,
642 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
643{
644 LIST_HEAD(pagelist);
645 int busy = 0;
646 int err = 0;
647 nodemask_t tmp;
648
649 down_read(&mm->mmap_sem);
650
651 err = migrate_vmas(mm, from_nodes, to_nodes, flags);
652 if (err)
653 goto out;
654
655/*
656 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
657 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
658 * bit in 'tmp', and return that <source, dest> pair for migration.
659 * The pair of nodemasks 'to' and 'from' define the map.
660 *
661 * If no pair of bits is found that way, fallback to picking some
662 * pair of 'source' and 'dest' bits that are not the same. If the
663 * 'source' and 'dest' bits are the same, this represents a node
664 * that will be migrating to itself, so no pages need move.
665 *
666 * If no bits are left in 'tmp', or if all remaining bits left
667 * in 'tmp' correspond to the same bit in 'to', return false
668 * (nothing left to migrate).
669 *
670 * This lets us pick a pair of nodes to migrate between, such that
671 * if possible the dest node is not already occupied by some other
672 * source node, minimizing the risk of overloading the memory on a
673 * node that would happen if we migrated incoming memory to a node
674 * before migrating outgoing memory source that same node.
675 *
676 * A single scan of tmp is sufficient. As we go, we remember the
677 * most recent <s, d> pair that moved (s != d). If we find a pair
678 * that not only moved, but what's better, moved to an empty slot
679 * (d is not set in tmp), then we break out then, with that pair.
680 * Otherwise when we finish scanning from_tmp, we at least have the
681 * most recent <s, d> pair that moved. If we get all the way through
682 * the scan of tmp without finding any node that moved, much less
683 * moved to an empty node, then there is nothing left worth migrating.
684 */
685
686 tmp = *from_nodes;
687 while (!nodes_empty(tmp)) {
688 int s,d;
689 int source = -1;
690 int dest = 0;
691
692 for_each_node_mask(s, tmp) {
693 d = node_remap(s, *from_nodes, *to_nodes);
694 if (s == d)
695 continue;
696
697 source = s; /* Node moved. Memorize */
698 dest = d;
699
700 /* dest not in remaining from nodes? */
701 if (!node_isset(dest, tmp))
702 break;
703 }
704 if (source == -1)
705 break;
706
707 node_clear(source, tmp);
708 err = migrate_to_node(mm, source, dest, flags);
709 if (err > 0)
710 busy += err;
711 if (err < 0)
712 break;
39743889 713 }
714out:
715 up_read(&mm->mmap_sem);
716 if (err < 0)
717 return err;
718 return busy;
719
720}
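To make the <source, dest> pair-picking scan above concrete, here is a small stand-alone illustration (plain bitmasks over eight nodes instead of nodemask_t; not kernel code): with from = {0,1} and to = {1,2}, node 1 is emptied into node 2 first, because node 2 is not itself a remaining source, and only then does node 0 move into node 1.

    #include <stdio.h>

    /* Illustrative stand-in for node_remap(): map the n-th set bit of 'from'
     * to the n-th set bit of 'to'. */
    static int remap(int s, unsigned from, unsigned to)
    {
            int ord = 0, n;

            for (n = 0; n < s; n++)
                    if (from & (1u << n))
                            ord++;
            for (n = 0; n < 8; n++)
                    if ((to & (1u << n)) && ord-- == 0)
                            return n;
            return s;
    }

    int main(void)
    {
            unsigned from = 0x3, to = 0x6, tmp = from;   /* from = {0,1}, to = {1,2} */

            while (tmp) {
                    int s, source = -1, dest = 0;

                    for (s = 0; s < 8; s++) {
                            int d;

                            if (!(tmp & (1u << s)))
                                    continue;
                            d = remap(s, from, to);
                            if (s == d)
                                    continue;
                            source = s;
                            dest = d;
                            if (!(tmp & (1u << d)))   /* dest is not a remaining source */
                                    break;
                    }
                    if (source < 0)
                            break;
                    tmp &= ~(1u << source);
                    printf("migrate node %d -> node %d\n", source, dest);
            }
            return 0;   /* prints 1 -> 2, then 0 -> 1 */
    }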
721
722/*
723 * Allocate a new page for page migration based on vma policy.
724 * Start assuming that page is mapped by vma pointed to by @private.
725 * Search forward from there, if not. N.B., this assumes that the
726 * list of pages handed to migrate_pages()--which is how we get here--
727 * is in virtual address order.
728 */
729static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
730{
731 struct vm_area_struct *vma = (struct vm_area_struct *)private;
732 unsigned long uninitialized_var(address);
733
734 while (vma) {
735 address = page_address_in_vma(page, vma);
736 if (address != -EFAULT)
737 break;
738 vma = vma->vm_next;
739 }
740
741 /*
742 * if !vma, alloc_page_vma() will use task or system default policy
743 */
744 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
745}
746#else
747
748static void migrate_page_add(struct page *page, struct list_head *pagelist,
749 unsigned long flags)
750{
751}
752
753int do_migrate_pages(struct mm_struct *mm,
754 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
755{
756 return -ENOSYS;
757}
758
759static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
760{
761 return NULL;
762}
763#endif
764
765static long do_mbind(unsigned long start, unsigned long len,
766 unsigned short mode, nodemask_t *nmask,
767 unsigned long flags)
768{
769 struct vm_area_struct *vma;
770 struct mm_struct *mm = current->mm;
771 struct mempolicy *new;
772 unsigned long end;
773 int err;
774 LIST_HEAD(pagelist);
775
776 if (flags & ~(unsigned long)(MPOL_MF_STRICT |
777 MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
778 return -EINVAL;
779 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
780 return -EPERM;
781
782 if (start & ~PAGE_MASK)
783 return -EINVAL;
784
785 if (mode == MPOL_DEFAULT)
786 flags &= ~MPOL_MF_STRICT;
787
788 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
789 end = start + len;
790
791 if (end < start)
792 return -EINVAL;
793 if (end == start)
794 return 0;
795
796 if (mpol_check_policy(mode, nmask))
797 return -EINVAL;
798
799 new = mpol_new(mode, nmask);
800 if (IS_ERR(new))
801 return PTR_ERR(new);
802
803 /*
804 * If we are using the default policy then operation
805 * on discontinuous address spaces is okay after all
806 */
807 if (!new)
808 flags |= MPOL_MF_DISCONTIG_OK;
809
810 pr_debug("mbind %lx-%lx mode:%d nodes:%lx\n", start, start + len,
811 mode, nmask ? nodes_addr(*nmask)[0] : -1);
812
813 down_write(&mm->mmap_sem);
814 vma = check_range(mm, start, end, nmask,
815 flags | MPOL_MF_INVERT, &pagelist);
816
817 err = PTR_ERR(vma);
818 if (!IS_ERR(vma)) {
819 int nr_failed = 0;
820
821 err = mbind_range(vma, start, end, new);
822
823 if (!list_empty(&pagelist))
824 nr_failed = migrate_pages(&pagelist, new_vma_page,
825 (unsigned long)vma);
826
827 if (!err && nr_failed && (flags & MPOL_MF_STRICT))
828 err = -EIO;
829 }
830
831 up_write(&mm->mmap_sem);
832 mpol_free(new);
833 return err;
834}
835
836/*
837 * User space interface with variable sized bitmaps for nodelists.
838 */
839
840/* Copy a node mask from user space. */
841static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
842 unsigned long maxnode)
843{
844 unsigned long k;
845 unsigned long nlongs;
846 unsigned long endmask;
847
848 --maxnode;
849 nodes_clear(*nodes);
850 if (maxnode == 0 || !nmask)
851 return 0;
852 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
853 return -EINVAL;
854
855 nlongs = BITS_TO_LONGS(maxnode);
856 if ((maxnode % BITS_PER_LONG) == 0)
857 endmask = ~0UL;
858 else
859 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
860
861 /* When the user specified more nodes than supported just check
862 if the non supported part is all zero. */
863 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
864 if (nlongs > PAGE_SIZE/sizeof(long))
865 return -EINVAL;
866 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
867 unsigned long t;
868 if (get_user(t, nmask + k))
869 return -EFAULT;
870 if (k == nlongs - 1) {
871 if (t & endmask)
872 return -EINVAL;
873 } else if (t)
874 return -EINVAL;
875 }
876 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
877 endmask = ~0UL;
878 }
879
880 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
881 return -EFAULT;
882 nodes_addr(*nodes)[nlongs-1] &= endmask;
883 return 0;
884}
885
886/* Copy a kernel node mask to user space */
887static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
888 nodemask_t *nodes)
889{
890 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
891 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
892
893 if (copy > nbytes) {
894 if (copy > PAGE_SIZE)
895 return -EINVAL;
896 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
897 return -EFAULT;
898 copy = nbytes;
899 }
900 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
901}
902
903asmlinkage long sys_mbind(unsigned long start, unsigned long len,
904 unsigned long mode,
905 unsigned long __user *nmask, unsigned long maxnode,
906 unsigned flags)
907{
908 nodemask_t nodes;
909 int err;
910
911 if (mode >= MPOL_MAX)
912 return -EINVAL;
913 err = get_nodes(&nodes, nmask, maxnode);
914 if (err)
915 return err;
916 return do_mbind(start, len, mode, &nodes, flags);
917}
918
919/* Set the process memory policy */
920asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
921 unsigned long maxnode)
922{
923 int err;
924 nodemask_t nodes;
925
926 if (mode < 0 || mode >= MPOL_MAX)
927 return -EINVAL;
928 err = get_nodes(&nodes, nmask, maxnode);
929 if (err)
930 return err;
931 return do_set_mempolicy(mode, &nodes);
932}
933
934asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
935 const unsigned long __user *old_nodes,
936 const unsigned long __user *new_nodes)
937{
938 struct mm_struct *mm;
939 struct task_struct *task;
940 nodemask_t old;
941 nodemask_t new;
942 nodemask_t task_nodes;
943 int err;
944
945 err = get_nodes(&old, old_nodes, maxnode);
946 if (err)
947 return err;
948
949 err = get_nodes(&new, new_nodes, maxnode);
950 if (err)
951 return err;
952
953 /* Find the mm_struct */
954 read_lock(&tasklist_lock);
955 task = pid ? find_task_by_vpid(pid) : current;
956 if (!task) {
957 read_unlock(&tasklist_lock);
958 return -ESRCH;
959 }
960 mm = get_task_mm(task);
961 read_unlock(&tasklist_lock);
962
963 if (!mm)
964 return -EINVAL;
965
966 /*
967 * Check if this process has the right to modify the specified
968 * process. The right exists if the process has administrative
969 * capabilities, superuser privileges or the same
970 * userid as the target process.
971 */
972 if ((current->euid != task->suid) && (current->euid != task->uid) &&
973 (current->uid != task->suid) && (current->uid != task->uid) &&
974 !capable(CAP_SYS_NICE)) {
975 err = -EPERM;
976 goto out;
977 }
978
979 task_nodes = cpuset_mems_allowed(task);
980 /* Is the user allowed to access the target nodes? */
981 if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
982 err = -EPERM;
983 goto out;
984 }
985
986 if (!nodes_subset(new, node_states[N_HIGH_MEMORY])) {
987 err = -EINVAL;
988 goto out;
989 }
990
991 err = security_task_movememory(task);
992 if (err)
993 goto out;
994
995 err = do_migrate_pages(mm, &old, &new,
996 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
997out:
998 mmput(mm);
999 return err;
1000}
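The user-space counterpart of the syscall above is migrate_pages(2); a hedged sketch (using libnuma's <numaif.h> wrapper and assuming nodes 0 and 1 exist) of moving the calling process's pages from node 0 to node 1:

    #include <numaif.h>           /* migrate_pages() wrapper; link with -lnuma */
    #include <stdio.h>

    int main(void)
    {
            unsigned long old_nodes = 1UL << 0;   /* source: node 0 */
            unsigned long new_nodes = 1UL << 1;   /* destination: node 1 */
            long left;

            /* pid 0 means the calling process, as in sys_migrate_pages() above. */
            left = migrate_pages(0, 8 * sizeof(old_nodes), &old_nodes, &new_nodes);
            if (left < 0)
                    perror("migrate_pages");
            else if (left > 0)
                    printf("%ld pages could not be moved\n", left);
            return 0;
    }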
1001
1002
1003/* Retrieve NUMA policy */
1004asmlinkage long sys_get_mempolicy(int __user *policy,
1005 unsigned long __user *nmask,
1006 unsigned long maxnode,
1007 unsigned long addr, unsigned long flags)
1008{
1009 int err;
1010 int uninitialized_var(pval);
1011 nodemask_t nodes;
1012
1013 if (nmask != NULL && maxnode < MAX_NUMNODES)
1014 return -EINVAL;
1015
1016 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1017
1018 if (err)
1019 return err;
1020
1021 if (policy && put_user(pval, policy))
1022 return -EFAULT;
1023
1024 if (nmask)
1025 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1026
1027 return err;
1028}
1029
1030#ifdef CONFIG_COMPAT
1031
1032asmlinkage long compat_sys_get_mempolicy(int __user *policy,
1033 compat_ulong_t __user *nmask,
1034 compat_ulong_t maxnode,
1035 compat_ulong_t addr, compat_ulong_t flags)
1036{
1037 long err;
1038 unsigned long __user *nm = NULL;
1039 unsigned long nr_bits, alloc_size;
1040 DECLARE_BITMAP(bm, MAX_NUMNODES);
1041
1042 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1043 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1044
1045 if (nmask)
1046 nm = compat_alloc_user_space(alloc_size);
1047
1048 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1049
1050 if (!err && nmask) {
1051 err = copy_from_user(bm, nm, alloc_size);
1052 /* ensure entire bitmap is zeroed */
1053 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1054 err |= compat_put_bitmap(nmask, bm, nr_bits);
1055 }
1056
1057 return err;
1058}
1059
1060asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
1061 compat_ulong_t maxnode)
1062{
1063 long err = 0;
1064 unsigned long __user *nm = NULL;
1065 unsigned long nr_bits, alloc_size;
1066 DECLARE_BITMAP(bm, MAX_NUMNODES);
1067
1068 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1069 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1070
1071 if (nmask) {
1072 err = compat_get_bitmap(bm, nmask, nr_bits);
1073 nm = compat_alloc_user_space(alloc_size);
1074 err |= copy_to_user(nm, bm, alloc_size);
1075 }
1076
1077 if (err)
1078 return -EFAULT;
1079
1080 return sys_set_mempolicy(mode, nm, nr_bits+1);
1081}
1082
1083asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
1084 compat_ulong_t mode, compat_ulong_t __user *nmask,
1085 compat_ulong_t maxnode, compat_ulong_t flags)
1086{
1087 long err = 0;
1088 unsigned long __user *nm = NULL;
1089 unsigned long nr_bits, alloc_size;
1090 nodemask_t bm;
1091
1092 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1093 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1094
1095 if (nmask) {
1096 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
1097 nm = compat_alloc_user_space(alloc_size);
1098 err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
1099 }
1100
1101 if (err)
1102 return -EFAULT;
1103
1104 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1105}
1106
1107#endif
1108
1109/*
1110 * get_vma_policy(@task, @vma, @addr)
1111 * @task - task for fallback if vma policy == default
1112 * @vma - virtual memory area whose policy is sought
1113 * @addr - address in @vma for shared policy lookup
1114 *
1115 * Returns effective policy for a VMA at specified address.
1116 * Falls back to @task or system default policy, as necessary.
1117 * Returned policy has extra reference count if shared, vma,
1118 * or some other task's policy [show_numa_maps() can pass
1119 * @task != current]. It is the caller's responsibility to
1120 * free the reference in these cases.
1121 */
1122static struct mempolicy * get_vma_policy(struct task_struct *task,
1123 struct vm_area_struct *vma, unsigned long addr)
1124
1125 struct mempolicy *pol = task->mempolicy;
1126 int shared_pol = 0;
1127
1128 if (vma) {
1129 if (vma->vm_ops && vma->vm_ops->get_policy) {
1130 pol = vma->vm_ops->get_policy(vma, addr);
1131 shared_pol = 1; /* if pol non-NULL, add ref below */
1132 } else if (vma->vm_policy &&
1133 vma->vm_policy->policy != MPOL_DEFAULT)
1134 pol = vma->vm_policy;
1135 }
1136 if (!pol)
1137 pol = &default_policy;
1138 else if (!shared_pol && pol != current->mempolicy)
1139 mpol_get(pol); /* vma or other task's policy */
1140 return pol;
1141}
1142
1143/* Return a nodemask representing a mempolicy */
1144static nodemask_t *nodemask_policy(gfp_t gfp, struct mempolicy *policy)
1145{
1146 /* Lower zones don't get a nodemask applied for MPOL_BIND */
1147 if (unlikely(policy->policy == MPOL_BIND) &&
1148 gfp_zone(gfp) >= policy_zone &&
1149 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1150 return &policy->v.nodes;
1151
1152 return NULL;
1153}
1154
1155/* Return a zonelist representing a mempolicy */
1156static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
1157{
1158 int nd;
1159
1160 switch (policy->policy) {
1161 case MPOL_PREFERRED:
1162 nd = policy->v.preferred_node;
1163 if (nd < 0)
1164 nd = numa_node_id();
1165 break;
1166 case MPOL_BIND:
1167 /*
1168 * Normally, MPOL_BIND allocations are node-local within the
1169 * allowed nodemask. However, if __GFP_THISNODE is set and the
1170 * current node isn't part of the mask, we use the zonelist for
1171 * the first node in the mask instead.
1172 */
1173 nd = numa_node_id();
1174 if (unlikely(gfp & __GFP_THISNODE) &&
1175 unlikely(!node_isset(nd, policy->v.nodes)))
1176 nd = first_node(policy->v.nodes);
1177 break;
1178 case MPOL_INTERLEAVE: /* should not happen */
1179 case MPOL_DEFAULT:
1180 nd = numa_node_id();
1181 break;
1182 default:
1183 nd = 0;
1184 BUG();
1185 }
1186 return node_zonelist(nd, gfp);
1187}
1188
1189/* Do dynamic interleaving for a process */
1190static unsigned interleave_nodes(struct mempolicy *policy)
1191{
1192 unsigned nid, next;
1193 struct task_struct *me = current;
1194
1195 nid = me->il_next;
1196 next = next_node(nid, policy->v.nodes);
1197 if (next >= MAX_NUMNODES)
1198 next = first_node(policy->v.nodes);
1199 me->il_next = next;
1200 return nid;
1201}
1202
1203/*
1204 * Depending on the memory policy provide a node from which to allocate the
1205 * next slab entry.
1206 */
1207unsigned slab_node(struct mempolicy *policy)
1208{
1209 unsigned short pol = policy ? policy->policy : MPOL_DEFAULT;
1210
1211 switch (pol) {
1212 case MPOL_INTERLEAVE:
1213 return interleave_nodes(policy);
1214
1215 case MPOL_BIND: {
1216 /*
1217 * Follow bind policy behavior and start allocation at the
1218 * first node.
1219 */
1220 struct zonelist *zonelist;
1221 struct zone *zone;
1222 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1223 zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
1224 (void)first_zones_zonelist(zonelist, highest_zoneidx,
1225 &policy->v.nodes,
1226 &zone);
1227 return zone->node;
1228 }
1229
1230 case MPOL_PREFERRED:
1231 if (policy->v.preferred_node >= 0)
1232 return policy->v.preferred_node;
1233 /* Fall through */
1234
1235 default:
1236 return numa_node_id();
1237 }
1238}
1239
1240/* Do static interleaving for a VMA with known offset. */
1241static unsigned offset_il_node(struct mempolicy *pol,
1242 struct vm_area_struct *vma, unsigned long off)
1243{
1244 unsigned nnodes = nodes_weight(pol->v.nodes);
1245 unsigned target = (unsigned)off % nnodes;
1246 int c;
1247 int nid = -1;
1248
1249 c = 0;
1250 do {
1251 nid = next_node(nid, pol->v.nodes);
1252 c++;
1253 } while (c <= target);
1254 return nid;
1255}
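A quick worked example of the selection above (numbers invented for illustration): with pol->v.nodes = {0, 2, 5} and off = 7, nnodes = 3 and target = 7 % 3 = 1, so the do/while walks past node 0 and returns node 2, the target-th node of the mask counted from the lowest. The same pick over a plain sorted array would be:

    /* Illustration only: pick the (off % count)-th entry of a sorted node list. */
    static int pick_interleave_node(const int *nodes, int count, unsigned long off)
    {
            return nodes[off % count];   /* nodes[] = {0, 2, 5}, off = 7 -> returns 2 */
    }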
1256
1257/* Determine a node number for interleave */
1258static inline unsigned interleave_nid(struct mempolicy *pol,
1259 struct vm_area_struct *vma, unsigned long addr, int shift)
1260{
1261 if (vma) {
1262 unsigned long off;
1263
1264 /*
1265 * for small pages, there is no difference between
1266 * shift and PAGE_SHIFT, so the bit-shift is safe.
1267 * for huge pages, since vm_pgoff is in units of small
1268 * pages, we need to shift off the always 0 bits to get
1269 * a useful offset.
1270 */
1271 BUG_ON(shift < PAGE_SHIFT);
1272 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1273 off += (addr - vma->vm_start) >> shift;
1274 return offset_il_node(pol, vma, off);
1275 } else
1276 return interleave_nodes(pol);
1277}
1278
1279#ifdef CONFIG_HUGETLBFS
1280/*
1281 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1282 * @vma = virtual memory area whose policy is sought
1283 * @addr = address in @vma for shared policy lookup and interleave policy
1284 * @gfp_flags = for requested zone
1285 * @mpol = pointer to mempolicy pointer for reference counted mempolicy
1286 * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
1287 *
1288 * Returns a zonelist suitable for a huge page allocation.
1289 * If the effective policy is 'BIND, returns pointer to local node's zonelist,
1290 * and a pointer to the mempolicy's @nodemask for filtering the zonelist.
1291 * If it is also a policy for which get_vma_policy() returns an extra
1292 * reference, we must hold that reference until after the allocation.
1293 * In that case, return policy via @mpol so hugetlb allocation can drop
1294 * the reference. For non-'BIND referenced policies, we can/do drop the
1295 * reference here, so the caller doesn't need to know about the special case
1296 * for default and current task policy.
1297 */
1298struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1299 gfp_t gfp_flags, struct mempolicy **mpol,
1300 nodemask_t **nodemask)
1301{
1302 struct mempolicy *pol = get_vma_policy(current, vma, addr);
1303 struct zonelist *zl;
1304
1305 *mpol = NULL; /* probably no unref needed */
1306 *nodemask = NULL; /* assume !MPOL_BIND */
1307 if (pol->policy == MPOL_BIND) {
1308 *nodemask = &pol->v.nodes;
1309 } else if (pol->policy == MPOL_INTERLEAVE) {
1310 unsigned nid;
1311
1312 nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
1313 if (unlikely(pol != &default_policy &&
1314 pol != current->mempolicy))
1315 __mpol_free(pol); /* finished with pol */
1316 return node_zonelist(nid, gfp_flags);
1317 }
1318
1319 zl = zonelist_policy(GFP_HIGHUSER, pol);
1320 if (unlikely(pol != &default_policy && pol != current->mempolicy)) {
1321 if (pol->policy != MPOL_BIND)
1322 __mpol_free(pol); /* finished with pol */
1323 else
1324 *mpol = pol; /* unref needed after allocation */
1325 }
1326 return zl;
1327}
1328#endif
1329
1330/* Allocate a page in interleaved policy.
1331 Own path because it needs to do special accounting. */
1332static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1333 unsigned nid)
1334{
1335 struct zonelist *zl;
1336 struct page *page;
1337
1338 zl = node_zonelist(nid, gfp);
1339 page = __alloc_pages(gfp, order, zl);
1340 if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1341 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1342 return page;
1343}
1344
1345/**
1346 * alloc_page_vma - Allocate a page for a VMA.
1347 *
1348 * @gfp:
1349 * %GFP_USER user allocation.
1350 * %GFP_KERNEL kernel allocations,
1351 * %GFP_HIGHMEM highmem/user allocations,
1352 * %GFP_FS allocation should not call back into a file system.
1353 * %GFP_ATOMIC don't sleep.
1354 *
1355 * @vma: Pointer to VMA or NULL if not available.
1356 * @addr: Virtual Address of the allocation. Must be inside the VMA.
1357 *
1358 * This function allocates a page from the kernel page pool and applies
1359 * a NUMA policy associated with the VMA or the current process.
1360 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
1361 * mm_struct of the VMA to prevent it from going away. Should be used for
1362 * all allocations for pages that will be mapped into
1363 * user space. Returns NULL when no page can be allocated.
1364 *
1365 * Should be called with the mm_sem of the vma hold.
1366 */
1367struct page *
1368alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
1369{
1370 struct mempolicy *pol = get_vma_policy(current, vma, addr);
1371 struct zonelist *zl;
1372
1373 cpuset_update_task_memory_state();
1374
1375 if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
1376 unsigned nid;
1377
1378 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
1379 if (unlikely(pol != &default_policy &&
1380 pol != current->mempolicy))
1381 __mpol_free(pol); /* finished with pol */
1382 return alloc_page_interleave(gfp, 0, nid);
1383 }
1384 zl = zonelist_policy(gfp, pol);
1385 if (pol != &default_policy && pol != current->mempolicy) {
1386 /*
1387 * slow path: ref counted policy -- shared or vma
1388 */
1389 struct page *page = __alloc_pages_nodemask(gfp, 0,
1390 zl, nodemask_policy(gfp, pol));
1391 __mpol_free(pol);
1392 return page;
1393 }
1394 /*
1395 * fast path: default or task policy
1396 */
1397 return __alloc_pages_nodemask(gfp, 0, zl, nodemask_policy(gfp, pol));
1398}
1399
1400/**
1401 * alloc_pages_current - Allocate pages.
1402 *
1403 * @gfp:
1404 * %GFP_USER user allocation,
1405 * %GFP_KERNEL kernel allocation,
1406 * %GFP_HIGHMEM highmem allocation,
1407 * %GFP_FS don't call back into a file system.
1408 * %GFP_ATOMIC don't sleep.
1409 * @order: Power of two of allocation size in pages. 0 is a single page.
1410 *
1411 * Allocate a page from the kernel page pool. When not in
1412 * interrupt context, apply the current process' NUMA policy.
1413 * Returns NULL when no page can be allocated.
1414 *
1415 * Don't call cpuset_update_task_memory_state() unless
1416 * 1) it's ok to take cpuset_sem (can WAIT), and
1417 * 2) allocating for current task (not interrupt).
1418 */
1419struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1420{
1421 struct mempolicy *pol = current->mempolicy;
1422
1423 if ((gfp & __GFP_WAIT) && !in_interrupt())
1424 cpuset_update_task_memory_state();
1425 if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
1426 pol = &default_policy;
1427 if (pol->policy == MPOL_INTERLEAVE)
1428 return alloc_page_interleave(gfp, order, interleave_nodes(pol));
1429 return __alloc_pages_nodemask(gfp, order,
1430 zonelist_policy(gfp, pol), nodemask_policy(gfp, pol));
1431}
1432EXPORT_SYMBOL(alloc_pages_current);
1433
1434/*
1435 * If mpol_copy() sees current->cpuset == cpuset_being_rebound, then it
1436 * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
1437 * with the mems_allowed returned by cpuset_mems_allowed(). This
1438 * keeps mempolicies cpuset relative after its cpuset moves. See
1439 * further kernel/cpuset.c update_nodemask().
1440 */
1441
1442/* Slow path of a mempolicy copy */
1443struct mempolicy *__mpol_copy(struct mempolicy *old)
1444{
1445 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
1446
1447 if (!new)
1448 return ERR_PTR(-ENOMEM);
1449 if (current_cpuset_is_being_rebound()) {
1450 nodemask_t mems = cpuset_mems_allowed(current);
1451 mpol_rebind_policy(old, &mems);
1452 }
1453 *new = *old;
1454 atomic_set(&new->refcnt, 1);
1455 return new;
1456}
1457
1458/* Slow path of a mempolicy comparison */
1459int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1460{
1461 if (!a || !b)
1462 return 0;
1463 if (a->policy != b->policy)
1464 return 0;
1465 switch (a->policy) {
1466 case MPOL_DEFAULT:
1467 return 1;
1468 case MPOL_BIND:
1469 /* Fall through */
1470 case MPOL_INTERLEAVE:
1471 return nodes_equal(a->v.nodes, b->v.nodes);
1472 case MPOL_PREFERRED:
1473 return a->v.preferred_node == b->v.preferred_node;
1474 default:
1475 BUG();
1476 return 0;
1477 }
1478}
1479
1480/* Slow path of a mpol destructor. */
1481void __mpol_free(struct mempolicy *p)
1482{
1483 if (!atomic_dec_and_test(&p->refcnt))
1484 return;
1485 p->policy = MPOL_DEFAULT;
1486 kmem_cache_free(policy_cache, p);
1487}
1488
1489/*
1490 * Shared memory backing store policy support.
1491 *
1492 * Remember policies even when nobody has shared memory mapped.
1493 * The policies are kept in Red-Black tree linked from the inode.
1494 * They are protected by the sp->lock spinlock, which should be held
1495 * for any accesses to the tree.
1496 */
1497
1498/* lookup first element intersecting start-end */
1499/* Caller holds sp->lock */
1500static struct sp_node *
1501sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
1502{
1503 struct rb_node *n = sp->root.rb_node;
1504
1505 while (n) {
1506 struct sp_node *p = rb_entry(n, struct sp_node, nd);
1507
1508 if (start >= p->end)
1509 n = n->rb_right;
1510 else if (end <= p->start)
1511 n = n->rb_left;
1512 else
1513 break;
1514 }
1515 if (!n)
1516 return NULL;
1517 for (;;) {
1518 struct sp_node *w = NULL;
1519 struct rb_node *prev = rb_prev(n);
1520 if (!prev)
1521 break;
1522 w = rb_entry(prev, struct sp_node, nd);
1523 if (w->end <= start)
1524 break;
1525 n = prev;
1526 }
1527 return rb_entry(n, struct sp_node, nd);
1528}
1529
1530/* Insert a new shared policy into the list. */
1531/* Caller holds sp->lock */
1532static void sp_insert(struct shared_policy *sp, struct sp_node *new)
1533{
1534 struct rb_node **p = &sp->root.rb_node;
1535 struct rb_node *parent = NULL;
1536 struct sp_node *nd;
1537
1538 while (*p) {
1539 parent = *p;
1540 nd = rb_entry(parent, struct sp_node, nd);
1541 if (new->start < nd->start)
1542 p = &(*p)->rb_left;
1543 else if (new->end > nd->end)
1544 p = &(*p)->rb_right;
1545 else
1546 BUG();
1547 }
1548 rb_link_node(&new->nd, parent, p);
1549 rb_insert_color(&new->nd, &sp->root);
1550 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
1551 new->policy ? new->policy->policy : 0);
1552}
1553
1554/* Find shared policy intersecting idx */
1555struct mempolicy *
1556mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
1557{
1558 struct mempolicy *pol = NULL;
1559 struct sp_node *sn;
1560
1561 if (!sp->root.rb_node)
1562 return NULL;
1563 spin_lock(&sp->lock);
1564 sn = sp_lookup(sp, idx, idx+1);
1565 if (sn) {
1566 mpol_get(sn->policy);
1567 pol = sn->policy;
1568 }
1569 spin_unlock(&sp->lock);
1570 return pol;
1571}
1572
1573static void sp_delete(struct shared_policy *sp, struct sp_node *n)
1574{
1575 pr_debug("deleting %lx-%lx\n", n->start, n->end);
1576 rb_erase(&n->nd, &sp->root);
1577 mpol_free(n->policy);
1578 kmem_cache_free(sn_cache, n);
1579}
1580
1581static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
1582 struct mempolicy *pol)
1583{
1584 struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
1585
1586 if (!n)
1587 return NULL;
1588 n->start = start;
1589 n->end = end;
1590 mpol_get(pol);
1591 n->policy = pol;
1592 return n;
1593}
1594
1595/* Replace a policy range. */
1596static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
1597 unsigned long end, struct sp_node *new)
1598{
1599 struct sp_node *n, *new2 = NULL;
1600
1601restart:
1602 spin_lock(&sp->lock);
1603 n = sp_lookup(sp, start, end);
1604 /* Take care of old policies in the same range. */
1605 while (n && n->start < end) {
1606 struct rb_node *next = rb_next(&n->nd);
1607 if (n->start >= start) {
1608 if (n->end <= end)
1609 sp_delete(sp, n);
1610 else
1611 n->start = end;
1612 } else {
1613 /* Old policy spanning whole new range. */
1614 if (n->end > end) {
1615 if (!new2) {
1616 spin_unlock(&sp->lock);
1617 new2 = sp_alloc(end, n->end, n->policy);
1618 if (!new2)
1619 return -ENOMEM;
1620 goto restart;
1621 }
1622 n->end = start;
1623 sp_insert(sp, new2);
1624 new2 = NULL;
1625 break;
1626 } else
1627 n->end = start;
1628 }
1629 if (!next)
1630 break;
1631 n = rb_entry(next, struct sp_node, nd);
1632 }
1633 if (new)
1634 sp_insert(sp, new);
1635 spin_unlock(&sp->lock);
1636 if (new2) {
1637 mpol_free(new2->policy);
1638 kmem_cache_free(sn_cache, new2);
1639 }
1640 return 0;
1641}
1642
1643void mpol_shared_policy_init(struct shared_policy *info, unsigned short policy,
1644 nodemask_t *policy_nodes)
1645{
1646 info->root = RB_ROOT;
1647 spin_lock_init(&info->lock);
1648
1649 if (policy != MPOL_DEFAULT) {
1650 struct mempolicy *newpol;
1651
1652 /* Falls back to MPOL_DEFAULT on any error */
1653 newpol = mpol_new(policy, policy_nodes);
1654 if (!IS_ERR(newpol)) {
1655 /* Create pseudo-vma that contains just the policy */
1656 struct vm_area_struct pvma;
1657
1658 memset(&pvma, 0, sizeof(struct vm_area_struct));
1659 /* Policy covers entire file */
1660 pvma.vm_end = TASK_SIZE;
1661 mpol_set_shared_policy(info, &pvma, newpol);
1662 mpol_free(newpol);
1663 }
1664 }
1665}
1666
1667int mpol_set_shared_policy(struct shared_policy *info,
1668 struct vm_area_struct *vma, struct mempolicy *npol)
1669{
1670 int err;
1671 struct sp_node *new = NULL;
1672 unsigned long sz = vma_pages(vma);
1673
1674 pr_debug("set_shared_policy %lx sz %lu %d %lx\n",
1675 vma->vm_pgoff,
1676 sz, npol? npol->policy : -1,
1677 npol ? nodes_addr(npol->v.nodes)[0] : -1);
1678
1679 if (npol) {
1680 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
1681 if (!new)
1682 return -ENOMEM;
1683 }
1684 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
1685 if (err && new)
1686 kmem_cache_free(sn_cache, new);
1687 return err;
1688}
1689
1690/* Free a backing policy store on inode delete. */
1691void mpol_free_shared_policy(struct shared_policy *p)
1692{
1693 struct sp_node *n;
1694 struct rb_node *next;
1695
1696 if (!p->root.rb_node)
1697 return;
1698 spin_lock(&p->lock);
1699 next = rb_first(&p->root);
1700 while (next) {
1701 n = rb_entry(next, struct sp_node, nd);
1702 next = rb_next(&n->nd);
1703 rb_erase(&n->nd, &p->root);
1704 mpol_free(n->policy);
1705 kmem_cache_free(sn_cache, n);
1706 }
1707 spin_unlock(&p->lock);
1708}
1709
1710/* assumes fs == KERNEL_DS */
1711void __init numa_policy_init(void)
1712{
1713 nodemask_t interleave_nodes;
1714 unsigned long largest = 0;
1715 int nid, prefer = 0;
1716
1717 policy_cache = kmem_cache_create("numa_policy",
1718 sizeof(struct mempolicy),
1719 0, SLAB_PANIC, NULL);
1720
1721 sn_cache = kmem_cache_create("shared_policy_node",
1722 sizeof(struct sp_node),
1723 0, SLAB_PANIC, NULL);
1724
1725 /*
1726 * Set interleaving policy for system init. Interleaving is only
1727 * enabled across suitably sized nodes (default is >= 16MB), or
1728 * fall back to the largest node if they're all smaller.
1729 */
1730 nodes_clear(interleave_nodes);
1731 for_each_node_state(nid, N_HIGH_MEMORY) {
1732 unsigned long total_pages = node_present_pages(nid);
1733
1734 /* Preserve the largest node */
1735 if (largest < total_pages) {
1736 largest = total_pages;
1737 prefer = nid;
1738 }
1739
1740 /* Interleave this node? */
1741 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
1742 node_set(nid, interleave_nodes);
1743 }
1744
1745 /* All too small, use the largest */
1746 if (unlikely(nodes_empty(interleave_nodes)))
1747 node_set(prefer, interleave_nodes);
1748
1749 if (do_set_mempolicy(MPOL_INTERLEAVE, &interleave_nodes))
1750 printk("numa_policy_init: interleaving failed\n");
1751}
1752
1753/* Reset policy of current process to default */
1754void numa_default_policy(void)
1755{
1756 do_set_mempolicy(MPOL_DEFAULT, NULL);
1757}
1758
1759/* Migrate a policy to a different set of nodes */
1760static void mpol_rebind_policy(struct mempolicy *pol,
1761 const nodemask_t *newmask)
1762{
1763 nodemask_t *mpolmask;
1764 nodemask_t tmp;
1765
1766 if (!pol)
1767 return;
1768 mpolmask = &pol->cpuset_mems_allowed;
1769 if (nodes_equal(*mpolmask, *newmask))
1770 return;
1771
1772 switch (pol->policy) {
1773 case MPOL_DEFAULT:
1774 break;
1775 case MPOL_BIND:
1776 /* Fall through */
1777 case MPOL_INTERLEAVE:
1778 nodes_remap(tmp, pol->v.nodes, *mpolmask, *newmask);
1779 pol->v.nodes = tmp;
1780 *mpolmask = *newmask;
1781 current->il_next = node_remap(current->il_next,
1782 *mpolmask, *newmask);
1783 break;
1784 case MPOL_PREFERRED:
1785 pol->v.preferred_node = node_remap(pol->v.preferred_node,
1786 *mpolmask, *newmask);
1787 *mpolmask = *newmask;
1788 break;
1789 default:
1790 BUG();
1791 break;
1792 }
1793}
1794
1795/*
1796 * Wrapper for mpol_rebind_policy() that just requires task
1797 * pointer, and updates task mempolicy.
1798 */
1799
1800void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
1801{
1802 mpol_rebind_policy(tsk->mempolicy, new);
1803}
1804
1805/*
1806 * Rebind each vma in mm to new nodemask.
1807 *
1808 * Call holding a reference to mm. Takes mm->mmap_sem during call.
1809 */
1810
1811void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
1812{
1813 struct vm_area_struct *vma;
1814
1815 down_write(&mm->mmap_sem);
1816 for (vma = mm->mmap; vma; vma = vma->vm_next)
1817 mpol_rebind_policy(vma->vm_policy, new);
1818 up_write(&mm->mmap_sem);
1819}
1820
1821/*
1822 * Display pages allocated per node and memory policy via /proc.
1823 */
1824
1825static const char * const policy_types[] =
1826 { "default", "prefer", "bind", "interleave" };
1827
1828/*
1829 * Convert a mempolicy into a string.
1830 * Returns the number of characters in buffer (if positive)
1831 * or an error (negative)
1832 */
1833static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
1834{
1835 char *p = buffer;
1836 int l;
1837 nodemask_t nodes;
1838 unsigned short mode = pol ? pol->policy : MPOL_DEFAULT;
1839
1840 switch (mode) {
1841 case MPOL_DEFAULT:
1842 nodes_clear(nodes);
1843 break;
1844
1845 case MPOL_PREFERRED:
1846 nodes_clear(nodes);
1847 node_set(pol->v.preferred_node, nodes);
1848 break;
1849
1850 case MPOL_BIND:
1851 /* Fall through */
1852 case MPOL_INTERLEAVE:
1853 nodes = pol->v.nodes;
1854 break;
1855
1856 default:
1857 BUG();
1858 return -EFAULT;
1859 }
1860
1861 l = strlen(policy_types[mode]);
1862 if (buffer + maxlen < p + l + 1)
1863 return -ENOSPC;
1864
1865 strcpy(p, policy_types[mode]);
1866 p += l;
1867
1868 if (!nodes_empty(nodes)) {
1869 if (buffer + maxlen < p + 2)
1870 return -ENOSPC;
1871 *p++ = '=';
1872 p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
1873 }
1874 return p - buffer;
1875}
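For orientation, the strings built here are what show_numa_map() below prints into /proc/<pid>/numa_maps; a line for an interleaved anonymous mapping might look roughly like this (illustrative values only, not captured output):

    2aaaaac000 interleave=0-3 anon=16 dirty=16 active=12 N0=4 N1=4 N2=4 N3=4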
1876
1877struct numa_maps {
1878 unsigned long pages;
1879 unsigned long anon;
1880 unsigned long active;
1881 unsigned long writeback;
1882 unsigned long mapcount_max;
1883 unsigned long dirty;
1884 unsigned long swapcache;
1885 unsigned long node[MAX_NUMNODES];
1886};
1887
1888static void gather_stats(struct page *page, void *private, int pte_dirty)
1889{
1890 struct numa_maps *md = private;
1891 int count = page_mapcount(page);
1892
1893 md->pages++;
1894 if (pte_dirty || PageDirty(page))
1895 md->dirty++;
1896
1897 if (PageSwapCache(page))
1898 md->swapcache++;
1899
1900 if (PageActive(page))
1901 md->active++;
1902
1903 if (PageWriteback(page))
1904 md->writeback++;
1905
1906 if (PageAnon(page))
1907 md->anon++;
1908
1909 if (count > md->mapcount_max)
1910 md->mapcount_max = count;
1911
1a75a6c8 1912 md->node[page_to_nid(page)]++;
1913}
1914
1915#ifdef CONFIG_HUGETLB_PAGE
1916static void check_huge_range(struct vm_area_struct *vma,
1917 unsigned long start, unsigned long end,
1918 struct numa_maps *md)
1919{
1920 unsigned long addr;
1921 struct page *page;
1922
1923 for (addr = start; addr < end; addr += HPAGE_SIZE) {
1924 pte_t *ptep = huge_pte_offset(vma->vm_mm, addr & HPAGE_MASK);
1925 pte_t pte;
1926
1927 if (!ptep)
1928 continue;
1929
1930 pte = *ptep;
1931 if (pte_none(pte))
1932 continue;
1933
1934 page = pte_page(pte);
1935 if (!page)
1936 continue;
1937
1938 gather_stats(page, md, pte_dirty(*ptep));
1939 }
1940}
1941#else
1942static inline void check_huge_range(struct vm_area_struct *vma,
1943 unsigned long start, unsigned long end,
1944 struct numa_maps *md)
1945{
1946}
1947#endif
1948
1949int show_numa_map(struct seq_file *m, void *v)
1950{
1951 struct proc_maps_private *priv = m->private;
1952 struct vm_area_struct *vma = v;
1953 struct numa_maps *md;
1954 struct file *file = vma->vm_file;
1955 struct mm_struct *mm = vma->vm_mm;
1956 struct mempolicy *pol;
1957 int n;
1958 char buffer[50];
1959
1960 if (!mm)
1961 return 0;
1962
1963 md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
1964 if (!md)
1965 return 0;
1966
1967 pol = get_vma_policy(priv->task, vma, vma->vm_start);
1968 mpol_to_str(buffer, sizeof(buffer), pol);
1969 /*
1970 * unref shared or other task's mempolicy
1971 */
1972 if (pol != &default_policy && pol != current->mempolicy)
1973 __mpol_free(pol);
1974
1975 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1976
1977 if (file) {
1978 seq_printf(m, " file=");
1979 seq_path(m, &file->f_path, "\n\t= ");
1980 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1981 seq_printf(m, " heap");
1982 } else if (vma->vm_start <= mm->start_stack &&
1983 vma->vm_end >= mm->start_stack) {
1984 seq_printf(m, " stack");
1985 }
1986
1987 if (is_vm_hugetlb_page(vma)) {
1988 check_huge_range(vma, vma->vm_start, vma->vm_end, md);
1989 seq_printf(m, " huge");
1990 } else {
1991 check_pgd_range(vma, vma->vm_start, vma->vm_end,
1992 &node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
1993 }
1994
1995 if (!md->pages)
1996 goto out;
1997
1998 if (md->anon)
1999 seq_printf(m," anon=%lu",md->anon);
2000
2001 if (md->dirty)
2002 seq_printf(m," dirty=%lu",md->dirty);
2003
2004 if (md->pages != md->anon && md->pages != md->dirty)
2005 seq_printf(m, " mapped=%lu", md->pages);
2006
2007 if (md->mapcount_max > 1)
2008 seq_printf(m, " mapmax=%lu", md->mapcount_max);
2009
2010 if (md->swapcache)
2011 seq_printf(m," swapcache=%lu", md->swapcache);
2012
2013 if (md->active < md->pages && !is_vm_hugetlb_page(vma))
2014 seq_printf(m," active=%lu", md->active);
2015
2016 if (md->writeback)
2017 seq_printf(m," writeback=%lu", md->writeback);
2018
2019 for_each_node_state(n, N_HIGH_MEMORY)
2020 if (md->node[n])
2021 seq_printf(m, " N%d=%lu", n, md->node[n]);
2022out:
2023 seq_putc(m, '\n');
2024 kfree(md);
2025
2026 if (m->count < m->size)
2027 m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
2028 return 0;
2029}