/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave	Allocate memory interleaved over a set of nodes,
 *		with normal fallback if it fails.
 *		For VMA based allocations this interleaves based on the
 *		offset into the backing object or offset into the mapping
 *		for anonymous memory. For process policy a process counter
 *		is used.
 *
 * bind		Only allocate memory on a specific set of nodes,
 *		no fallback.
 *		FIXME: memory is allocated starting with the first node
 *		to the last. It would be better if bind would truly restrict
 *		the allocation to memory nodes instead.
 *
 * preferred	Try a specific node first before normal fallback.
 *		As a special case node -1 here means do the allocation
 *		on the local CPU. This is normally identical to default,
 *		but useful to set in a VMA when you have a non default
 *		process policy.
 *
 * default	Allocate on the local node first, or when on a VMA
 *		use the process policy. This is what Linux always did
 *		in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
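
/*
 * Illustrative only -- a minimal userspace sketch (not part of this file)
 * showing how the policies described above are requested through the
 * set_mempolicy()/mbind() system calls.  It assumes the libnuma <numaif.h>
 * syscall wrappers are available and omits error handling; treat it as a
 * hedged example of the interface, not as kernel code.
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		unsigned long interleave_nodes = 0x3;	// nodes 0 and 1
 *		unsigned long bind_node = 0x1;		// node 0 only
 *		size_t len = 1 << 20;
 *		void *p;
 *
 *		// process policy: interleave new allocations over nodes 0,1
 *		set_mempolicy(MPOL_INTERLEAVE, &interleave_nodes,
 *			      8 * sizeof(interleave_nodes));
 *
 *		// VMA policy: restrict one (page-aligned) mapping to node 0
 *		p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		mbind(p, len, MPOL_BIND, &bind_node,
 *		      8 * sizeof(bind_node), 0);
 *		return 0;
 *	}
 */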

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)		/* Gather statistics */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

/* Check that the nodemask contains at least one populated zone */
static int is_valid_nodemask(const nodemask_t *nodemask)
{
	int nd, k;

	for_each_node_mask(nd, *nodemask) {
		struct zone *z;

		for (k = 0; k <= policy_zone; k++) {
			z = &NODE_DATA(nd)->node_zones[k];
			if (z->present_pages > 0)
				return 1;
		}
	}

	return 0;
}

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
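
/*
 * Worked example (illustrative, not from the original source): with
 * MPOL_F_RELATIVE_NODES the user nodemask is interpreted relative to the
 * nodes the task is allowed to use.  If rel = {4,6} (two allowed nodes)
 * and orig = {0,2}, nodes_fold() first folds orig modulo
 * nodes_weight(rel) = 2, giving {0}; nodes_onto() then maps bit 0 onto
 * the first set bit of rel, so the effective mask becomes {4}.
 */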

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!is_valid_nodemask(nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_semaphore for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_HIGH_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_HIGH_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}

/*
 * This function just creates a new policy, does some check and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;	/* simply delete any existing policy */
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol,
				 const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
			    *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}

	pol->v.nodes = tmp;
	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = first_node(tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();
	}
}

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/* Migrate a policy to a different set of nodes */
static void mpol_rebind_policy(struct mempolicy *pol,
			       const nodemask_t *newmask)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;
	mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static void gather_stats(struct page *, void *, int pte_dirty);
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

/* Scan through pages checking if pages follow certain conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 * And we cannot move PageKsm pages sensibly or safely yet.
		 */
		if (PageReserved(page) || PageKsm(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & MPOL_MF_STATS)
			gather_stats(page, private, pte_dirty(*pte));
		else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		if (check_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (check_pmd_range(vma, pud, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int check_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (check_pud_range(vma, pgd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;

	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return ERR_PTR(-EFAULT);
			if (prev && prev->vm_end < vma->vm_start)
				return ERR_PTR(-EFAULT);
		}
		if (!is_vm_hugetlb_page(vma) &&
		    ((flags & MPOL_MF_STRICT) ||
		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
				vma_migratable(vma)))) {
			unsigned long endvma = vma->vm_end;

			if (endvma > end)
				endvma = end;
			if (vma->vm_start > start)
				start = vma->vm_start;
			err = check_pgd_range(vma, start, endvma, nodes,
						flags, private);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
		prev = vma;
	}
	return first;
}

/* Apply policy to a single VMA */
static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
{
	int err = 0;
	struct mempolicy *old = vma->vm_policy;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	if (vma->vm_ops && vma->vm_ops->set_policy)
		err = vma->vm_ops->set_policy(vma, new);
	if (!err) {
		mpol_get(new);
		vma->vm_policy = new;
		mpol_put(old);
	}
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma_prev(mm, start, &prev);
	if (!vma || vma->vm_start > start)
		return -EFAULT;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend = min(end, vma->vm_end);

		pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				 vma->anon_vma, vma->vm_file, pgoff, new_pol);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			continue;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
		err = policy_vma(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}

/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy.  Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */

void mpol_fix_fork_child_flag(struct task_struct *p)
{
	if (p->mempolicy)
		p->flags |= PF_MEMPOLICY;
	else
		p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
	mpol_fix_fork_child_flag(current);
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	struct mm_struct *mm = current->mm;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}
	/*
	 * prevent changing our mempolicy while show_numa_maps()
	 * is using it.
	 * Note:  do_set_mempolicy() can be called at init time
	 * with no 'mm'.
	 */
	if (mm)
		down_write(&mm->mmap_sem);
	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		if (mm)
			up_write(&mm->mmap_sem);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	mpol_set_task_struct_flag();
	if (new && new->mode == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	task_unlock(current);
	if (mm)
		up_write(&mm->mmap_sem);

	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
		if (!isolate_lru_page(page)) {
			list_add_tail(&page->lru, pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
	}
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_node_page, dest, 0);

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
	if (err)
		goto out;

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory source that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning from_tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */

	tmp = *from_nodes;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = -1;
		int dest = 0;

		for_each_node_mask(s, tmp) {
			d = node_remap(s, *from_nodes, *to_nodes);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == -1)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
out:
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;

}
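
/*
 * Worked example (illustrative, not part of the original comment): with
 * from_nodes = {0,1} and to_nodes = {1,2}, the scan first considers
 * s = 0 -> d = 1, but node 1 is still a pending source, so it keeps
 * looking and finds s = 1 -> d = 2, whose destination is not in 'tmp';
 * node 1 is therefore drained to node 2 first.  On the next pass only
 * node 0 remains and it is moved to the now-vacated node 1.  Migrating
 * in this order preserves the relative layout without piling two nodes'
 * worth of memory onto node 1.
 */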

/*
 * Allocate a new page for page migration based on vma policy.
 * Start assuming that page is mapped by vma pointed to by @private.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	struct vm_area_struct *vma = (struct vm_area_struct *)private;
	unsigned long uninitialized_var(address);

	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	return -ENOSYS;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned short mode, unsigned short mode_flags,
		     nodemask_t *nmask, unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	LIST_HEAD(pagelist);

	if (flags & ~(unsigned long)(MPOL_MF_STRICT |
				     MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	new = mpol_new(mode, mode_flags, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
		 start, start + len, mode, mode_flags,
		 nmask ? nodes_addr(*nmask)[0] : -1);

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

		err = migrate_prep();
		if (err)
			goto mpol_out;
	}
	{
		NODEMASK_SCRATCH(scratch);
		if (scratch) {
			down_write(&mm->mmap_sem);
			task_lock(current);
			err = mpol_set_nodemask(new, nmask, scratch);
			task_unlock(current);
			if (err)
				up_write(&mm->mmap_sem);
		} else
			err = -ENOMEM;
		NODEMASK_SCRATCH_FREE(scratch);
	}
	if (err)
		goto mpol_out;

	vma = check_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);

	err = PTR_ERR(vma);
	if (!IS_ERR(vma)) {
		int nr_failed = 0;

		err = mbind_range(mm, start, end, new);

		if (!list_empty(&pagelist))
			nr_failed = migrate_pages(&pagelist, new_vma_page,
						  (unsigned long)vma, 0);

		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
			err = -EIO;
	} else
		putback_lru_pages(&pagelist);

	up_write(&mm->mmap_sem);
 mpol_out:
	mpol_put(new);
	return err;
}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
		     unsigned long maxnode)
{
	unsigned long k;
	unsigned long nlongs;
	unsigned long endmask;

	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;
	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
		return -EINVAL;

	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/* When the user specified more nodes than supported just check
	   if the non supported part is all zero. */
	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
		if (nlongs > PAGE_SIZE/sizeof(long))
			return -EINVAL;
		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
			unsigned long t;
			if (get_user(t, nmask + k))
				return -EFAULT;
			if (k == nlongs - 1) {
				if (t & endmask)
					return -EINVAL;
			} else if (t)
				return -EINVAL;
		}
		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
		endmask = ~0UL;
	}

	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
		return -EFAULT;
	nodes_addr(*nodes)[nlongs-1] &= endmask;
	return 0;
}
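
/*
 * Worked example (illustrative, not from the original source): on a
 * 64-bit kernel a caller passing maxnode = 65 describes bits 0..63.
 * After --maxnode, maxnode = 64, so nlongs = 1 and, since 64 is a
 * multiple of BITS_PER_LONG, endmask = ~0UL: the single user word is
 * copied verbatim.  With maxnode = 17 instead, maxnode becomes 16 and
 * endmask = (1UL << 16) - 1, so any node bits at position 16 or above
 * in the copied word are masked off.
 */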

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
			      nodemask_t *nodes)
{
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
			return -EINVAL;
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
			return -EFAULT;
		copy = nbytes;
	}
	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}

SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
		unsigned long, mode, unsigned long __user *, nmask,
		unsigned long, maxnode, unsigned, flags)
{
	nodemask_t nodes;
	int err;
	unsigned short mode_flags;

	mode_flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if (mode >= MPOL_MAX)
		return -EINVAL;
	if ((mode_flags & MPOL_F_STATIC_NODES) &&
	    (mode_flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
}

/* Set the process memory policy */
SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
		unsigned long, maxnode)
{
	int err;
	nodemask_t nodes;
	unsigned short flags;

	flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if ((unsigned int)mode >= MPOL_MAX)
		return -EINVAL;
	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_set_mempolicy(mode, flags, &nodes);
}

SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
		const unsigned long __user *, old_nodes,
		const unsigned long __user *, new_nodes)
{
	const struct cred *cred = current_cred(), *tcred;
	struct mm_struct *mm;
	struct task_struct *task;
	nodemask_t old;
	nodemask_t new;
	nodemask_t task_nodes;
	int err;

	err = get_nodes(&old, old_nodes, maxnode);
	if (err)
		return err;

	err = get_nodes(&new, new_nodes, maxnode);
	if (err)
		return err;

	/* Find the mm_struct */
	read_lock(&tasklist_lock);
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}
	mm = get_task_mm(task);
	read_unlock(&tasklist_lock);

	if (!mm)
		return -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	rcu_read_lock();
	tcred = __task_cred(task);
	if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
	    cred->uid != tcred->suid && cred->uid != tcred->uid &&
	    !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out;
	}
	rcu_read_unlock();

	task_nodes = cpuset_mems_allowed(task);
	/* Is the user allowed to access the target nodes? */
	if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	if (!nodes_subset(new, node_states[N_HIGH_MEMORY])) {
		err = -EINVAL;
		goto out;
	}

	err = security_task_movememory(task);
	if (err)
		goto out;

	err = do_migrate_pages(mm, &old, &new,
		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
out:
	mmput(mm);
	return err;
}


/* Retrieve NUMA policy */
SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
		unsigned long __user *, nmask, unsigned long, maxnode,
		unsigned long, addr, unsigned long, flags)
{
	int err;
	int uninitialized_var(pval);
	nodemask_t nodes;

	if (nmask != NULL && maxnode < MAX_NUMNODES)
		return -EINVAL;

	err = do_get_mempolicy(&pval, &nodes, addr, flags);

	if (err)
		return err;

	if (policy && put_user(pval, policy))
		return -EFAULT;

	if (nmask)
		err = copy_nodes_to_user(nmask, maxnode, &nodes);

	return err;
}

#ifdef CONFIG_COMPAT

asmlinkage long compat_sys_get_mempolicy(int __user *policy,
				     compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode,
				     compat_ulong_t addr, compat_ulong_t flags)
{
	long err;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask)
		nm = compat_alloc_user_space(alloc_size);

	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);

	if (!err && nmask) {
		err = copy_from_user(bm, nm, alloc_size);
		/* ensure entire bitmap is zeroed */
		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
		err |= compat_put_bitmap(nmask, bm, nr_bits);
	}

	return err;
}

asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(bm, nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, bm, alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_set_mempolicy(mode, nm, nr_bits+1);
}

asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
			     compat_ulong_t mode, compat_ulong_t __user *nmask,
			     compat_ulong_t maxnode, compat_ulong_t flags)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	nodemask_t bm;

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}

#endif

/*
 * get_vma_policy(@task, @vma, @addr)
 * @task - task for fallback if vma policy == default
 * @vma  - virtual memory area whose policy is sought
 * @addr - address in @vma for shared policy lookup
 *
 * Returns effective policy for a VMA at specified address.
 * Falls back to @task or system default policy, as necessary.
 * Current or other task's task mempolicy and non-shared vma policies
 * are protected by the task's mmap_sem, which must be held for read by
 * the caller.
 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
 * count--added by the get_policy() vm_op, as appropriate--to protect against
 * freeing by another task.  It is the caller's responsibility to free the
 * extra reference for shared policies.
 */
static struct mempolicy *get_vma_policy(struct task_struct *task,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = task->mempolicy;

	if (vma) {
		if (vma->vm_ops && vma->vm_ops->get_policy) {
			struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
									addr);
			if (vpol)
				pol = vpol;
		} else if (vma->vm_policy)
			pol = vma->vm_policy;
	}
	if (!pol)
		pol = &default_policy;
	return pol;
}

/*
 * Return a nodemask representing a mempolicy for filtering nodes for
 * page allocation
 */
static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
{
	/* Lower zones don't get a nodemask applied for MPOL_BIND */
	if (unlikely(policy->mode == MPOL_BIND) &&
			gfp_zone(gfp) >= policy_zone &&
			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
		return &policy->v.nodes;

	return NULL;
}

/* Return a zonelist indicated by gfp for node representing a mempolicy */
static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy)
{
	int nd = numa_node_id();

	switch (policy->mode) {
	case MPOL_PREFERRED:
		if (!(policy->flags & MPOL_F_LOCAL))
			nd = policy->v.preferred_node;
		break;
	case MPOL_BIND:
		/*
		 * Normally, MPOL_BIND allocations are node-local within the
		 * allowed nodemask.  However, if __GFP_THISNODE is set and the
		 * current node is part of the mask, we use the zonelist for
		 * the first node in the mask instead.
		 */
		if (unlikely(gfp & __GFP_THISNODE) &&
				unlikely(!node_isset(nd, policy->v.nodes)))
			nd = first_node(policy->v.nodes);
		break;
	case MPOL_INTERLEAVE: /* should not happen */
		break;
	default:
		BUG();
	}
	return node_zonelist(nd, gfp);
}

/* Do dynamic interleaving for a process */
static unsigned interleave_nodes(struct mempolicy *policy)
{
	unsigned nid, next;
	struct task_struct *me = current;

	nid = me->il_next;
	next = next_node(nid, policy->v.nodes);
	if (next >= MAX_NUMNODES)
		next = first_node(policy->v.nodes);
	if (next < MAX_NUMNODES)
		me->il_next = next;
	return nid;
}

/*
 * Depending on the memory policy provide a node from which to allocate the
 * next slab entry.
 * @policy must be protected from freeing by the caller.  If @policy is
 * the current task's mempolicy, this protection is implicit, as only the
 * task can change its policy.  The system default policy requires no
 * such protection.
 */
unsigned slab_node(struct mempolicy *policy)
{
	if (!policy || policy->flags & MPOL_F_LOCAL)
		return numa_node_id();

	switch (policy->mode) {
	case MPOL_PREFERRED:
		/*
		 * handled MPOL_F_LOCAL above
		 */
		return policy->v.preferred_node;

	case MPOL_INTERLEAVE:
		return interleave_nodes(policy);

	case MPOL_BIND: {
		/*
		 * Follow bind policy behavior and start allocation at the
		 * first node.
		 */
		struct zonelist *zonelist;
		struct zone *zone;
		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
		zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
		(void)first_zones_zonelist(zonelist, highest_zoneidx,
							&policy->v.nodes,
							&zone);
		return zone->node;
	}

	default:
		BUG();
	}
}

/* Do static interleaving for a VMA with known offset. */
static unsigned offset_il_node(struct mempolicy *pol,
		struct vm_area_struct *vma, unsigned long off)
{
	unsigned nnodes = nodes_weight(pol->v.nodes);
	unsigned target;
	int c;
	int nid = -1;

	if (!nnodes)
		return numa_node_id();
	target = (unsigned int)off % nnodes;
	c = 0;
	do {
		nid = next_node(nid, pol->v.nodes);
		c++;
	} while (c <= target);
	return nid;
}

/* Determine a node number for interleave */
static inline unsigned interleave_nid(struct mempolicy *pol,
		 struct vm_area_struct *vma, unsigned long addr, int shift)
{
	if (vma) {
		unsigned long off;

		/*
		 * for small pages, there is no difference between
		 * shift and PAGE_SHIFT, so the bit-shift is safe.
		 * for huge pages, since vm_pgoff is in units of small
		 * pages, we need to shift off the always 0 bits to get
		 * a useful offset.
		 */
		BUG_ON(shift < PAGE_SHIFT);
		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
		off += (addr - vma->vm_start) >> shift;
		return offset_il_node(pol, vma, off);
	} else
		return interleave_nodes(pol);
}
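
/*
 * Worked example (illustrative, not from the original source): for a
 * VMA interleaving over nodes {0,2,5}, nnodes = 3, so a page at offset
 * off = 7 maps to target = 7 % 3 = 1, i.e. the second node in the mask,
 * node 2.  For 2MB huge pages (shift = 21 on x86-64), vm_pgoff is kept
 * in small-page units, so interleave_nid() first drops the low
 * 21 - PAGE_SHIFT = 9 bits before adding the huge-page index within
 * the VMA.
 */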

#ifdef CONFIG_HUGETLBFS
/*
 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
 * @vma = virtual memory area whose policy is sought
 * @addr = address in @vma for shared policy lookup and interleave policy
 * @gfp_flags = for requested zone
 * @mpol = pointer to mempolicy pointer for reference counted mempolicy
 * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
 *
 * Returns a zonelist suitable for a huge page allocation and a pointer
 * to the struct mempolicy for conditional unref after allocation.
 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
 * @nodemask for filtering the zonelist.
 */
struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
				gfp_t gfp_flags, struct mempolicy **mpol,
				nodemask_t **nodemask)
{
	struct zonelist *zl;

	*mpol = get_vma_policy(current, vma, addr);
	*nodemask = NULL;	/* assume !MPOL_BIND */

	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
				huge_page_shift(hstate_vma(vma))), gfp_flags);
	} else {
		zl = policy_zonelist(gfp_flags, *mpol);
		if ((*mpol)->mode == MPOL_BIND)
			*nodemask = &(*mpol)->v.nodes;
	}
	return zl;
}

/*
 * init_nodemask_of_mempolicy
 *
 * If the current task's mempolicy is "default" [NULL], return 'false'
 * to indicate default policy.  Otherwise, extract the policy nodemask
 * for 'bind' or 'interleave' policy into the argument nodemask, or
 * initialize the argument nodemask to contain the single node for
 * 'preferred' or 'local' policy and return 'true' to indicate presence
 * of non-default mempolicy.
 *
 * We don't bother with reference counting the mempolicy [mpol_get/put]
 * because the current task is examining its own mempolicy and a task's
 * mempolicy is only ever changed by the task itself.
 *
 * N.B., it is the caller's responsibility to free a returned nodemask.
 */
bool init_nodemask_of_mempolicy(nodemask_t *mask)
{
	struct mempolicy *mempolicy;
	int nid;

	if (!(mask && current->mempolicy))
		return false;

	mempolicy = current->mempolicy;
	switch (mempolicy->mode) {
	case MPOL_PREFERRED:
		if (mempolicy->flags & MPOL_F_LOCAL)
			nid = numa_node_id();
		else
			nid = mempolicy->v.preferred_node;
		init_nodemask_of_node(mask, nid);
		break;

	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*mask = mempolicy->v.nodes;
		break;

	default:
		BUG();
	}

	return true;
}
#endif

/* Allocate a page in interleaved policy.
   Own path because it needs to do special accounting. */
static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
					  unsigned nid)
{
	struct zonelist *zl;
	struct page *page;

	zl = node_zonelist(nid, gfp);
	page = __alloc_pages(gfp, order, zl);
	if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
	return page;
}

/**
 * alloc_page_vma - Allocate a page for a VMA.
 *
 * @gfp:
 *	%GFP_USER    user allocation.
 *	%GFP_KERNEL  kernel allocations,
 *	%GFP_HIGHMEM highmem/user allocations,
 *	%GFP_FS      allocation should not call back into a file system.
 *	%GFP_ATOMIC  don't sleep.
 *
 * @vma:  Pointer to VMA or NULL if not available.
 * @addr: Virtual Address of the allocation. Must be inside the VMA.
 *
 * This function allocates a page from the kernel page pool and applies
 * a NUMA policy associated with the VMA or the current process.
 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
 * mm_struct of the VMA to prevent it from going away. Should be used for
 * all allocations for pages that will be mapped into
 * user space. Returns NULL when no page can be allocated.
 *
 * Should be called with the mm_sem of the vma held.
 */
struct page *
alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = get_vma_policy(current, vma, addr);
	struct zonelist *zl;

	if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
		unsigned nid;

		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
		mpol_cond_put(pol);
		return alloc_page_interleave(gfp, 0, nid);
	}
	zl = policy_zonelist(gfp, pol);
	if (unlikely(mpol_needs_cond_ref(pol))) {
		/*
		 * slow path: ref counted shared policy
		 */
		struct page *page = __alloc_pages_nodemask(gfp, 0,
						zl, policy_nodemask(gfp, pol));
		__mpol_put(pol);
		return page;
	}
	/*
	 * fast path:  default or task policy
	 */
	return __alloc_pages_nodemask(gfp, 0, zl, policy_nodemask(gfp, pol));
}
1706
1707/**
1708 * alloc_pages_current - Allocate pages.
1709 *
1710 * @gfp:
1711 * %GFP_USER user allocation,
1712 * %GFP_KERNEL kernel allocation,
1713 * %GFP_HIGHMEM highmem allocation,
1714 * %GFP_FS don't call back into a file system.
1715 * %GFP_ATOMIC don't sleep.
1716 * @order: Power of two of allocation size in pages. 0 is a single page.
1717 *
1718 * Allocate a page from the kernel page pool. When not in
1719 * interrupt context and apply the current process NUMA policy.
1720 * Returns NULL when no page can be allocated.
1721 *
cf2a473c 1722 * Don't call cpuset_update_task_memory_state() unless
1da177e4
LT
1723 * 1) it's ok to take cpuset_sem (can WAIT), and
1724 * 2) allocating for current task (not interrupt).
1725 */
dd0fc66f 1726struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1da177e4
LT
1727{
1728 struct mempolicy *pol = current->mempolicy;
1729
9b819d20 1730 if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
1da177e4 1731 pol = &default_policy;
52cd3b07
LS
1732
1733 /*
1734 * No reference counting needed for current->mempolicy
1735 * nor system default_policy
1736 */
45c4745a 1737 if (pol->mode == MPOL_INTERLEAVE)
1da177e4 1738 return alloc_page_interleave(gfp, order, interleave_nodes(pol));
19770b32 1739 return __alloc_pages_nodemask(gfp, order,
52cd3b07 1740 policy_zonelist(gfp, pol), policy_nodemask(gfp, pol));
1da177e4
LT
1741}
1742EXPORT_SYMBOL(alloc_pages_current);
1743
4225399a 1744/*
846a16bf 1745 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
4225399a
PJ
 1746 * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
 1747 * with the mems_allowed returned by cpuset_mems_allowed(). This
 1748 * keeps mempolicies cpuset relative after its cpuset moves. See
 1749 * also kernel/cpuset.c update_nodemask().
1750 */
4225399a 1751
1752/* Slow path of a mempolicy duplicate */
1753struct mempolicy *__mpol_dup(struct mempolicy *old)
1754{
1755 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
1756
1757 if (!new)
1758 return ERR_PTR(-ENOMEM);
99ee4ca7 1759 rcu_read_lock();
1760 if (current_cpuset_is_being_rebound()) {
1761 nodemask_t mems = cpuset_mems_allowed(current);
1762 mpol_rebind_policy(old, &mems);
1763 }
99ee4ca7 1764 rcu_read_unlock();
1765 *new = *old;
1766 atomic_set(&new->refcnt, 1);
1767 return new;
1768}
1769
1770/*
 1771 * If *frompol needs [has] an extra ref, copy *frompol to *tompol,
 1772 * eliminate the MPOL_F_* flags that require conditional ref and
1773 * [NOTE!!!] drop the extra ref. Not safe to reference *frompol directly
1774 * after return. Use the returned value.
1775 *
1776 * Allows use of a mempolicy for, e.g., multiple allocations with a single
1777 * policy lookup, even if the policy needs/has extra ref on lookup.
1778 * shmem_readahead needs this.
1779 */
1780struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
1781 struct mempolicy *frompol)
1782{
1783 if (!mpol_needs_cond_ref(frompol))
1784 return frompol;
1785
1786 *tompol = *frompol;
1787 tompol->flags &= ~MPOL_F_SHARED; /* copy doesn't need unref */
1788 __mpol_put(frompol);
1789 return tompol;
1790}
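/*
 * Illustrative sketch (not part of the original file): roughly how a
 * shmem-style caller can obtain a stable, unref'd copy of a possibly
 * shared policy before a series of allocations. The wrapper function is
 * hypothetical; mpol_shared_policy_lookup() is defined later in this
 * file and declared in <linux/mempolicy.h>.
 */
#if 0	/* example only */
static void example_use_shared_policy(struct shared_policy *sp,
				      unsigned long idx)
{
	struct mempolicy tmp;
	struct mempolicy *pol;

	/* the lookup may return a ref-counted MPOL_F_SHARED policy ... */
	pol = __mpol_cond_copy(&tmp, mpol_shared_policy_lookup(sp, idx));
	/* ... from here on only 'pol' may be used, and no unref is needed */
}
#endif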
1791
1792static int mpol_match_intent(const struct mempolicy *a,
1793 const struct mempolicy *b)
1794{
1795 if (a->flags != b->flags)
1796 return 0;
1797 if (!mpol_store_user_nodemask(a))
1798 return 1;
1799 return nodes_equal(a->w.user_nodemask, b->w.user_nodemask);
1800}
1801
1802/* Slow path of a mempolicy comparison */
1803int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1804{
1805 if (!a || !b)
1806 return 0;
45c4745a 1807 if (a->mode != b->mode)
1da177e4 1808 return 0;
45c4745a 1809 if (a->mode != MPOL_DEFAULT && !mpol_match_intent(a, b))
f5b087b5 1810 return 0;
45c4745a 1811 switch (a->mode) {
1812 case MPOL_BIND:
1813 /* Fall through */
1da177e4 1814 case MPOL_INTERLEAVE:
dfcd3c0d 1815 return nodes_equal(a->v.nodes, b->v.nodes);
1da177e4 1816 case MPOL_PREFERRED:
1817 return a->v.preferred_node == b->v.preferred_node &&
1818 a->flags == b->flags;
1819 default:
1820 BUG();
1821 return 0;
1822 }
1823}
1824
1825/*
1826 * Shared memory backing store policy support.
1827 *
1828 * Remember policies even when nobody has shared memory mapped.
 1829 * The policies are kept in a Red-Black tree linked from the inode.
1830 * They are protected by the sp->lock spinlock, which should be held
1831 * for any accesses to the tree.
1832 */
1833
1834/* lookup first element intersecting start-end */
1835/* Caller holds sp->lock */
1836static struct sp_node *
1837sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
1838{
1839 struct rb_node *n = sp->root.rb_node;
1840
1841 while (n) {
1842 struct sp_node *p = rb_entry(n, struct sp_node, nd);
1843
1844 if (start >= p->end)
1845 n = n->rb_right;
1846 else if (end <= p->start)
1847 n = n->rb_left;
1848 else
1849 break;
1850 }
1851 if (!n)
1852 return NULL;
1853 for (;;) {
1854 struct sp_node *w = NULL;
1855 struct rb_node *prev = rb_prev(n);
1856 if (!prev)
1857 break;
1858 w = rb_entry(prev, struct sp_node, nd);
1859 if (w->end <= start)
1860 break;
1861 n = prev;
1862 }
1863 return rb_entry(n, struct sp_node, nd);
1864}
1865
1866/* Insert a new shared policy into the list. */
1867/* Caller holds sp->lock */
1868static void sp_insert(struct shared_policy *sp, struct sp_node *new)
1869{
1870 struct rb_node **p = &sp->root.rb_node;
1871 struct rb_node *parent = NULL;
1872 struct sp_node *nd;
1873
1874 while (*p) {
1875 parent = *p;
1876 nd = rb_entry(parent, struct sp_node, nd);
1877 if (new->start < nd->start)
1878 p = &(*p)->rb_left;
1879 else if (new->end > nd->end)
1880 p = &(*p)->rb_right;
1881 else
1882 BUG();
1883 }
1884 rb_link_node(&new->nd, parent, p);
1885 rb_insert_color(&new->nd, &sp->root);
140d5a49 1886 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
45c4745a 1887 new->policy ? new->policy->mode : 0);
1888}
1889
1890/* Find shared policy intersecting idx */
1891struct mempolicy *
1892mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
1893{
1894 struct mempolicy *pol = NULL;
1895 struct sp_node *sn;
1896
1897 if (!sp->root.rb_node)
1898 return NULL;
1899 spin_lock(&sp->lock);
1900 sn = sp_lookup(sp, idx, idx+1);
1901 if (sn) {
1902 mpol_get(sn->policy);
1903 pol = sn->policy;
1904 }
1905 spin_unlock(&sp->lock);
1906 return pol;
1907}
1908
1909static void sp_delete(struct shared_policy *sp, struct sp_node *n)
1910{
140d5a49 1911 pr_debug("deleting %lx-%lx\n", n->start, n->end);
1da177e4 1912 rb_erase(&n->nd, &sp->root);
f0be3d32 1913 mpol_put(n->policy);
1914 kmem_cache_free(sn_cache, n);
1915}
1916
1917static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
1918 struct mempolicy *pol)
1919{
1920 struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
1921
1922 if (!n)
1923 return NULL;
1924 n->start = start;
1925 n->end = end;
1926 mpol_get(pol);
aab0b102 1927 pol->flags |= MPOL_F_SHARED; /* for unref */
1928 n->policy = pol;
1929 return n;
1930}
1931
1932/* Replace a policy range. */
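/*
 * For illustration (not part of the original comment): if an existing
 * node covers pgoff [0, 8) and a new policy is installed over [2, 5),
 * the old node is trimmed to [0, 2), a copy of it is re-inserted as
 * [5, 8) via sp_alloc(), and the new node occupies [2, 5).
 */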
1933static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
1934 unsigned long end, struct sp_node *new)
1935{
1936 struct sp_node *n, *new2 = NULL;
1937
1938restart:
1939 spin_lock(&sp->lock);
1940 n = sp_lookup(sp, start, end);
1941 /* Take care of old policies in the same range. */
1942 while (n && n->start < end) {
1943 struct rb_node *next = rb_next(&n->nd);
1944 if (n->start >= start) {
1945 if (n->end <= end)
1946 sp_delete(sp, n);
1947 else
1948 n->start = end;
1949 } else {
1950 /* Old policy spanning whole new range. */
1951 if (n->end > end) {
1952 if (!new2) {
1953 spin_unlock(&sp->lock);
1954 new2 = sp_alloc(end, n->end, n->policy);
1955 if (!new2)
1956 return -ENOMEM;
1957 goto restart;
1958 }
1959 n->end = start;
1960 sp_insert(sp, new2);
1961 new2 = NULL;
1962 break;
1963 } else
1964 n->end = start;
1965 }
1966 if (!next)
1967 break;
1968 n = rb_entry(next, struct sp_node, nd);
1969 }
1970 if (new)
1971 sp_insert(sp, new);
1972 spin_unlock(&sp->lock);
1973 if (new2) {
f0be3d32 1974 mpol_put(new2->policy);
1975 kmem_cache_free(sn_cache, new2);
1976 }
1977 return 0;
1978}
1979
1980/**
1981 * mpol_shared_policy_init - initialize shared policy for inode
1982 * @sp: pointer to inode shared policy
1983 * @mpol: struct mempolicy to install
1984 *
1985 * Install non-NULL @mpol in inode's shared policy rb-tree.
1986 * On entry, the current task has a reference on a non-NULL @mpol.
1987 * This must be released on exit.
4bfc4495 1988 * This is called at get_inode() time, so GFP_KERNEL allocations can be used.
1989 */
1990void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
1991{
1992 int ret;
1993
1994 sp->root = RB_ROOT; /* empty tree == default mempolicy */
1995 spin_lock_init(&sp->lock);
1996
1997 if (mpol) {
1998 struct vm_area_struct pvma;
1999 struct mempolicy *new;
4bfc4495 2000 NODEMASK_SCRATCH(scratch);
71fe804b 2001
2002 if (!scratch)
2003 return;
2004 /* contextualize the tmpfs mount point mempolicy */
2005 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2006 if (IS_ERR(new)) {
2007 mpol_put(mpol); /* drop our ref on sb mpol */
4bfc4495 2008 NODEMASK_SCRATCH_FREE(scratch);
71fe804b 2009 return; /* no valid nodemask intersection */
2010 }
2011
2012 task_lock(current);
4bfc4495 2013 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2014 task_unlock(current);
2015 mpol_put(mpol); /* drop our ref on sb mpol */
2016 if (ret) {
4bfc4495 2017 NODEMASK_SCRATCH_FREE(scratch);
2018 mpol_put(new);
2019 return;
2020 }
2021
2022 /* Create pseudo-vma that contains just the policy */
2023 memset(&pvma, 0, sizeof(struct vm_area_struct));
2024 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2025 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2026 mpol_put(new); /* drop initial ref */
4bfc4495 2027 NODEMASK_SCRATCH_FREE(scratch);
2028 }
2029}
2030
2031int mpol_set_shared_policy(struct shared_policy *info,
2032 struct vm_area_struct *vma, struct mempolicy *npol)
2033{
2034 int err;
2035 struct sp_node *new = NULL;
2036 unsigned long sz = vma_pages(vma);
2037
028fec41 2038 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
1da177e4 2039 vma->vm_pgoff,
45c4745a 2040 sz, npol ? npol->mode : -1,
028fec41 2041 npol ? npol->flags : -1,
140d5a49 2042 npol ? nodes_addr(npol->v.nodes)[0] : -1);
2043
2044 if (npol) {
2045 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2046 if (!new)
2047 return -ENOMEM;
2048 }
2049 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2050 if (err && new)
2051 kmem_cache_free(sn_cache, new);
2052 return err;
2053}
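/*
 * Illustrative note (not part of the original file): shared policies
 * normally arrive here when userspace applies mbind(2) to a MAP_SHARED
 * mapping of a tmpfs/shmem object, roughly:
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, shm_fd, 0);
 *	unsigned long mask = 0x3;		(nodes 0 and 1)
 *	mbind(p, len, MPOL_INTERLEAVE, &mask, sizeof(mask) * 8, 0);
 *
 * The shmem set_policy vm operation then calls mpol_set_shared_policy().
 */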
2054
2055/* Free a backing policy store on inode delete. */
2056void mpol_free_shared_policy(struct shared_policy *p)
2057{
2058 struct sp_node *n;
2059 struct rb_node *next;
2060
2061 if (!p->root.rb_node)
2062 return;
2063 spin_lock(&p->lock);
2064 next = rb_first(&p->root);
2065 while (next) {
2066 n = rb_entry(next, struct sp_node, nd);
2067 next = rb_next(&n->nd);
90c5029e 2068 rb_erase(&n->nd, &p->root);
f0be3d32 2069 mpol_put(n->policy);
2070 kmem_cache_free(sn_cache, n);
2071 }
2072 spin_unlock(&p->lock);
2073}
2074
2075/* assumes fs == KERNEL_DS */
2076void __init numa_policy_init(void)
2077{
2078 nodemask_t interleave_nodes;
2079 unsigned long largest = 0;
2080 int nid, prefer = 0;
2081
2082 policy_cache = kmem_cache_create("numa_policy",
2083 sizeof(struct mempolicy),
20c2df83 2084 0, SLAB_PANIC, NULL);
2085
2086 sn_cache = kmem_cache_create("shared_policy_node",
2087 sizeof(struct sp_node),
20c2df83 2088 0, SLAB_PANIC, NULL);
1da177e4 2089
2090 /*
2091 * Set interleaving policy for system init. Interleaving is only
 2092 * enabled across suitably sized nodes (default is >= 16MB); we fall
 2093 * back to the largest node if they're all smaller.
2094 */
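	/*
	 * For illustration (not part of the original comment): with 4KB
	 * pages the 16MB cut-off below corresponds to
	 * node_present_pages(nid) >= 4096.
	 */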
2095 nodes_clear(interleave_nodes);
56bbd65d 2096 for_each_node_state(nid, N_HIGH_MEMORY) {
2097 unsigned long total_pages = node_present_pages(nid);
2098
2099 /* Preserve the largest node */
2100 if (largest < total_pages) {
2101 largest = total_pages;
2102 prefer = nid;
2103 }
2104
2105 /* Interleave this node? */
2106 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2107 node_set(nid, interleave_nodes);
2108 }
2109
2110 /* All too small, use the largest */
2111 if (unlikely(nodes_empty(interleave_nodes)))
2112 node_set(prefer, interleave_nodes);
1da177e4 2113
028fec41 2114 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
 2115 printk(KERN_WARNING "numa_policy_init: interleaving failed\n");
2116}
2117
8bccd85f 2118/* Reset policy of current process to default */
2119void numa_default_policy(void)
2120{
028fec41 2121 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
1da177e4 2122}
68860ec1 2123
2124/*
2125 * Parse and format mempolicy from/to strings
2126 */
2127
1a75a6c8 2128/*
fc36b8d3 2129 * "local" is pseudo-policy: MPOL_PREFERRED with MPOL_F_LOCAL flag
3f226aa1 2130 * Used only for mpol_parse_str() and mpol_to_str()
1a75a6c8 2131 */
53f2556b 2132#define MPOL_LOCAL (MPOL_INTERLEAVE + 1)
15ad7cdc 2133static const char * const policy_types[] =
53f2556b 2134 { "default", "prefer", "bind", "interleave", "local" };
1a75a6c8 2135
2136
2137#ifdef CONFIG_TMPFS
2138/**
2139 * mpol_parse_str - parse string to mempolicy
2140 * @str: string containing mempolicy to parse
2141 * @mpol: pointer to struct mempolicy pointer, returned on success.
2142 * @no_context: flag whether to "contextualize" the mempolicy
2143 *
2144 * Format of input:
2145 * <mode>[=<flags>][:<nodelist>]
2146 *
2147 * if @no_context is true, save the input nodemask in w.user_nodemask in
2148 * the returned mempolicy. This will be used to "clone" the mempolicy in
2149 * a specific context [cpuset] at a later time. Used to parse tmpfs mpol
2150 * mount option. Note that if 'static' or 'relative' mode flags were
2151 * specified, the input nodemask will already have been saved. Saving
2152 * it again is redundant, but safe.
2153 *
2154 * On success, returns 0, else 1
095f1fc4 2155 */
71fe804b 2156int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
095f1fc4 2157{
2158 struct mempolicy *new = NULL;
2159 unsigned short uninitialized_var(mode);
2160 unsigned short uninitialized_var(mode_flags);
2161 nodemask_t nodes;
2162 char *nodelist = strchr(str, ':');
2163 char *flags = strchr(str, '=');
2164 int i;
2165 int err = 1;
2166
2167 if (nodelist) {
2168 /* NUL-terminate mode or flags string */
2169 *nodelist++ = '\0';
71fe804b 2170 if (nodelist_parse(nodelist, nodes))
095f1fc4 2171 goto out;
71fe804b 2172 if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY]))
095f1fc4 2173 goto out;
2174 } else
2175 nodes_clear(nodes);
2176
2177 if (flags)
2178 *flags++ = '\0'; /* terminate mode string */
2179
3f226aa1 2180 for (i = 0; i <= MPOL_LOCAL; i++) {
095f1fc4 2181 if (!strcmp(str, policy_types[i])) {
71fe804b 2182 mode = i;
2183 break;
2184 }
2185 }
3f226aa1 2186 if (i > MPOL_LOCAL)
2187 goto out;
2188
71fe804b 2189 switch (mode) {
095f1fc4 2190 case MPOL_PREFERRED:
2191 /*
2192 * Insist on a nodelist of one node only
2193 */
2194 if (nodelist) {
2195 char *rest = nodelist;
2196 while (isdigit(*rest))
2197 rest++;
2198 if (*rest)
2199 goto out;
2200 }
2201 break;
2202 case MPOL_INTERLEAVE:
2203 /*
2204 * Default to online nodes with memory if no nodelist
2205 */
2206 if (!nodelist)
71fe804b 2207 nodes = node_states[N_HIGH_MEMORY];
3f226aa1 2208 break;
71fe804b 2209 case MPOL_LOCAL:
3f226aa1 2210 /*
71fe804b 2211 * Don't allow a nodelist; mpol_new() checks flags
3f226aa1 2212 */
71fe804b 2213 if (nodelist)
3f226aa1 2214 goto out;
71fe804b 2215 mode = MPOL_PREFERRED;
3f226aa1 2216 break;
2217 case MPOL_DEFAULT:
2218 /*
 2219 * Insist on an empty nodelist
2220 */
2221 if (!nodelist)
2222 err = 0;
2223 goto out;
2224 case MPOL_BIND:
2225 /*
2226 * Insist on a nodelist
2227 */
2228 if (!nodelist)
2229 goto out;
2230 }
2231
71fe804b 2232 mode_flags = 0;
2233 if (flags) {
2234 /*
2235 * Currently, we only support two mutually exclusive
2236 * mode flags.
2237 */
2238 if (!strcmp(flags, "static"))
71fe804b 2239 mode_flags |= MPOL_F_STATIC_NODES;
095f1fc4 2240 else if (!strcmp(flags, "relative"))
71fe804b 2241 mode_flags |= MPOL_F_RELATIVE_NODES;
095f1fc4 2242 else
926f2ae0 2243 goto out;
095f1fc4 2244 }
2245
2246 new = mpol_new(mode, mode_flags, &nodes);
2247 if (IS_ERR(new))
2248 goto out;
2249
2250 {
58568d2a 2251 int ret;
2252 NODEMASK_SCRATCH(scratch);
2253 if (scratch) {
2254 task_lock(current);
2255 ret = mpol_set_nodemask(new, &nodes, scratch);
2256 task_unlock(current);
2257 } else
2258 ret = -ENOMEM;
2259 NODEMASK_SCRATCH_FREE(scratch);
2260 if (ret) {
4bfc4495 2261 mpol_put(new);
926f2ae0 2262 goto out;
2263 }
2264 }
2265 err = 0;
2266 if (no_context) {
2267 /* save for contextualization */
2268 new->w.user_nodemask = nodes;
2269 }
71fe804b 2270
2271out:
2272 /* Restore string for error message */
2273 if (nodelist)
2274 *--nodelist = ':';
2275 if (flags)
2276 *--flags = '=';
2277 if (!err)
2278 *mpol = new;
2279 return err;
2280}
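/*
 * Illustrative sketch (not part of the original file): parsing a
 * tmpfs-style "mpol=" option with the routine above, assuming nodes 0-3
 * are online with memory. The wrapper function is hypothetical. Note the
 * string must be writable, since parsing temporarily NUL-terminates it.
 */
#if 0	/* example only */
static struct mempolicy *example_parse_mount_policy(void)
{
	char str[] = "interleave:0-3";	/* <mode>[=<flags>][:<nodelist>] */
	struct mempolicy *mpol = NULL;

	if (mpol_parse_str(str, &mpol, 1))	/* no_context, as for tmpfs */
		return NULL;			/* parse error */

	return mpol;				/* caller holds the reference */
}
#endif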
2281#endif /* CONFIG_TMPFS */
2282
2283/**
2284 * mpol_to_str - format a mempolicy structure for printing
2285 * @buffer: to contain formatted mempolicy string
2286 * @maxlen: length of @buffer
2287 * @pol: pointer to mempolicy to be formatted
2288 * @no_context: "context free" mempolicy - use nodemask in w.user_nodemask
2289 *
2290 * Convert a mempolicy into a string.
2291 * Returns the number of characters in buffer (if positive)
2292 * or an error (negative)
2293 */
71fe804b 2294int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
2295{
2296 char *p = buffer;
2297 int l;
2298 nodemask_t nodes;
bea904d5 2299 unsigned short mode;
f5b087b5 2300 unsigned short flags = pol ? pol->flags : 0;
1a75a6c8 2301
2302 /*
2303 * Sanity check: room for longest mode, flag and some nodes
2304 */
2305 VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
2306
2307 if (!pol || pol == &default_policy)
2308 mode = MPOL_DEFAULT;
2309 else
2310 mode = pol->mode;
2311
2312 switch (mode) {
2313 case MPOL_DEFAULT:
2314 nodes_clear(nodes);
2315 break;
2316
2317 case MPOL_PREFERRED:
2318 nodes_clear(nodes);
fc36b8d3 2319 if (flags & MPOL_F_LOCAL)
2320 mode = MPOL_LOCAL; /* pseudo-policy */
2321 else
fc36b8d3 2322 node_set(pol->v.preferred_node, nodes);
2323 break;
2324
2325 case MPOL_BIND:
19770b32 2326 /* Fall through */
1a75a6c8 2327 case MPOL_INTERLEAVE:
2328 if (no_context)
2329 nodes = pol->w.user_nodemask;
2330 else
2331 nodes = pol->v.nodes;
2332 break;
2333
2334 default:
2335 BUG();
2336 }
2337
2338 l = strlen(policy_types[mode]);
2339 if (buffer + maxlen < p + l + 1)
2340 return -ENOSPC;
2341
2342 strcpy(p, policy_types[mode]);
2343 p += l;
2344
fc36b8d3 2345 if (flags & MPOL_MODE_FLAGS) {
2346 if (buffer + maxlen < p + 2)
2347 return -ENOSPC;
2348 *p++ = '=';
2349
2350 /*
2351 * Currently, the only defined flags are mutually exclusive
2352 */
f5b087b5 2353 if (flags & MPOL_F_STATIC_NODES)
2354 p += snprintf(p, buffer + maxlen - p, "static");
2355 else if (flags & MPOL_F_RELATIVE_NODES)
2356 p += snprintf(p, buffer + maxlen - p, "relative");
2357 }
2358
2359 if (!nodes_empty(nodes)) {
2360 if (buffer + maxlen < p + 2)
2361 return -ENOSPC;
095f1fc4 2362 *p++ = ':';
2363 p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
2364 }
2365 return p - buffer;
2366}
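/*
 * Illustrative sketch (not part of the original file): formatting the
 * calling task's policy the same way show_numa_map() does below. The
 * wrapper function is hypothetical.
 */
#if 0	/* example only */
static void example_print_task_policy(void)
{
	char buf[64];		/* comfortably above the documented minimum */

	if (mpol_to_str(buf, sizeof(buf), current->mempolicy, 0) >= 0)
		printk(KERN_DEBUG "mempolicy: %s\n", buf);
}
#endif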
2367
2368struct numa_maps {
2369 unsigned long pages;
2370 unsigned long anon;
2371 unsigned long active;
2372 unsigned long writeback;
1a75a6c8 2373 unsigned long mapcount_max;
2374 unsigned long dirty;
2375 unsigned long swapcache;
2376 unsigned long node[MAX_NUMNODES];
2377};
2378
397874df 2379static void gather_stats(struct page *page, void *private, int pte_dirty)
2380{
2381 struct numa_maps *md = private;
2382 int count = page_mapcount(page);
2383
2384 md->pages++;
2385 if (pte_dirty || PageDirty(page))
2386 md->dirty++;
1a75a6c8 2387
2388 if (PageSwapCache(page))
2389 md->swapcache++;
1a75a6c8 2390
894bc310 2391 if (PageActive(page) || PageUnevictable(page))
2392 md->active++;
2393
2394 if (PageWriteback(page))
2395 md->writeback++;
2396
2397 if (PageAnon(page))
2398 md->anon++;
2399
2400 if (count > md->mapcount_max)
2401 md->mapcount_max = count;
2402
1a75a6c8 2403 md->node[page_to_nid(page)]++;
2404}
2405
7f709ed0 2406#ifdef CONFIG_HUGETLB_PAGE
2407static void check_huge_range(struct vm_area_struct *vma,
2408 unsigned long start, unsigned long end,
2409 struct numa_maps *md)
2410{
2411 unsigned long addr;
2412 struct page *page;
2413 struct hstate *h = hstate_vma(vma);
2414 unsigned long sz = huge_page_size(h);
397874df 2415
2416 for (addr = start; addr < end; addr += sz) {
2417 pte_t *ptep = huge_pte_offset(vma->vm_mm,
2418 addr & huge_page_mask(h));
2419 pte_t pte;
2420
2421 if (!ptep)
2422 continue;
2423
2424 pte = *ptep;
2425 if (pte_none(pte))
2426 continue;
2427
2428 page = pte_page(pte);
2429 if (!page)
2430 continue;
2431
2432 gather_stats(page, md, pte_dirty(*ptep));
2433 }
2434}
2435#else
2436static inline void check_huge_range(struct vm_area_struct *vma,
2437 unsigned long start, unsigned long end,
2438 struct numa_maps *md)
2439{
2440}
2441#endif
397874df 2442
2443/*
2444 * Display pages allocated per node and memory policy via /proc.
2445 */
2446int show_numa_map(struct seq_file *m, void *v)
2447{
99f89551 2448 struct proc_maps_private *priv = m->private;
2449 struct vm_area_struct *vma = v;
2450 struct numa_maps *md;
2451 struct file *file = vma->vm_file;
2452 struct mm_struct *mm = vma->vm_mm;
480eccf9 2453 struct mempolicy *pol;
2454 int n;
2455 char buffer[50];
2456
397874df 2457 if (!mm)
2458 return 0;
2459
2460 md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
2461 if (!md)
2462 return 0;
2463
480eccf9 2464 pol = get_vma_policy(priv->task, vma, vma->vm_start);
71fe804b 2465 mpol_to_str(buffer, sizeof(buffer), pol, 0);
52cd3b07 2466 mpol_cond_put(pol);
2467
2468 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
2469
2470 if (file) {
2471 seq_printf(m, " file=");
c32c2f63 2472 seq_path(m, &file->f_path, "\n\t= ");
2473 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
2474 seq_printf(m, " heap");
2475 } else if (vma->vm_start <= mm->start_stack &&
2476 vma->vm_end >= mm->start_stack) {
2477 seq_printf(m, " stack");
2478 }
2479
2480 if (is_vm_hugetlb_page(vma)) {
2481 check_huge_range(vma, vma->vm_start, vma->vm_end, md);
2482 seq_printf(m, " huge");
2483 } else {
a57ebfdb 2484 check_pgd_range(vma, vma->vm_start, vma->vm_end,
56bbd65d 2485 &node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
2486 }
2487
2488 if (!md->pages)
2489 goto out;
1a75a6c8 2490
2491 if (md->anon)
2492 seq_printf(m," anon=%lu",md->anon);
1a75a6c8 2493
2494 if (md->dirty)
2495 seq_printf(m," dirty=%lu",md->dirty);
1a75a6c8 2496
2497 if (md->pages != md->anon && md->pages != md->dirty)
2498 seq_printf(m, " mapped=%lu", md->pages);
1a75a6c8 2499
2500 if (md->mapcount_max > 1)
2501 seq_printf(m, " mapmax=%lu", md->mapcount_max);
1a75a6c8 2502
2503 if (md->swapcache)
2504 seq_printf(m," swapcache=%lu", md->swapcache);
2505
2506 if (md->active < md->pages && !is_vm_hugetlb_page(vma))
2507 seq_printf(m," active=%lu", md->active);
2508
2509 if (md->writeback)
2510 seq_printf(m," writeback=%lu", md->writeback);
2511
56bbd65d 2512 for_each_node_state(n, N_HIGH_MEMORY)
2513 if (md->node[n])
2514 seq_printf(m, " N%d=%lu", n, md->node[n]);
2515out:
2516 seq_putc(m, '\n');
2517 kfree(md);
2518
2519 if (m->count < m->size)
99f89551 2520 m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
2521 return 0;
2522}
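/*
 * For reference (not part of the original file), a line emitted by
 * show_numa_map() looks roughly like:
 *
 *	2aaaaab00000 interleave:0-3 anon=16 dirty=16 mapmax=2 N0=4 N1=4 N2=4 N3=4
 *
 * i.e. the vma start address, the policy string from mpol_to_str(),
 * an optional file/heap/stack/huge tag, and then only those counters
 * that are non-zero.
 */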