1/*
2 * Simple NUMA memory policy for the Linux kernel.
3 *
4 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
5 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
6 * Subject to the GNU Public License, version 2.
7 *
8 * NUMA policy allows the user to give hints in which node(s) memory should
9 * be allocated.
10 *
11 * Support four policies per VMA and per process:
12 *
13 * The VMA policy has priority over the process policy for a page fault.
14 *
15 * interleave Allocate memory interleaved over a set of nodes,
16 * with normal fallback if it fails.
17 * For VMA based allocations this interleaves based on the
18 * offset into the backing object or offset into the mapping
19 * for anonymous memory. For process policy a process counter
20 * is used.
21 *
22 * bind Only allocate memory on a specific set of nodes,
23 * no fallback.
24 * FIXME: memory is allocated starting with the first node
25 * to the last. It would be better if bind would truly restrict
26 * the allocation to memory nodes instead
27 *
28 * preferred Try a specific node first before normal fallback.
29 * As a special case node -1 here means do the allocation
30 * on the local CPU. This is normally identical to default,
31 * but useful to set in a VMA when you have a non default
32 * process policy.
33 *
34 * default Allocate on the local node first, or when on a VMA
35 * use the process policy. This is what Linux always did
36 * in a NUMA aware kernel and still does by, ahem, default.
37 *
38 * The process policy is applied for most non interrupt memory allocations
39 * in that process' context. Interrupts ignore the policies and always
40 * try to allocate on the local CPU. The VMA policy is only applied for memory
41 * allocations for a VMA in the VM.
42 *
43 * Currently there are a few corner cases in swapping where the policy
44 * is not applied, but the majority should be handled. When process policy
45 * is used it is not remembered over swap outs/swap ins.
46 *
47 * Only the highest zone in the zone hierarchy gets policied. Allocations
48 * requesting a lower zone just use default policy. This implies that
49 * on systems with highmem kernel lowmem allocations don't get policied.
50 * Same with GFP_DMA allocations.
51 *
52 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
53 * all users and remembered even when nobody has memory mapped.
54 */
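/*
 * Illustrative userspace sketch (not part of this file): how the policies
 * described above are typically requested through the numaif.h syscall
 * wrappers.  The addresses and node numbers are arbitrary examples.
 *
 *	#include <numaif.h>
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);	// nodes 0 and 1
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, sizeof(mask) * 8);
 *
 *	unsigned long one = 1UL << 0;			// node 0 only
 *	mbind(addr, length, MPOL_BIND, &one, sizeof(one) * 8, 0);
 */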
55
56/* Notebook:
57 fix mmap readahead to honour policy and enable policy for any page cache
58 object
59 statistics for bigpages
60 global policy for page cache? currently it uses process policy. Requires
61 first item above.
62 handle mremap for shared memory (currently ignored for the policy)
63 grows down?
64 make bind policy root only? It can trigger oom much faster and the
65 kernel is not always grateful with that.
66*/
67
68#include <linux/mempolicy.h>
69#include <linux/mm.h>
70#include <linux/highmem.h>
71#include <linux/hugetlb.h>
72#include <linux/kernel.h>
73#include <linux/sched.h>
74#include <linux/nodemask.h>
75#include <linux/cpuset.h>
76#include <linux/gfp.h>
77#include <linux/slab.h>
78#include <linux/string.h>
79#include <linux/module.h>
80#include <linux/nsproxy.h>
81#include <linux/interrupt.h>
82#include <linux/init.h>
83#include <linux/compat.h>
84#include <linux/swap.h>
85#include <linux/seq_file.h>
86#include <linux/proc_fs.h>
87#include <linux/migrate.h>
88#include <linux/rmap.h>
89#include <linux/security.h>
90#include <linux/syscalls.h>
91
92#include <asm/tlbflush.h>
93#include <asm/uaccess.h>
94
95/* Internal flags */
96#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */
97#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
98#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2) /* Gather statistics */
99
100static struct kmem_cache *policy_cache;
101static struct kmem_cache *sn_cache;
102
103/* Highest zone. A specific allocation for a zone below that is not
104 policied. */
105enum zone_type policy_zone = 0;
106
107/*
108 * run-time system-wide default policy => local allocation
109 */
110struct mempolicy default_policy = {
111 .refcnt = ATOMIC_INIT(1), /* never free it */
112 .mode = MPOL_PREFERRED,
113 .flags = MPOL_F_LOCAL,
114};
115
116static const struct mempolicy_operations {
117 int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
118 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
119} mpol_ops[MPOL_MAX];
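/*
 * Added note: this forward declaration is completed by the mpol_ops[]
 * initializer further down.  mpol_new() dispatches to the per-mode
 * ->create() hook and mpol_rebind_policy() to ->rebind().
 */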
120
121/* Check that the nodemask contains at least one populated zone */
122static int is_valid_nodemask(const nodemask_t *nodemask)
123{
124 int nd, k;
125
126 /* Check that there is something useful in this mask */
127 k = policy_zone;
128
129 for_each_node_mask(nd, *nodemask) {
130 struct zone *z;
131
132 for (k = 0; k <= policy_zone; k++) {
133 z = &NODE_DATA(nd)->node_zones[k];
134 if (z->present_pages > 0)
135 return 1;
136 }
137 }
138
139 return 0;
140}
141
142static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
143{
144 return pol->flags & (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES);
145}
146
147static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
148 const nodemask_t *rel)
149{
150 nodemask_t tmp;
151 nodes_fold(tmp, *orig, nodes_weight(*rel));
152 nodes_onto(*ret, tmp, *rel);
153}
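/*
 * Added worked example (illustrative): with *rel = {2,5} (weight 2) and
 * *orig = {3}, nodes_fold() wraps orig into two bits giving {1}, and
 * nodes_onto() maps bit 1 onto the second set bit of rel, so *ret = {5}.
 */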
154
155static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
156{
157 if (nodes_empty(*nodes))
158 return -EINVAL;
159 pol->v.nodes = *nodes;
160 return 0;
161}
162
163static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
164{
165 if (!nodes)
166 pol->flags |= MPOL_F_LOCAL; /* local allocation */
167 else if (nodes_empty(*nodes))
168 return -EINVAL; /* no allowed nodes */
169 else
170 pol->v.preferred_node = first_node(*nodes);
171 return 0;
172}
173
174static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
175{
176 if (!is_valid_nodemask(nodes))
177 return -EINVAL;
178 pol->v.nodes = *nodes;
179 return 0;
180}
181
1da177e4 182/* Create a new policy */
183static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
184 nodemask_t *nodes)
185{
186 struct mempolicy *policy;
187 nodemask_t cpuset_context_nmask;
188 int ret;
189
190 pr_debug("setting mode %d flags %d nodes[0] %lx\n",
191 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);
192
193 if (mode == MPOL_DEFAULT) {
194 if (nodes && !nodes_empty(*nodes))
195 return ERR_PTR(-EINVAL);
196 return NULL; /* simply delete any existing policy */
197 }
198 VM_BUG_ON(!nodes);
199
200 /*
201 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
202 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
203 * All other modes require a valid pointer to a non-empty nodemask.
204 */
205 if (mode == MPOL_PREFERRED) {
206 if (nodes_empty(*nodes)) {
207 if (((flags & MPOL_F_STATIC_NODES) ||
208 (flags & MPOL_F_RELATIVE_NODES)))
209 return ERR_PTR(-EINVAL);
210 nodes = NULL; /* flag local alloc */
211 }
212 } else if (nodes_empty(*nodes))
213 return ERR_PTR(-EINVAL);
214 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
215 if (!policy)
216 return ERR_PTR(-ENOMEM);
217 atomic_set(&policy->refcnt, 1);
218 policy->mode = mode;
219 policy->flags = flags;
220
221 if (nodes) {
222 /*
223 * cpuset related setup doesn't apply to local allocation
224 */
225 cpuset_update_task_memory_state();
226 if (flags & MPOL_F_RELATIVE_NODES)
227 mpol_relative_nodemask(&cpuset_context_nmask, nodes,
228 &cpuset_current_mems_allowed);
229 else
230 nodes_and(cpuset_context_nmask, *nodes,
231 cpuset_current_mems_allowed);
232 if (mpol_store_user_nodemask(policy))
233 policy->w.user_nodemask = *nodes;
234 else
235 policy->w.cpuset_mems_allowed =
236 cpuset_mems_allowed(current);
237 }
238
239 ret = mpol_ops[mode].create(policy,
240 nodes ? &cpuset_context_nmask : NULL);
241 if (ret < 0) {
242 kmem_cache_free(policy_cache, policy);
243 return ERR_PTR(ret);
244 }
245 return policy;
246}
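/*
 * Added example (illustrative): a task whose cpuset allows nodes {0,2}
 * asking for MPOL_INTERLEAVE over {0,1,2} with neither MPOL_F_STATIC_NODES
 * nor MPOL_F_RELATIVE_NODES ends up with v.nodes = {0,2} (the intersection
 * computed above) and remembers w.cpuset_mems_allowed = {0,2} for later
 * rebinds.
 */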
247
248/* Slow path of a mpol destructor. */
249void __mpol_put(struct mempolicy *p)
250{
251 if (!atomic_dec_and_test(&p->refcnt))
252 return;
253 kmem_cache_free(policy_cache, p);
254}
255
256static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
257{
258}
259
260static void mpol_rebind_nodemask(struct mempolicy *pol,
261 const nodemask_t *nodes)
262{
263 nodemask_t tmp;
264
265 if (pol->flags & MPOL_F_STATIC_NODES)
266 nodes_and(tmp, pol->w.user_nodemask, *nodes);
267 else if (pol->flags & MPOL_F_RELATIVE_NODES)
268 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
269 else {
270 nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
271 *nodes);
272 pol->w.cpuset_mems_allowed = *nodes;
273 }
274
275 pol->v.nodes = tmp;
276 if (!node_isset(current->il_next, tmp)) {
277 current->il_next = next_node(current->il_next, tmp);
278 if (current->il_next >= MAX_NUMNODES)
279 current->il_next = first_node(tmp);
280 if (current->il_next >= MAX_NUMNODES)
281 current->il_next = numa_node_id();
282 }
283}
284
285static void mpol_rebind_preferred(struct mempolicy *pol,
286 const nodemask_t *nodes)
287{
288 nodemask_t tmp;
289
290 if (pol->flags & MPOL_F_STATIC_NODES) {
291 int node = first_node(pol->w.user_nodemask);
292
293 if (node_isset(node, *nodes)) {
294 pol->v.preferred_node = node;
295 pol->flags &= ~MPOL_F_LOCAL;
296 } else
297 pol->flags |= MPOL_F_LOCAL;
298 } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
299 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
300 pol->v.preferred_node = first_node(tmp);
301 } else if (!(pol->flags & MPOL_F_LOCAL)) {
302 pol->v.preferred_node = node_remap(pol->v.preferred_node,
303 pol->w.cpuset_mems_allowed,
304 *nodes);
305 pol->w.cpuset_mems_allowed = *nodes;
306 }
307}
308
309/* Migrate a policy to a different set of nodes */
310static void mpol_rebind_policy(struct mempolicy *pol,
311 const nodemask_t *newmask)
312{
313 if (!pol)
314 return;
315 if (!mpol_store_user_nodemask(pol) &&
316 nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
317 return;
318 mpol_ops[pol->mode].rebind(pol, newmask);
319}
320
321/*
322 * Wrapper for mpol_rebind_policy() that just requires task
323 * pointer, and updates task mempolicy.
324 */
325
326void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
327{
328 mpol_rebind_policy(tsk->mempolicy, new);
329}
330
331/*
332 * Rebind each vma in mm to new nodemask.
333 *
334 * Call holding a reference to mm. Takes mm->mmap_sem during call.
335 */
336
337void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
338{
339 struct vm_area_struct *vma;
340
341 down_write(&mm->mmap_sem);
342 for (vma = mm->mmap; vma; vma = vma->vm_next)
343 mpol_rebind_policy(vma->vm_policy, new);
344 up_write(&mm->mmap_sem);
345}
346
347static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
348 [MPOL_DEFAULT] = {
349 .rebind = mpol_rebind_default,
350 },
351 [MPOL_INTERLEAVE] = {
352 .create = mpol_new_interleave,
353 .rebind = mpol_rebind_nodemask,
354 },
355 [MPOL_PREFERRED] = {
356 .create = mpol_new_preferred,
357 .rebind = mpol_rebind_preferred,
358 },
359 [MPOL_BIND] = {
360 .create = mpol_new_bind,
361 .rebind = mpol_rebind_nodemask,
362 },
363};
364
365static void gather_stats(struct page *, void *, int pte_dirty);
366static void migrate_page_add(struct page *page, struct list_head *pagelist,
367 unsigned long flags);
368
369/* Scan through pages checking if pages follow certain conditions. */
370static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
371 unsigned long addr, unsigned long end,
372 const nodemask_t *nodes, unsigned long flags,
373 void *private)
374{
375 pte_t *orig_pte;
376 pte_t *pte;
377 spinlock_t *ptl;
378
379 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
380 do {
381 struct page *page;
382 int nid;
383
384 if (!pte_present(*pte))
385 continue;
386 page = vm_normal_page(vma, addr, *pte);
387 if (!page)
388 continue;
389 /*
390 * The check for PageReserved here is important to avoid
391 * handling zero pages and other pages that may have been
392 * marked special by the system.
393 *
394 * If the PageReserved would not be checked here then f.e.
395 * the location of the zero page could have an influence
396 * on MPOL_MF_STRICT, zero pages would be counted for
397 * the per node stats, and there would be useless attempts
398 * to put zero pages on the migration list.
399 */
400 if (PageReserved(page))
401 continue;
402 nid = page_to_nid(page);
403 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
404 continue;
405
406 if (flags & MPOL_MF_STATS)
407 gather_stats(page, private, pte_dirty(*pte));
408 else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
409 migrate_page_add(page, private, flags);
410 else
411 break;
412 } while (pte++, addr += PAGE_SIZE, addr != end);
413 pte_unmap_unlock(orig_pte, ptl);
414 return addr != end;
415}
416
417static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
418 unsigned long addr, unsigned long end,
419 const nodemask_t *nodes, unsigned long flags,
420 void *private)
421{
422 pmd_t *pmd;
423 unsigned long next;
424
425 pmd = pmd_offset(pud, addr);
426 do {
427 next = pmd_addr_end(addr, end);
428 if (pmd_none_or_clear_bad(pmd))
429 continue;
430 if (check_pte_range(vma, pmd, addr, next, nodes,
431 flags, private))
432 return -EIO;
433 } while (pmd++, addr = next, addr != end);
434 return 0;
435}
436
437static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
438 unsigned long addr, unsigned long end,
439 const nodemask_t *nodes, unsigned long flags,
440 void *private)
441{
442 pud_t *pud;
443 unsigned long next;
444
445 pud = pud_offset(pgd, addr);
446 do {
447 next = pud_addr_end(addr, end);
448 if (pud_none_or_clear_bad(pud))
449 continue;
450 if (check_pmd_range(vma, pud, addr, next, nodes,
451 flags, private))
452 return -EIO;
453 } while (pud++, addr = next, addr != end);
454 return 0;
455}
456
457static inline int check_pgd_range(struct vm_area_struct *vma,
458 unsigned long addr, unsigned long end,
459 const nodemask_t *nodes, unsigned long flags,
460 void *private)
461{
462 pgd_t *pgd;
463 unsigned long next;
464
465 pgd = pgd_offset(vma->vm_mm, addr);
466 do {
467 next = pgd_addr_end(addr, end);
468 if (pgd_none_or_clear_bad(pgd))
469 continue;
470 if (check_pud_range(vma, pgd, addr, next, nodes,
471 flags, private))
472 return -EIO;
473 } while (pgd++, addr = next, addr != end);
474 return 0;
475}
476
477/*
478 * Check if all pages in a range are on a set of nodes.
479 * If pagelist != NULL then isolate pages from the LRU and
480 * put them on the pagelist.
481 */
482static struct vm_area_struct *
483check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
484 const nodemask_t *nodes, unsigned long flags, void *private)
485{
486 int err;
487 struct vm_area_struct *first, *vma, *prev;
488
489 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
490
491 err = migrate_prep();
492 if (err)
493 return ERR_PTR(err);
494 }
495
496 first = find_vma(mm, start);
497 if (!first)
498 return ERR_PTR(-EFAULT);
499 prev = NULL;
500 for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
501 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
502 if (!vma->vm_next && vma->vm_end < end)
503 return ERR_PTR(-EFAULT);
504 if (prev && prev->vm_end < vma->vm_start)
505 return ERR_PTR(-EFAULT);
506 }
507 if (!is_vm_hugetlb_page(vma) &&
508 ((flags & MPOL_MF_STRICT) ||
509 ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
510 vma_migratable(vma)))) {
511 unsigned long endvma = vma->vm_end;
512
513 if (endvma > end)
514 endvma = end;
515 if (vma->vm_start > start)
516 start = vma->vm_start;
517 err = check_pgd_range(vma, start, endvma, nodes,
518 flags, private);
519 if (err) {
520 first = ERR_PTR(err);
521 break;
522 }
523 }
524 prev = vma;
525 }
526 return first;
527}
528
529/* Apply policy to a single VMA */
530static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
531{
532 int err = 0;
533 struct mempolicy *old = vma->vm_policy;
534
535 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
536 vma->vm_start, vma->vm_end, vma->vm_pgoff,
537 vma->vm_ops, vma->vm_file,
538 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
539
540 if (vma->vm_ops && vma->vm_ops->set_policy)
541 err = vma->vm_ops->set_policy(vma, new);
542 if (!err) {
543 mpol_get(new);
544 vma->vm_policy = new;
545 mpol_put(old);
546 }
547 return err;
548}
549
550/* Step 2: apply policy to a range and do splits. */
551static int mbind_range(struct vm_area_struct *vma, unsigned long start,
552 unsigned long end, struct mempolicy *new)
553{
554 struct vm_area_struct *next;
555 int err;
556
557 err = 0;
558 for (; vma && vma->vm_start < end; vma = next) {
559 next = vma->vm_next;
560 if (vma->vm_start < start)
561 err = split_vma(vma->vm_mm, vma, start, 1);
562 if (!err && vma->vm_end > end)
563 err = split_vma(vma->vm_mm, vma, end, 0);
564 if (!err)
565 err = policy_vma(vma, new);
566 if (err)
567 break;
568 }
569 return err;
570}
571
572/*
573 * Update task->flags PF_MEMPOLICY bit: set iff non-default
574 * mempolicy. Allows more rapid checking of this (combined perhaps
575 * with other PF_* flag bits) on memory allocation hot code paths.
576 *
577 * If called from outside this file, the task 'p' should -only- be
578 * a newly forked child not yet visible on the task list, because
579 * manipulating the task flags of a visible task is not safe.
580 *
581 * The above limitation is why this routine has the funny name
582 * mpol_fix_fork_child_flag().
583 *
584 * It is also safe to call this with a task pointer of current,
585 * which the static wrapper mpol_set_task_struct_flag() does,
586 * for use within this file.
587 */
588
589void mpol_fix_fork_child_flag(struct task_struct *p)
590{
591 if (p->mempolicy)
592 p->flags |= PF_MEMPOLICY;
593 else
594 p->flags &= ~PF_MEMPOLICY;
595}
596
597static void mpol_set_task_struct_flag(void)
598{
599 mpol_fix_fork_child_flag(current);
600}
601
602/* Set the process memory policy */
603static long do_set_mempolicy(unsigned short mode, unsigned short flags,
604 nodemask_t *nodes)
605{
606 struct mempolicy *new;
607 struct mm_struct *mm = current->mm;
608
609 new = mpol_new(mode, flags, nodes);
610 if (IS_ERR(new))
611 return PTR_ERR(new);
612
613 /*
614 * prevent changing our mempolicy while show_numa_maps()
615 * is using it.
616 * Note: do_set_mempolicy() can be called at init time
617 * with no 'mm'.
618 */
619 if (mm)
620 down_write(&mm->mmap_sem);
621 mpol_put(current->mempolicy);
622 current->mempolicy = new;
623 mpol_set_task_struct_flag();
624 if (new && new->mode == MPOL_INTERLEAVE &&
625 nodes_weight(new->v.nodes))
626 current->il_next = first_node(new->v.nodes);
627 if (mm)
628 up_write(&mm->mmap_sem);
629
630 return 0;
631}
632
bea904d5
LS
633/*
634 * Return nodemask for policy for get_mempolicy() query
635 */
636static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
1da177e4 637{
dfcd3c0d 638 nodes_clear(*nodes);
bea904d5
LS
639 if (p == &default_policy)
640 return;
641
45c4745a 642 switch (p->mode) {
19770b32
MG
643 case MPOL_BIND:
644 /* Fall through */
1da177e4 645 case MPOL_INTERLEAVE:
dfcd3c0d 646 *nodes = p->v.nodes;
1da177e4
LT
647 break;
648 case MPOL_PREFERRED:
fc36b8d3 649 if (!(p->flags & MPOL_F_LOCAL))
dfcd3c0d 650 node_set(p->v.preferred_node, *nodes);
53f2556b 651 /* else return empty node mask for local allocation */
1da177e4
LT
652 break;
653 default:
654 BUG();
655 }
656}
657
658static int lookup_node(struct mm_struct *mm, unsigned long addr)
659{
660 struct page *p;
661 int err;
662
663 err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
664 if (err >= 0) {
665 err = page_to_nid(p);
666 put_page(p);
667 }
668 return err;
669}
670
1da177e4 671/* Retrieve NUMA policy */
dbcb0f19
AB
672static long do_get_mempolicy(int *policy, nodemask_t *nmask,
673 unsigned long addr, unsigned long flags)
1da177e4 674{
8bccd85f 675 int err;
1da177e4
LT
676 struct mm_struct *mm = current->mm;
677 struct vm_area_struct *vma = NULL;
678 struct mempolicy *pol = current->mempolicy;
679
cf2a473c 680 cpuset_update_task_memory_state();
754af6f5
LS
681 if (flags &
682 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
1da177e4 683 return -EINVAL;
754af6f5
LS
684
685 if (flags & MPOL_F_MEMS_ALLOWED) {
686 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
687 return -EINVAL;
688 *policy = 0; /* just so it's initialized */
689 *nmask = cpuset_current_mems_allowed;
690 return 0;
691 }
692
1da177e4 693 if (flags & MPOL_F_ADDR) {
bea904d5
LS
694 /*
695 * Do NOT fall back to task policy if the
696 * vma/shared policy at addr is NULL. We
697 * want to return MPOL_DEFAULT in this case.
698 */
1da177e4
LT
699 down_read(&mm->mmap_sem);
700 vma = find_vma_intersection(mm, addr, addr+1);
701 if (!vma) {
702 up_read(&mm->mmap_sem);
703 return -EFAULT;
704 }
705 if (vma->vm_ops && vma->vm_ops->get_policy)
706 pol = vma->vm_ops->get_policy(vma, addr);
707 else
708 pol = vma->vm_policy;
709 } else if (addr)
710 return -EINVAL;
711
712 if (!pol)
bea904d5 713 pol = &default_policy; /* indicates default behavior */
1da177e4
LT
714
715 if (flags & MPOL_F_NODE) {
716 if (flags & MPOL_F_ADDR) {
717 err = lookup_node(mm, addr);
718 if (err < 0)
719 goto out;
8bccd85f 720 *policy = err;
1da177e4 721 } else if (pol == current->mempolicy &&
45c4745a 722 pol->mode == MPOL_INTERLEAVE) {
8bccd85f 723 *policy = current->il_next;
1da177e4
LT
724 } else {
725 err = -EINVAL;
726 goto out;
727 }
bea904d5
LS
728 } else {
729 *policy = pol == &default_policy ? MPOL_DEFAULT :
730 pol->mode;
731 *policy |= pol->flags;
732 }
1da177e4
LT
733
734 if (vma) {
735 up_read(&current->mm->mmap_sem);
736 vma = NULL;
737 }
738
1da177e4 739 err = 0;
8bccd85f 740 if (nmask)
bea904d5 741 get_policy_nodemask(pol, nmask);
1da177e4
LT
742
743 out:
52cd3b07 744 mpol_cond_put(pol);
1da177e4
LT
745 if (vma)
746 up_read(&current->mm->mmap_sem);
747 return err;
748}
749
b20a3503 750#ifdef CONFIG_MIGRATION
6ce3c4c0
CL
751/*
752 * page migration
753 */
fc301289
CL
754static void migrate_page_add(struct page *page, struct list_head *pagelist,
755 unsigned long flags)
6ce3c4c0
CL
756{
757 /*
fc301289 758 * Avoid migrating a page that is shared with others.
6ce3c4c0 759 */
b20a3503
CL
760 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1)
761 isolate_lru_page(page, pagelist);
7e2ab150 762}
6ce3c4c0 763
742755a1 764static struct page *new_node_page(struct page *page, unsigned long node, int **x)
95a402c3 765{
769848c0 766 return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
95a402c3
CL
767}
768
7e2ab150
CL
769/*
770 * Migrate pages from one node to a target node.
771 * Returns error or the number of pages not migrated.
772 */
dbcb0f19
AB
773static int migrate_to_node(struct mm_struct *mm, int source, int dest,
774 int flags)
7e2ab150
CL
775{
776 nodemask_t nmask;
777 LIST_HEAD(pagelist);
778 int err = 0;
779
780 nodes_clear(nmask);
781 node_set(source, nmask);
6ce3c4c0 782
7e2ab150
CL
783 check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
784 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
785
aaa994b3 786 if (!list_empty(&pagelist))
95a402c3
CL
787 err = migrate_pages(&pagelist, new_node_page, dest);
788
7e2ab150 789 return err;
6ce3c4c0
CL
790}
791
792/*
793 * Move pages between the two nodesets so as to preserve the physical
794 * layout as much as possible.
795 *
796 * Returns the number of pages that could not be moved.
797 */
798int do_migrate_pages(struct mm_struct *mm,
799 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
800{
801 LIST_HEAD(pagelist);
7e2ab150
CL
802 int busy = 0;
803 int err = 0;
804 nodemask_t tmp;
805
806 down_read(&mm->mmap_sem);
807
808 err = migrate_vmas(mm, from_nodes, to_nodes, flags);
809 if (err)
810 goto out;
811
812/*
813 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
814 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
815 * bit in 'tmp', and return that <source, dest> pair for migration.
816 * The pair of nodemasks 'to' and 'from' define the map.
817 *
818 * If no pair of bits is found that way, fallback to picking some
819 * pair of 'source' and 'dest' bits that are not the same. If the
820 * 'source' and 'dest' bits are the same, this represents a node
821 * that will be migrating to itself, so no pages need move.
822 *
823 * If no bits are left in 'tmp', or if all remaining bits left
824 * in 'tmp' correspond to the same bit in 'to', return false
825 * (nothing left to migrate).
826 *
827 * This lets us pick a pair of nodes to migrate between, such that
828 * if possible the dest node is not already occupied by some other
829 * source node, minimizing the risk of overloading the memory on a
830 * node that would happen if we migrated incoming memory to a node
831 * before migrating outgoing memory source that same node.
832 *
833 * A single scan of tmp is sufficient. As we go, we remember the
834 * most recent <s, d> pair that moved (s != d). If we find a pair
835 * that not only moved, but what's better, moved to an empty slot
836 * (d is not set in tmp), then we break out then, with that pair.
837 * Otherwise when we finish scanning from_tmp, we at least have the
838 * most recent <s, d> pair that moved. If we get all the way through
839 * the scan of tmp without finding any node that moved, much less
840 * moved to an empty node, then there is nothing left worth migrating.
841 */
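/*
 * Added example (illustrative): from = {0,1}, to = {1,2}.  The first scan
 * finds <0,1> but node 1 is still in tmp, so it keeps going and finds
 * <1,2> whose dest is not in tmp; <1,2> is migrated first, then a second
 * pass migrates <0,1>.
 */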
842
843 tmp = *from_nodes;
844 while (!nodes_empty(tmp)) {
845 int s,d;
846 int source = -1;
847 int dest = 0;
848
849 for_each_node_mask(s, tmp) {
850 d = node_remap(s, *from_nodes, *to_nodes);
851 if (s == d)
852 continue;
853
854 source = s; /* Node moved. Memorize */
855 dest = d;
856
857 /* dest not in remaining from nodes? */
858 if (!node_isset(dest, tmp))
859 break;
860 }
861 if (source == -1)
862 break;
863
864 node_clear(source, tmp);
865 err = migrate_to_node(mm, source, dest, flags);
866 if (err > 0)
867 busy += err;
868 if (err < 0)
869 break;
870 }
871out:
872 up_read(&mm->mmap_sem);
873 if (err < 0)
874 return err;
875 return busy;
b20a3503
CL
876
877}
878
3ad33b24
LS
879/*
880 * Allocate a new page for page migration based on vma policy.
881 * Start assuming that page is mapped by vma pointed to by @private.
882 * Search forward from there, if not. N.B., this assumes that the
883 * list of pages handed to migrate_pages()--which is how we get here--
884 * is in virtual address order.
885 */
742755a1 886static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
95a402c3
CL
887{
888 struct vm_area_struct *vma = (struct vm_area_struct *)private;
3ad33b24 889 unsigned long uninitialized_var(address);
95a402c3 890
3ad33b24
LS
891 while (vma) {
892 address = page_address_in_vma(page, vma);
893 if (address != -EFAULT)
894 break;
895 vma = vma->vm_next;
896 }
897
898 /*
899 * if !vma, alloc_page_vma() will use task or system default policy
900 */
901 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
95a402c3 902}
b20a3503
CL
903#else
904
905static void migrate_page_add(struct page *page, struct list_head *pagelist,
906 unsigned long flags)
907{
39743889
CL
908}
909
b20a3503
CL
910int do_migrate_pages(struct mm_struct *mm,
911 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
912{
913 return -ENOSYS;
914}
95a402c3 915
69939749 916static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
95a402c3
CL
917{
918 return NULL;
919}
b20a3503
CL
920#endif
921
dbcb0f19 922static long do_mbind(unsigned long start, unsigned long len,
028fec41
DR
923 unsigned short mode, unsigned short mode_flags,
924 nodemask_t *nmask, unsigned long flags)
6ce3c4c0
CL
925{
926 struct vm_area_struct *vma;
927 struct mm_struct *mm = current->mm;
928 struct mempolicy *new;
929 unsigned long end;
930 int err;
931 LIST_HEAD(pagelist);
932
a3b51e01
DR
933 if (flags & ~(unsigned long)(MPOL_MF_STRICT |
934 MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
6ce3c4c0 935 return -EINVAL;
74c00241 936 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
6ce3c4c0
CL
937 return -EPERM;
938
939 if (start & ~PAGE_MASK)
940 return -EINVAL;
941
942 if (mode == MPOL_DEFAULT)
943 flags &= ~MPOL_MF_STRICT;
944
945 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
946 end = start + len;
947
948 if (end < start)
949 return -EINVAL;
950 if (end == start)
951 return 0;
952
028fec41 953 new = mpol_new(mode, mode_flags, nmask);
6ce3c4c0
CL
954 if (IS_ERR(new))
955 return PTR_ERR(new);
956
957 /*
958 * If we are using the default policy then operation
959 * on discontinuous address spaces is okay after all
960 */
961 if (!new)
962 flags |= MPOL_MF_DISCONTIG_OK;
963
028fec41
DR
964 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
965 start, start + len, mode, mode_flags,
966 nmask ? nodes_addr(*nmask)[0] : -1);
6ce3c4c0
CL
967
968 down_write(&mm->mmap_sem);
969 vma = check_range(mm, start, end, nmask,
970 flags | MPOL_MF_INVERT, &pagelist);
971
972 err = PTR_ERR(vma);
973 if (!IS_ERR(vma)) {
974 int nr_failed = 0;
975
976 err = mbind_range(vma, start, end, new);
7e2ab150 977
6ce3c4c0 978 if (!list_empty(&pagelist))
95a402c3
CL
979 nr_failed = migrate_pages(&pagelist, new_vma_page,
980 (unsigned long)vma);
6ce3c4c0
CL
981
982 if (!err && nr_failed && (flags & MPOL_MF_STRICT))
983 err = -EIO;
984 }
b20a3503 985
6ce3c4c0 986 up_write(&mm->mmap_sem);
f0be3d32 987 mpol_put(new);
6ce3c4c0
CL
988 return err;
989}
990
8bccd85f
CL
991/*
992 * User space interface with variable sized bitmaps for nodelists.
993 */
994
995/* Copy a node mask from user space. */
39743889 996static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
8bccd85f
CL
997 unsigned long maxnode)
998{
999 unsigned long k;
1000 unsigned long nlongs;
1001 unsigned long endmask;
1002
1003 --maxnode;
1004 nodes_clear(*nodes);
1005 if (maxnode == 0 || !nmask)
1006 return 0;
a9c930ba 1007 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
636f13c1 1008 return -EINVAL;
8bccd85f
CL
1009
1010 nlongs = BITS_TO_LONGS(maxnode);
1011 if ((maxnode % BITS_PER_LONG) == 0)
1012 endmask = ~0UL;
1013 else
1014 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1015
1016 /* When the user specified more nodes than supported just check
1017 if the non supported part is all zero. */
1018 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1019 if (nlongs > PAGE_SIZE/sizeof(long))
1020 return -EINVAL;
1021 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1022 unsigned long t;
1023 if (get_user(t, nmask + k))
1024 return -EFAULT;
1025 if (k == nlongs - 1) {
1026 if (t & endmask)
1027 return -EINVAL;
1028 } else if (t)
1029 return -EINVAL;
1030 }
1031 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1032 endmask = ~0UL;
1033 }
1034
1035 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1036 return -EFAULT;
1037 nodes_addr(*nodes)[nlongs-1] &= endmask;
1038 return 0;
1039}
1040
1041/* Copy a kernel node mask to user space */
1042static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1043 nodemask_t *nodes)
1044{
1045 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1046 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1047
1048 if (copy > nbytes) {
1049 if (copy > PAGE_SIZE)
1050 return -EINVAL;
1051 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1052 return -EFAULT;
1053 copy = nbytes;
1054 }
1055 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1056}
1057
1058asmlinkage long sys_mbind(unsigned long start, unsigned long len,
1059 unsigned long mode,
1060 unsigned long __user *nmask, unsigned long maxnode,
1061 unsigned flags)
1062{
1063 nodemask_t nodes;
1064 int err;
028fec41 1065 unsigned short mode_flags;
8bccd85f 1066
028fec41
DR
1067 mode_flags = mode & MPOL_MODE_FLAGS;
1068 mode &= ~MPOL_MODE_FLAGS;
a3b51e01
DR
1069 if (mode >= MPOL_MAX)
1070 return -EINVAL;
4c50bc01
DR
1071 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1072 (mode_flags & MPOL_F_RELATIVE_NODES))
1073 return -EINVAL;
8bccd85f
CL
1074 err = get_nodes(&nodes, nmask, maxnode);
1075 if (err)
1076 return err;
028fec41 1077 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
8bccd85f
CL
1078}
1079
1080/* Set the process memory policy */
1081asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
1082 unsigned long maxnode)
1083{
1084 int err;
1085 nodemask_t nodes;
028fec41 1086 unsigned short flags;
8bccd85f 1087
028fec41
DR
1088 flags = mode & MPOL_MODE_FLAGS;
1089 mode &= ~MPOL_MODE_FLAGS;
1090 if ((unsigned int)mode >= MPOL_MAX)
8bccd85f 1091 return -EINVAL;
4c50bc01
DR
1092 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1093 return -EINVAL;
8bccd85f
CL
1094 err = get_nodes(&nodes, nmask, maxnode);
1095 if (err)
1096 return err;
028fec41 1097 return do_set_mempolicy(mode, flags, &nodes);
8bccd85f
CL
1098}
1099
39743889
CL
1100asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
1101 const unsigned long __user *old_nodes,
1102 const unsigned long __user *new_nodes)
1103{
1104 struct mm_struct *mm;
1105 struct task_struct *task;
1106 nodemask_t old;
1107 nodemask_t new;
1108 nodemask_t task_nodes;
1109 int err;
1110
1111 err = get_nodes(&old, old_nodes, maxnode);
1112 if (err)
1113 return err;
1114
1115 err = get_nodes(&new, new_nodes, maxnode);
1116 if (err)
1117 return err;
1118
1119 /* Find the mm_struct */
1120 read_lock(&tasklist_lock);
228ebcbe 1121 task = pid ? find_task_by_vpid(pid) : current;
39743889
CL
1122 if (!task) {
1123 read_unlock(&tasklist_lock);
1124 return -ESRCH;
1125 }
1126 mm = get_task_mm(task);
1127 read_unlock(&tasklist_lock);
1128
1129 if (!mm)
1130 return -EINVAL;
1131
1132 /*
1133 * Check if this process has the right to modify the specified
1134 * process. The right exists if the process has administrative
7f927fcc 1135 * capabilities, superuser privileges or the same
39743889
CL
1136 * userid as the target process.
1137 */
1138 if ((current->euid != task->suid) && (current->euid != task->uid) &&
1139 (current->uid != task->suid) && (current->uid != task->uid) &&
74c00241 1140 !capable(CAP_SYS_NICE)) {
39743889
CL
1141 err = -EPERM;
1142 goto out;
1143 }
1144
1145 task_nodes = cpuset_mems_allowed(task);
1146 /* Is the user allowed to access the target nodes? */
74c00241 1147 if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
39743889
CL
1148 err = -EPERM;
1149 goto out;
1150 }
1151
37b07e41 1152 if (!nodes_subset(new, node_states[N_HIGH_MEMORY])) {
3b42d28b
CL
1153 err = -EINVAL;
1154 goto out;
1155 }
1156
86c3a764
DQ
1157 err = security_task_movememory(task);
1158 if (err)
1159 goto out;
1160
511030bc 1161 err = do_migrate_pages(mm, &old, &new,
74c00241 1162 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
39743889
CL
1163out:
1164 mmput(mm);
1165 return err;
1166}
1167
1168
8bccd85f
CL
1169/* Retrieve NUMA policy */
1170asmlinkage long sys_get_mempolicy(int __user *policy,
1171 unsigned long __user *nmask,
1172 unsigned long maxnode,
1173 unsigned long addr, unsigned long flags)
1174{
dbcb0f19
AB
1175 int err;
1176 int uninitialized_var(pval);
8bccd85f
CL
1177 nodemask_t nodes;
1178
1179 if (nmask != NULL && maxnode < MAX_NUMNODES)
1180 return -EINVAL;
1181
1182 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1183
1184 if (err)
1185 return err;
1186
1187 if (policy && put_user(pval, policy))
1188 return -EFAULT;
1189
1190 if (nmask)
1191 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1192
1193 return err;
1194}
1195
1da177e4
LT
1196#ifdef CONFIG_COMPAT
1197
1198asmlinkage long compat_sys_get_mempolicy(int __user *policy,
1199 compat_ulong_t __user *nmask,
1200 compat_ulong_t maxnode,
1201 compat_ulong_t addr, compat_ulong_t flags)
1202{
1203 long err;
1204 unsigned long __user *nm = NULL;
1205 unsigned long nr_bits, alloc_size;
1206 DECLARE_BITMAP(bm, MAX_NUMNODES);
1207
1208 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1209 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1210
1211 if (nmask)
1212 nm = compat_alloc_user_space(alloc_size);
1213
1214 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1215
1216 if (!err && nmask) {
1217 err = copy_from_user(bm, nm, alloc_size);
1218 /* ensure entire bitmap is zeroed */
1219 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1220 err |= compat_put_bitmap(nmask, bm, nr_bits);
1221 }
1222
1223 return err;
1224}
1225
1226asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
1227 compat_ulong_t maxnode)
1228{
1229 long err = 0;
1230 unsigned long __user *nm = NULL;
1231 unsigned long nr_bits, alloc_size;
1232 DECLARE_BITMAP(bm, MAX_NUMNODES);
1233
1234 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1235 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1236
1237 if (nmask) {
1238 err = compat_get_bitmap(bm, nmask, nr_bits);
1239 nm = compat_alloc_user_space(alloc_size);
1240 err |= copy_to_user(nm, bm, alloc_size);
1241 }
1242
1243 if (err)
1244 return -EFAULT;
1245
1246 return sys_set_mempolicy(mode, nm, nr_bits+1);
1247}
1248
1249asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
1250 compat_ulong_t mode, compat_ulong_t __user *nmask,
1251 compat_ulong_t maxnode, compat_ulong_t flags)
1252{
1253 long err = 0;
1254 unsigned long __user *nm = NULL;
1255 unsigned long nr_bits, alloc_size;
dfcd3c0d 1256 nodemask_t bm;
1da177e4
LT
1257
1258 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1259 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1260
1261 if (nmask) {
dfcd3c0d 1262 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
1da177e4 1263 nm = compat_alloc_user_space(alloc_size);
dfcd3c0d 1264 err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
1da177e4
LT
1265 }
1266
1267 if (err)
1268 return -EFAULT;
1269
1270 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1271}
1272
1273#endif
1274
480eccf9
LS
1275/*
1276 * get_vma_policy(@task, @vma, @addr)
1277 * @task - task for fallback if vma policy == default
1278 * @vma - virtual memory area whose policy is sought
1279 * @addr - address in @vma for shared policy lookup
1280 *
1281 * Returns effective policy for a VMA at specified address.
1282 * Falls back to @task or system default policy, as necessary.
52cd3b07
LS
1283 * Current or other task's task mempolicy and non-shared vma policies
1284 * are protected by the task's mmap_sem, which must be held for read by
1285 * the caller.
1286 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1287 * count--added by the get_policy() vm_op, as appropriate--to protect against
1288 * freeing by another task. It is the caller's responsibility to free the
1289 * extra reference for shared policies.
480eccf9 1290 */
ae4d8c16 1291static struct mempolicy *get_vma_policy(struct task_struct *task,
48fce342 1292 struct vm_area_struct *vma, unsigned long addr)
1da177e4 1293{
6e21c8f1 1294 struct mempolicy *pol = task->mempolicy;
1da177e4
LT
1295
1296 if (vma) {
480eccf9 1297 if (vma->vm_ops && vma->vm_ops->get_policy) {
ae4d8c16
LS
1298 struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
1299 addr);
1300 if (vpol)
1301 pol = vpol;
bea904d5 1302 } else if (vma->vm_policy)
1da177e4
LT
1303 pol = vma->vm_policy;
1304 }
1305 if (!pol)
1306 pol = &default_policy;
1307 return pol;
1308}
1309
52cd3b07
LS
1310/*
1311 * Return a nodemask representing a mempolicy for filtering nodes for
1312 * page allocation
1313 */
1314static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
19770b32
MG
1315{
1316 /* Lower zones don't get a nodemask applied for MPOL_BIND */
45c4745a 1317 if (unlikely(policy->mode == MPOL_BIND) &&
19770b32
MG
1318 gfp_zone(gfp) >= policy_zone &&
1319 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1320 return &policy->v.nodes;
1321
1322 return NULL;
1323}
1324
52cd3b07
LS
1325/* Return a zonelist indicated by gfp for node representing a mempolicy */
1326static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy)
1da177e4 1327{
fc36b8d3 1328 int nd = numa_node_id();
1da177e4 1329
45c4745a 1330 switch (policy->mode) {
1da177e4 1331 case MPOL_PREFERRED:
fc36b8d3
LS
1332 if (!(policy->flags & MPOL_F_LOCAL))
1333 nd = policy->v.preferred_node;
1da177e4
LT
1334 break;
1335 case MPOL_BIND:
19770b32 1336 /*
52cd3b07
LS
1337 * Normally, MPOL_BIND allocations are node-local within the
1338 * allowed nodemask. However, if __GFP_THISNODE is set and the
1339 * current node is part of the mask, we use the zonelist for
1340 * the first node in the mask instead.
19770b32 1341 */
19770b32
MG
1342 if (unlikely(gfp & __GFP_THISNODE) &&
1343 unlikely(!node_isset(nd, policy->v.nodes)))
1344 nd = first_node(policy->v.nodes);
1345 break;
1da177e4 1346 case MPOL_INTERLEAVE: /* should not happen */
1da177e4
LT
1347 break;
1348 default:
1da177e4
LT
1349 BUG();
1350 }
0e88460d 1351 return node_zonelist(nd, gfp);
1da177e4
LT
1352}
1353
1354/* Do dynamic interleaving for a process */
1355static unsigned interleave_nodes(struct mempolicy *policy)
1356{
1357 unsigned nid, next;
1358 struct task_struct *me = current;
1359
1360 nid = me->il_next;
dfcd3c0d 1361 next = next_node(nid, policy->v.nodes);
1da177e4 1362 if (next >= MAX_NUMNODES)
dfcd3c0d 1363 next = first_node(policy->v.nodes);
f5b087b5
DR
1364 if (next < MAX_NUMNODES)
1365 me->il_next = next;
1da177e4
LT
1366 return nid;
1367}
1368
dc85da15
CL
1369/*
1370 * Depending on the memory policy provide a node from which to allocate the
1371 * next slab entry.
1372 * @policy must be protected from freeing by the caller. If @policy is
1373 * the current task's mempolicy, this protection is implicit, as only the
1374 * task can change its policy. The system default policy requires no
1375 * such protection.
1376 */
1377unsigned slab_node(struct mempolicy *policy)
1378{
1379 if (!policy || policy->flags & MPOL_F_LOCAL)
1380 return numa_node_id();
1381
1382 switch (policy->mode) {
1383 case MPOL_PREFERRED:
fc36b8d3
LS
1384 /*
1385 * handled MPOL_F_LOCAL above
1386 */
1387 return policy->v.preferred_node;
765c4507 1388
dc85da15
CL
1389 case MPOL_INTERLEAVE:
1390 return interleave_nodes(policy);
1391
dd1a239f 1392 case MPOL_BIND: {
dc85da15
CL
1393 /*
1394 * Follow bind policy behavior and start allocation at the
1395 * first node.
1396 */
19770b32
MG
1397 struct zonelist *zonelist;
1398 struct zone *zone;
1399 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1400 zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
1401 (void)first_zones_zonelist(zonelist, highest_zoneidx,
1402 &policy->v.nodes,
1403 &zone);
1404 return zone->node;
dd1a239f 1405 }
dc85da15 1406
dc85da15 1407 default:
bea904d5 1408 BUG();
dc85da15
CL
1409 }
1410}
1411
1da177e4
LT
1412/* Do static interleaving for a VMA with known offset. */
1413static unsigned offset_il_node(struct mempolicy *pol,
1414 struct vm_area_struct *vma, unsigned long off)
1415{
1416 unsigned nnodes = nodes_weight(pol->v.nodes);
1417 unsigned target;
1418 int c;
1419 int nid = -1;
1420
1421 if (!nnodes)
1422 return numa_node_id();
1423 target = (unsigned int)off % nnodes;
1424 c = 0;
1425 do {
1426 nid = next_node(nid, pol->v.nodes);
1427 c++;
1428 } while (c <= target);
1429 return nid;
1430}
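/*
 * Added example (illustrative): with pol->v.nodes = {0,2,3} and off = 7,
 * nnodes = 3 and target = 7 % 3 = 1, so the loop stops on the second node
 * of the mask and the page is placed on node 2.
 */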
1431
5da7ca86
CL
1432/* Determine a node number for interleave */
1433static inline unsigned interleave_nid(struct mempolicy *pol,
1434 struct vm_area_struct *vma, unsigned long addr, int shift)
1435{
1436 if (vma) {
1437 unsigned long off;
1438
3b98b087
NA
1439 /*
1440 * for small pages, there is no difference between
1441 * shift and PAGE_SHIFT, so the bit-shift is safe.
1442 * for huge pages, since vm_pgoff is in units of small
1443 * pages, we need to shift off the always 0 bits to get
1444 * a useful offset.
1445 */
1446 BUG_ON(shift < PAGE_SHIFT);
1447 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
5da7ca86
CL
1448 off += (addr - vma->vm_start) >> shift;
1449 return offset_il_node(pol, vma, off);
1450 } else
1451 return interleave_nodes(pol);
1452}
1453
00ac59ad 1454#ifdef CONFIG_HUGETLBFS
480eccf9
LS
1455/*
1456 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1457 * @vma = virtual memory area whose policy is sought
1458 * @addr = address in @vma for shared policy lookup and interleave policy
1459 * @gfp_flags = for requested zone
19770b32
MG
1460 * @mpol = pointer to mempolicy pointer for reference counted mempolicy
1461 * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
480eccf9 1462 *
52cd3b07
LS
1463 * Returns a zonelist suitable for a huge page allocation and a pointer
1464 * to the struct mempolicy for conditional unref after allocation.
1465 * If the effective policy is 'BIND, returns a pointer to the mempolicy's
1466 * @nodemask for filtering the zonelist.
480eccf9 1467 */
396faf03 1468struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
19770b32
MG
1469 gfp_t gfp_flags, struct mempolicy **mpol,
1470 nodemask_t **nodemask)
5da7ca86 1471{
480eccf9 1472 struct zonelist *zl;
5da7ca86 1473
52cd3b07 1474 *mpol = get_vma_policy(current, vma, addr);
19770b32 1475 *nodemask = NULL; /* assume !MPOL_BIND */
5da7ca86 1476
52cd3b07
LS
1477 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1478 zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1479 HPAGE_SHIFT), gfp_flags);
1480 } else {
1481 zl = policy_zonelist(gfp_flags, *mpol);
1482 if ((*mpol)->mode == MPOL_BIND)
1483 *nodemask = &(*mpol)->v.nodes;
480eccf9
LS
1484 }
1485 return zl;
5da7ca86 1486}
00ac59ad 1487#endif
5da7ca86 1488
1da177e4
LT
1489/* Allocate a page in interleaved policy.
1490 Own path because it needs to do special accounting. */
662f3a0b
AK
1491static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1492 unsigned nid)
1da177e4
LT
1493{
1494 struct zonelist *zl;
1495 struct page *page;
1496
0e88460d 1497 zl = node_zonelist(nid, gfp);
1da177e4 1498 page = __alloc_pages(gfp, order, zl);
dd1a239f 1499 if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
ca889e6c 1500 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1da177e4
LT
1501 return page;
1502}
1503
1504/**
1505 * alloc_page_vma - Allocate a page for a VMA.
1506 *
1507 * @gfp:
1508 * %GFP_USER user allocation.
1509 * %GFP_KERNEL kernel allocations,
1510 * %GFP_HIGHMEM highmem/user allocations,
1511 * %GFP_FS allocation should not call back into a file system.
1512 * %GFP_ATOMIC don't sleep.
1513 *
1514 * @vma: Pointer to VMA or NULL if not available.
1515 * @addr: Virtual Address of the allocation. Must be inside the VMA.
1516 *
1517 * This function allocates a page from the kernel page pool and applies
1518 * a NUMA policy associated with the VMA or the current process.
1519 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
1520 * mm_struct of the VMA to prevent it from going away. Should be used for
1521 * all allocations for pages that will be mapped into
1522 * user space. Returns NULL when no page can be allocated.
1523 *
1524 * Should be called with the mm_sem of the vma held.
1525 */
1526struct page *
dd0fc66f 1527alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
1da177e4 1528{
6e21c8f1 1529 struct mempolicy *pol = get_vma_policy(current, vma, addr);
480eccf9 1530 struct zonelist *zl;
1da177e4 1531
cf2a473c 1532 cpuset_update_task_memory_state();
1da177e4 1533
45c4745a 1534 if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
1da177e4 1535 unsigned nid;
5da7ca86
CL
1536
1537 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
52cd3b07 1538 mpol_cond_put(pol);
1da177e4
LT
1539 return alloc_page_interleave(gfp, 0, nid);
1540 }
52cd3b07
LS
1541 zl = policy_zonelist(gfp, pol);
1542 if (unlikely(mpol_needs_cond_ref(pol))) {
480eccf9 1543 /*
52cd3b07 1544 * slow path: ref counted shared policy
480eccf9 1545 */
19770b32 1546 struct page *page = __alloc_pages_nodemask(gfp, 0,
52cd3b07 1547 zl, policy_nodemask(gfp, pol));
f0be3d32 1548 __mpol_put(pol);
480eccf9
LS
1549 return page;
1550 }
1551 /*
1552 * fast path: default or task policy
1553 */
52cd3b07 1554 return __alloc_pages_nodemask(gfp, 0, zl, policy_nodemask(gfp, pol));
1da177e4
LT
1555}
1556
1557/**
1558 * alloc_pages_current - Allocate pages.
1559 *
1560 * @gfp:
1561 * %GFP_USER user allocation,
1562 * %GFP_KERNEL kernel allocation,
1563 * %GFP_HIGHMEM highmem allocation,
1564 * %GFP_FS don't call back into a file system.
1565 * %GFP_ATOMIC don't sleep.
1566 * @order: Power of two of allocation size in pages. 0 is a single page.
1567 *
1568 * Allocate a page from the kernel page pool. When not in
1569 * interrupt context, apply the current process' NUMA policy.
1570 * Returns NULL when no page can be allocated.
1571 *
1572 * Don't call cpuset_update_task_memory_state() unless
1573 * 1) it's ok to take cpuset_sem (can WAIT), and
1574 * 2) allocating for current task (not interrupt).
1575 */
dd0fc66f 1576struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1da177e4
LT
1577{
1578 struct mempolicy *pol = current->mempolicy;
1579
1580 if ((gfp & __GFP_WAIT) && !in_interrupt())
cf2a473c 1581 cpuset_update_task_memory_state();
9b819d20 1582 if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
1da177e4 1583 pol = &default_policy;
52cd3b07
LS
1584
1585 /*
1586 * No reference counting needed for current->mempolicy
1587 * nor system default_policy
1588 */
45c4745a 1589 if (pol->mode == MPOL_INTERLEAVE)
1da177e4 1590 return alloc_page_interleave(gfp, order, interleave_nodes(pol));
19770b32 1591 return __alloc_pages_nodemask(gfp, order,
52cd3b07 1592 policy_zonelist(gfp, pol), policy_nodemask(gfp, pol));
1da177e4
LT
1593}
1594EXPORT_SYMBOL(alloc_pages_current);
1595
1596/*
1597 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
1598 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
1599 * with the mems_allowed returned by cpuset_mems_allowed(). This
1600 * keeps mempolicies cpuset relative after its cpuset moves. See
1601 * further kernel/cpuset.c update_nodemask().
1602 */
1603
1604/* Slow path of a mempolicy duplicate */
1605struct mempolicy *__mpol_dup(struct mempolicy *old)
1da177e4
LT
1606{
1607 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
1608
1609 if (!new)
1610 return ERR_PTR(-ENOMEM);
4225399a
PJ
1611 if (current_cpuset_is_being_rebound()) {
1612 nodemask_t mems = cpuset_mems_allowed(current);
1613 mpol_rebind_policy(old, &mems);
1614 }
1da177e4
LT
1615 *new = *old;
1616 atomic_set(&new->refcnt, 1);
1da177e4
LT
1617 return new;
1618}
1619
1620/*
1621 * If *frompol needs [has] an extra ref, copy *frompol to *tompol,
1622 * eliminate the MPOL_F_* flags that require conditional ref and
1623 * [NOTE!!!] drop the extra ref. Not safe to reference *frompol directly
1624 * after return. Use the returned value.
1625 *
1626 * Allows use of a mempolicy for, e.g., multiple allocations with a single
1627 * policy lookup, even if the policy needs/has extra ref on lookup.
1628 * shmem_readahead needs this.
1629 */
1630struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
1631 struct mempolicy *frompol)
1632{
1633 if (!mpol_needs_cond_ref(frompol))
1634 return frompol;
1635
1636 *tompol = *frompol;
1637 tompol->flags &= ~MPOL_F_SHARED; /* copy doesn't need unref */
1638 __mpol_put(frompol);
1639 return tompol;
1640}
1641
f5b087b5
DR
1642static int mpol_match_intent(const struct mempolicy *a,
1643 const struct mempolicy *b)
1644{
1645 if (a->flags != b->flags)
1646 return 0;
1647 if (!mpol_store_user_nodemask(a))
1648 return 1;
1649 return nodes_equal(a->w.user_nodemask, b->w.user_nodemask);
1650}
1651
1da177e4
LT
1652/* Slow path of a mempolicy comparison */
1653int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1654{
1655 if (!a || !b)
1656 return 0;
45c4745a 1657 if (a->mode != b->mode)
1da177e4 1658 return 0;
45c4745a 1659 if (a->mode != MPOL_DEFAULT && !mpol_match_intent(a, b))
f5b087b5 1660 return 0;
45c4745a 1661 switch (a->mode) {
19770b32
MG
1662 case MPOL_BIND:
1663 /* Fall through */
1da177e4 1664 case MPOL_INTERLEAVE:
dfcd3c0d 1665 return nodes_equal(a->v.nodes, b->v.nodes);
1da177e4 1666 case MPOL_PREFERRED:
fc36b8d3
LS
1667 return a->v.preferred_node == b->v.preferred_node &&
1668 a->flags == b->flags;
1da177e4
LT
1669 default:
1670 BUG();
1671 return 0;
1672 }
1673}
1674
1da177e4
LT
1675/*
1676 * Shared memory backing store policy support.
1677 *
1678 * Remember policies even when nobody has shared memory mapped.
1679 * The policies are kept in Red-Black tree linked from the inode.
1680 * They are protected by the sp->lock spinlock, which should be held
1681 * for any accesses to the tree.
1682 */
1683
1684/* lookup first element intersecting start-end */
1685/* Caller holds sp->lock */
1686static struct sp_node *
1687sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
1688{
1689 struct rb_node *n = sp->root.rb_node;
1690
1691 while (n) {
1692 struct sp_node *p = rb_entry(n, struct sp_node, nd);
1693
1694 if (start >= p->end)
1695 n = n->rb_right;
1696 else if (end <= p->start)
1697 n = n->rb_left;
1698 else
1699 break;
1700 }
1701 if (!n)
1702 return NULL;
1703 for (;;) {
1704 struct sp_node *w = NULL;
1705 struct rb_node *prev = rb_prev(n);
1706 if (!prev)
1707 break;
1708 w = rb_entry(prev, struct sp_node, nd);
1709 if (w->end <= start)
1710 break;
1711 n = prev;
1712 }
1713 return rb_entry(n, struct sp_node, nd);
1714}
1715
1716/* Insert a new shared policy into the list. */
1717/* Caller holds sp->lock */
1718static void sp_insert(struct shared_policy *sp, struct sp_node *new)
1719{
1720 struct rb_node **p = &sp->root.rb_node;
1721 struct rb_node *parent = NULL;
1722 struct sp_node *nd;
1723
1724 while (*p) {
1725 parent = *p;
1726 nd = rb_entry(parent, struct sp_node, nd);
1727 if (new->start < nd->start)
1728 p = &(*p)->rb_left;
1729 else if (new->end > nd->end)
1730 p = &(*p)->rb_right;
1731 else
1732 BUG();
1733 }
1734 rb_link_node(&new->nd, parent, p);
1735 rb_insert_color(&new->nd, &sp->root);
1736 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
1737 new->policy ? new->policy->mode : 0);
1738}
1739
1740/* Find shared policy intersecting idx */
1741struct mempolicy *
1742mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
1743{
1744 struct mempolicy *pol = NULL;
1745 struct sp_node *sn;
1746
1747 if (!sp->root.rb_node)
1748 return NULL;
1749 spin_lock(&sp->lock);
1750 sn = sp_lookup(sp, idx, idx+1);
1751 if (sn) {
1752 mpol_get(sn->policy);
1753 pol = sn->policy;
1754 }
1755 spin_unlock(&sp->lock);
1756 return pol;
1757}
1758
1759static void sp_delete(struct shared_policy *sp, struct sp_node *n)
1760{
140d5a49 1761 pr_debug("deleting %lx-%lx\n", n->start, n->end);
1da177e4 1762 rb_erase(&n->nd, &sp->root);
f0be3d32 1763 mpol_put(n->policy);
1764 kmem_cache_free(sn_cache, n);
1765}
1766
1767static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
1768 struct mempolicy *pol)
1769{
1770 struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
1771
1772 if (!n)
1773 return NULL;
1774 n->start = start;
1775 n->end = end;
1776 mpol_get(pol);
aab0b102 1777 pol->flags |= MPOL_F_SHARED; /* for unref */
1778 n->policy = pol;
1779 return n;
1780}
1781
1782/* Replace a policy range. */
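/*
 * Old nodes wholly inside [start, end) are deleted and partial overlaps
 * are trimmed.  An old node spanning the entire new range must be split,
 * which requires a second sp_node (new2); since the GFP_KERNEL allocation
 * may sleep, sp->lock is dropped, new2 is allocated, and the scan restarts.
 */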
1783static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
1784 unsigned long end, struct sp_node *new)
1785{
1786 struct sp_node *n, *new2 = NULL;
1787
1788restart:
1789 spin_lock(&sp->lock);
1790 n = sp_lookup(sp, start, end);
1791 /* Take care of old policies in the same range. */
1792 while (n && n->start < end) {
1793 struct rb_node *next = rb_next(&n->nd);
1794 if (n->start >= start) {
1795 if (n->end <= end)
1796 sp_delete(sp, n);
1797 else
1798 n->start = end;
1799 } else {
1800 /* Old policy spanning whole new range. */
1801 if (n->end > end) {
1802 if (!new2) {
1803 spin_unlock(&sp->lock);
1804 new2 = sp_alloc(end, n->end, n->policy);
1805 if (!new2)
1806 return -ENOMEM;
1807 goto restart;
1808 }
1809 n->end = start;
1810 sp_insert(sp, new2);
1811 new2 = NULL;
1812 break;
1813 } else
1814 n->end = start;
1815 }
1816 if (!next)
1817 break;
1818 n = rb_entry(next, struct sp_node, nd);
1819 }
1820 if (new)
1821 sp_insert(sp, new);
1822 spin_unlock(&sp->lock);
1823 if (new2) {
f0be3d32 1824 mpol_put(new2->policy);
1825 kmem_cache_free(sn_cache, new2);
1826 }
1827 return 0;
1828}
1829
a3b51e01 1830void mpol_shared_policy_init(struct shared_policy *info, unsigned short policy,
028fec41 1831 unsigned short flags, nodemask_t *policy_nodes)
1832{
1833 info->root = RB_ROOT;
1834 spin_lock_init(&info->lock);
1835
1836 if (policy != MPOL_DEFAULT) {
1837 struct mempolicy *newpol;
1838
bea904d5 1839 /* Falls back to NULL policy [MPOL_DEFAULT] on any error */
028fec41 1840 newpol = mpol_new(policy, flags, policy_nodes);
1841 if (!IS_ERR(newpol)) {
1842 /* Create pseudo-vma that contains just the policy */
1843 struct vm_area_struct pvma;
1844
1845 memset(&pvma, 0, sizeof(struct vm_area_struct));
1846 /* Policy covers entire file */
1847 pvma.vm_end = TASK_SIZE;
1848 mpol_set_shared_policy(info, &pvma, newpol);
f0be3d32 1849 mpol_put(newpol);
1850 }
1851 }
1852}
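/*
 * Illustrative caller (hypothetical; not taken from this file): a
 * filesystem supporting shared policies initializes the tree while
 * setting up the inode, e.g.
 *
 *	mpol_shared_policy_init(&info->policy, MPOL_DEFAULT, 0, NULL);
 *
 * MPOL_DEFAULT leaves the tree empty; a real mode, flags and nodemask
 * install a pseudo-vma policy covering the whole file as above.
 */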
1853
1854int mpol_set_shared_policy(struct shared_policy *info,
1855 struct vm_area_struct *vma, struct mempolicy *npol)
1856{
1857 int err;
1858 struct sp_node *new = NULL;
1859 unsigned long sz = vma_pages(vma);
1860
028fec41 1861 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
1da177e4 1862 vma->vm_pgoff,
45c4745a 1863 sz, npol ? npol->mode : -1,
028fec41 1864 npol ? npol->flags : -1,
140d5a49 1865 npol ? nodes_addr(npol->v.nodes)[0] : -1);
1866
1867 if (npol) {
1868 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
1869 if (!new)
1870 return -ENOMEM;
1871 }
1872 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
1873 if (err && new)
1874 kmem_cache_free(sn_cache, new);
1875 return err;
1876}
1877
1878/* Free a backing policy store on inode delete. */
1879void mpol_free_shared_policy(struct shared_policy *p)
1880{
1881 struct sp_node *n;
1882 struct rb_node *next;
1883
1884 if (!p->root.rb_node)
1885 return;
1886 spin_lock(&p->lock);
1887 next = rb_first(&p->root);
1888 while (next) {
1889 n = rb_entry(next, struct sp_node, nd);
1890 next = rb_next(&n->nd);
90c5029e 1891 rb_erase(&n->nd, &p->root);
f0be3d32 1892 mpol_put(n->policy);
1893 kmem_cache_free(sn_cache, n);
1894 }
1895 spin_unlock(&p->lock);
1896}
1897
1898/* assumes fs == KERNEL_DS */
1899void __init numa_policy_init(void)
1900{
1901 nodemask_t interleave_nodes;
1902 unsigned long largest = 0;
1903 int nid, prefer = 0;
1904
1905 policy_cache = kmem_cache_create("numa_policy",
1906 sizeof(struct mempolicy),
20c2df83 1907 0, SLAB_PANIC, NULL);
1908
1909 sn_cache = kmem_cache_create("shared_policy_node",
1910 sizeof(struct sp_node),
20c2df83 1911 0, SLAB_PANIC, NULL);
1da177e4 1912
1913 /*
1914 * Set interleaving policy for system init. Interleaving is only
1915 * enabled across suitably sized nodes (default is >= 16MB); if all
1916 * nodes are smaller than that, the largest node is used on its own.
1917 */
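	/*
	 * Worked example (illustrative): with nodes of 4 GB, 8 MB and 512 MB,
	 * nodes 0 and 2 clear the 16 MB threshold and are interleaved; only
	 * if every node were below 16 MB would the single largest node be
	 * used on its own.
	 */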
1918 nodes_clear(interleave_nodes);
56bbd65d 1919 for_each_node_state(nid, N_HIGH_MEMORY) {
1920 unsigned long total_pages = node_present_pages(nid);
1921
1922 /* Preserve the largest node */
1923 if (largest < total_pages) {
1924 largest = total_pages;
1925 prefer = nid;
1926 }
1927
1928 /* Interleave this node? */
1929 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
1930 node_set(nid, interleave_nodes);
1931 }
1932
1933 /* All too small, use the largest */
1934 if (unlikely(nodes_empty(interleave_nodes)))
1935 node_set(prefer, interleave_nodes);
1da177e4 1936
028fec41 1937 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
1938 printk(KERN_WARNING "numa_policy_init: interleaving failed\n");
1939}
1940
8bccd85f 1941/* Reset policy of current process to default */
1942void numa_default_policy(void)
1943{
028fec41 1944 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
1da177e4 1945}
68860ec1 1946
1a75a6c8 1947/*
fc36b8d3 1948 * "local" is a pseudo-policy: MPOL_PREFERRED with the MPOL_F_LOCAL flag
53f2556b 1949 * Used only for mpol_to_str()
1a75a6c8 1950 */
53f2556b 1951#define MPOL_LOCAL (MPOL_INTERLEAVE + 1)
15ad7cdc 1952static const char * const policy_types[] =
53f2556b 1953 { "default", "prefer", "bind", "interleave", "local" };
1954
1955/*
1956 * Convert a mempolicy into a string.
1957 * Returns the number of characters in buffer (if positive)
1958 * or an error (negative)
1959 */
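/*
 * Example results (illustrative): "default", "local", "prefer=2",
 * "bind=static=0-3", "interleave=relative=0,2-3".  The optional mode
 * flags and the nodelist are each appended after an '='.
 */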
1960static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
1961{
1962 char *p = buffer;
1963 int l;
1964 nodemask_t nodes;
bea904d5 1965 unsigned short mode;
f5b087b5 1966 unsigned short flags = pol ? pol->flags : 0;
1a75a6c8 1967
1968 if (!pol || pol == &default_policy)
1969 mode = MPOL_DEFAULT;
1970 else
1971 mode = pol->mode;
1972
1973 switch (mode) {
1974 case MPOL_DEFAULT:
1975 nodes_clear(nodes);
1976 break;
1977
1978 case MPOL_PREFERRED:
1979 nodes_clear(nodes);
fc36b8d3 1980 if (flags & MPOL_F_LOCAL)
1981 mode = MPOL_LOCAL; /* pseudo-policy */
1982 else
fc36b8d3 1983 node_set(pol->v.preferred_node, nodes);
1984 break;
1985
1986 case MPOL_BIND:
19770b32 1987 /* Fall through */
1988 case MPOL_INTERLEAVE:
1989 nodes = pol->v.nodes;
1990 break;
1991
1992 default:
1993 BUG();
1994 return -EFAULT;
1995 }
1996
1997 l = strlen(policy_types[mode]);
1998 if (buffer + maxlen < p + l + 1)
1999 return -ENOSPC;
2000
2001 strcpy(p, policy_types[mode]);
2002 p += l;
2003
fc36b8d3 2004 if (flags & MPOL_MODE_FLAGS) {
2005 int need_bar = 0;
2006
2007 if (buffer + maxlen < p + 2)
2008 return -ENOSPC;
2009 *p++ = '=';
2010
2011 if (flags & MPOL_F_STATIC_NODES)
2012 p += sprintf(p, "%sstatic", need_bar++ ? "|" : "");
2013 if (flags & MPOL_F_RELATIVE_NODES)
2014 p += sprintf(p, "%srelative", need_bar++ ? "|" : "");
2015 }
2016
2017 if (!nodes_empty(nodes)) {
2018 if (buffer + maxlen < p + 2)
2019 return -ENOSPC;
2020 *p++ = '=';
2021 p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
2022 }
2023 return p - buffer;
2024}
2025
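/* Per-vma counters gathered for /proc/<pid>/numa_maps, see gather_stats() */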
2026struct numa_maps {
2027 unsigned long pages;
2028 unsigned long anon;
2029 unsigned long active;
2030 unsigned long writeback;
1a75a6c8 2031 unsigned long mapcount_max;
2032 unsigned long dirty;
2033 unsigned long swapcache;
2034 unsigned long node[MAX_NUMNODES];
2035};
2036
397874df 2037static void gather_stats(struct page *page, void *private, int pte_dirty)
2038{
2039 struct numa_maps *md = private;
2040 int count = page_mapcount(page);
2041
2042 md->pages++;
2043 if (pte_dirty || PageDirty(page))
2044 md->dirty++;
1a75a6c8 2045
2046 if (PageSwapCache(page))
2047 md->swapcache++;
1a75a6c8 2048
2049 if (PageActive(page))
2050 md->active++;
2051
2052 if (PageWriteback(page))
2053 md->writeback++;
2054
2055 if (PageAnon(page))
2056 md->anon++;
2057
2058 if (count > md->mapcount_max)
2059 md->mapcount_max = count;
2060
1a75a6c8 2061 md->node[page_to_nid(page)]++;
2062}
2063
7f709ed0 2064#ifdef CONFIG_HUGETLB_PAGE
2065static void check_huge_range(struct vm_area_struct *vma,
2066 unsigned long start, unsigned long end,
2067 struct numa_maps *md)
2068{
2069 unsigned long addr;
2070 struct page *page;
2071
2072 for (addr = start; addr < end; addr += HPAGE_SIZE) {
2073 pte_t *ptep = huge_pte_offset(vma->vm_mm, addr & HPAGE_MASK);
2074 pte_t pte;
2075
2076 if (!ptep)
2077 continue;
2078
2079 pte = *ptep;
2080 if (pte_none(pte))
2081 continue;
2082
2083 page = pte_page(pte);
2084 if (!page)
2085 continue;
2086
2087 gather_stats(page, md, pte_dirty(*ptep));
2088 }
2089}
2090#else
2091static inline void check_huge_range(struct vm_area_struct *vma,
2092 unsigned long start, unsigned long end,
2093 struct numa_maps *md)
2094{
2095}
2096#endif
397874df 2097
2098/*
2099 * Display pages allocated per node and memory policy via /proc.
2100 */
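/*
 * One line per vma, e.g. (values illustrative):
 *
 *	00400000 default file=/bin/cat mapped=9 mapmax=12 N0=5 N1=4
 *	2aaaab000000 interleave=0-3 heap anon=16 dirty=16 active=8 N0=4 N1=4 N2=4 N3=4
 *
 * i.e. vma start, the mpol_to_str() policy string, an optional
 * file/heap/stack/huge tag, the gathered counters and per-node counts.
 */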
2101int show_numa_map(struct seq_file *m, void *v)
2102{
99f89551 2103 struct proc_maps_private *priv = m->private;
2104 struct vm_area_struct *vma = v;
2105 struct numa_maps *md;
2106 struct file *file = vma->vm_file;
2107 struct mm_struct *mm = vma->vm_mm;
480eccf9 2108 struct mempolicy *pol;
2109 int n;
2110 char buffer[50];
2111
397874df 2112 if (!mm)
2113 return 0;
2114
2115 md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
2116 if (!md)
2117 return 0;
2118
2119 pol = get_vma_policy(priv->task, vma, vma->vm_start);
2120 mpol_to_str(buffer, sizeof(buffer), pol);
52cd3b07 2121 mpol_cond_put(pol);
2122
2123 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
2124
2125 if (file) {
2126 seq_printf(m, " file=");
c32c2f63 2127 seq_path(m, &file->f_path, "\n\t= ");
2128 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
2129 seq_printf(m, " heap");
2130 } else if (vma->vm_start <= mm->start_stack &&
2131 vma->vm_end >= mm->start_stack) {
2132 seq_printf(m, " stack");
2133 }
2134
2135 if (is_vm_hugetlb_page(vma)) {
2136 check_huge_range(vma, vma->vm_start, vma->vm_end, md);
2137 seq_printf(m, " huge");
2138 } else {
a57ebfdb 2139 check_pgd_range(vma, vma->vm_start, vma->vm_end,
56bbd65d 2140 &node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
2141 }
2142
2143 if (!md->pages)
2144 goto out;
1a75a6c8 2145
2146 if (md->anon)
2147 seq_printf(m," anon=%lu",md->anon);
1a75a6c8 2148
2149 if (md->dirty)
2150 seq_printf(m, " dirty=%lu", md->dirty);
1a75a6c8 2151
2152 if (md->pages != md->anon && md->pages != md->dirty)
2153 seq_printf(m, " mapped=%lu", md->pages);
1a75a6c8 2154
2155 if (md->mapcount_max > 1)
2156 seq_printf(m, " mapmax=%lu", md->mapcount_max);
1a75a6c8 2157
2158 if (md->swapcache)
2159 seq_printf(m, " swapcache=%lu", md->swapcache);
2160
2161 if (md->active < md->pages && !is_vm_hugetlb_page(vma))
2162 seq_printf(m, " active=%lu", md->active);
2163
2164 if (md->writeback)
2165 seq_printf(m, " writeback=%lu", md->writeback);
2166
56bbd65d 2167 for_each_node_state(n, N_HIGH_MEMORY)
2168 if (md->node[n])
2169 seq_printf(m, " N%d=%lu", n, md->node[n]);
2170out:
2171 seq_putc(m, '\n');
2172 kfree(md);
2173
2174 if (m->count < m->size)
99f89551 2175 m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
2176 return 0;
2177}