/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case node -1 here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/
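/*
 * Usage sketch (illustrative, not part of the original file): the policies
 * above are set from userspace with the set_mempolicy(2) and mbind(2)
 * system calls, for example through the libnuma <numaif.h> wrappers.
 * Assuming nodes 0 and 1 are online and 'buf'/'len' describe an existing
 * mapping:
 *
 *	#include <numaif.h>
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);
 *
 *	// interleave this task's future allocations across nodes 0 and 1
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, sizeof(mask) * 8);
 *
 *	// restrict (and migrate) an existing mapping to node 0 only
 *	unsigned long node0 = 1UL << 0;
 *	mbind(buf, len, MPOL_BIND, &node0, sizeof(node0) * 8, MPOL_MF_MOVE);
 */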

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)		/* Gather statistics */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1),	/* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

/* Check that the nodemask contains at least one populated zone */
static int is_valid_nodemask(const nodemask_t *nodemask)
{
	int nd, k;

	/* Check that there is something useful in this mask */
	k = policy_zone;

	for_each_node_mask(nd, *nodemask) {
		struct zone *z;

		for (k = 0; k <= policy_zone; k++) {
			z = &NODE_DATA(nd)->node_zones[k];
			if (z->present_pages > 0)
				return 1;
		}
	}

	return 0;
}

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES);
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
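
/*
 * Worked example (illustrative): with MPOL_F_RELATIVE_NODES the user's
 * nodemask is interpreted relative to the set of allowed nodes.  If the
 * task's cpuset allows nodes {4,5,6} and the user passed {0,2}, the mask
 * is first folded modulo nodes_weight({4,5,6}) == 3 (still {0,2}) and then
 * mapped onto the allowed set, so bit 0 -> node 4 and bit 2 -> node 6,
 * giving an effective mask of {4,6}.
 */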

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!is_valid_nodemask(nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/* Create a new policy */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;
	nodemask_t cpuset_context_nmask;
	int ret;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;	/* simply delete any existing policy */
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
			nodes = NULL;	/* flag local alloc */
		}
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	if (nodes) {
		/*
		 * cpuset related setup doesn't apply to local allocation
		 */
		cpuset_update_task_memory_state();
		if (flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&cpuset_context_nmask, nodes,
					       &cpuset_current_mems_allowed);
		else
			nodes_and(cpuset_context_nmask, *nodes,
				  cpuset_current_mems_allowed);
		if (mpol_store_user_nodemask(policy))
			policy->w.user_nodemask = *nodes;
		else
			policy->w.cpuset_mems_allowed =
						cpuset_mems_allowed(current);
	}

	ret = mpol_ops[mode].create(policy,
				nodes ? &cpuset_context_nmask : NULL);
	if (ret < 0) {
		kmem_cache_free(policy_cache, policy);
		return ERR_PTR(ret);
	}
	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol,
				 const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
			    *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}

	pol->v.nodes = tmp;
	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = first_node(tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();
	}
}

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/* Migrate a policy to a different set of nodes */
static void mpol_rebind_policy(struct mempolicy *pol,
			       const nodemask_t *newmask)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;
	mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};
367
397874df 368static void gather_stats(struct page *, void *, int pte_dirty);
fc301289
CL
369static void migrate_page_add(struct page *page, struct list_head *pagelist,
370 unsigned long flags);
1a75a6c8 371
38e35860 372/* Scan through pages checking if pages follow certain conditions. */
b5810039 373static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
dc9aa5b9
CL
374 unsigned long addr, unsigned long end,
375 const nodemask_t *nodes, unsigned long flags,
38e35860 376 void *private)
1da177e4 377{
91612e0d
HD
378 pte_t *orig_pte;
379 pte_t *pte;
705e87c0 380 spinlock_t *ptl;
941150a3 381
705e87c0 382 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
91612e0d 383 do {
6aab341e 384 struct page *page;
25ba77c1 385 int nid;
91612e0d
HD
386
387 if (!pte_present(*pte))
1da177e4 388 continue;
6aab341e
LT
389 page = vm_normal_page(vma, addr, *pte);
390 if (!page)
1da177e4 391 continue;
053837fc
NP
392 /*
393 * The check for PageReserved here is important to avoid
394 * handling zero pages and other pages that may have been
395 * marked special by the system.
396 *
397 * If the PageReserved would not be checked here then f.e.
398 * the location of the zero page could have an influence
399 * on MPOL_MF_STRICT, zero pages would be counted for
400 * the per node stats, and there would be useless attempts
401 * to put zero pages on the migration list.
402 */
f4598c8b
CL
403 if (PageReserved(page))
404 continue;
6aab341e 405 nid = page_to_nid(page);
38e35860
CL
406 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
407 continue;
408
1a75a6c8 409 if (flags & MPOL_MF_STATS)
397874df 410 gather_stats(page, private, pte_dirty(*pte));
053837fc 411 else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
fc301289 412 migrate_page_add(page, private, flags);
38e35860
CL
413 else
414 break;
91612e0d 415 } while (pte++, addr += PAGE_SIZE, addr != end);
705e87c0 416 pte_unmap_unlock(orig_pte, ptl);
91612e0d
HD
417 return addr != end;
418}
419
b5810039 420static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
dc9aa5b9
CL
421 unsigned long addr, unsigned long end,
422 const nodemask_t *nodes, unsigned long flags,
38e35860 423 void *private)
91612e0d
HD
424{
425 pmd_t *pmd;
426 unsigned long next;
427
428 pmd = pmd_offset(pud, addr);
429 do {
430 next = pmd_addr_end(addr, end);
431 if (pmd_none_or_clear_bad(pmd))
432 continue;
dc9aa5b9 433 if (check_pte_range(vma, pmd, addr, next, nodes,
38e35860 434 flags, private))
91612e0d
HD
435 return -EIO;
436 } while (pmd++, addr = next, addr != end);
437 return 0;
438}
439
b5810039 440static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
dc9aa5b9
CL
441 unsigned long addr, unsigned long end,
442 const nodemask_t *nodes, unsigned long flags,
38e35860 443 void *private)
91612e0d
HD
444{
445 pud_t *pud;
446 unsigned long next;
447
448 pud = pud_offset(pgd, addr);
449 do {
450 next = pud_addr_end(addr, end);
451 if (pud_none_or_clear_bad(pud))
452 continue;
dc9aa5b9 453 if (check_pmd_range(vma, pud, addr, next, nodes,
38e35860 454 flags, private))
91612e0d
HD
455 return -EIO;
456 } while (pud++, addr = next, addr != end);
457 return 0;
458}
459
b5810039 460static inline int check_pgd_range(struct vm_area_struct *vma,
dc9aa5b9
CL
461 unsigned long addr, unsigned long end,
462 const nodemask_t *nodes, unsigned long flags,
38e35860 463 void *private)
91612e0d
HD
464{
465 pgd_t *pgd;
466 unsigned long next;
467
b5810039 468 pgd = pgd_offset(vma->vm_mm, addr);
91612e0d
HD
469 do {
470 next = pgd_addr_end(addr, end);
471 if (pgd_none_or_clear_bad(pgd))
472 continue;
dc9aa5b9 473 if (check_pud_range(vma, pgd, addr, next, nodes,
38e35860 474 flags, private))
91612e0d
HD
475 return -EIO;
476 } while (pgd++, addr = next, addr != end);
477 return 0;
1da177e4
LT
478}
479
dc9aa5b9
CL
480/*
481 * Check if all pages in a range are on a set of nodes.
482 * If pagelist != NULL then isolate pages from the LRU and
483 * put them on the pagelist.
484 */
1da177e4
LT
485static struct vm_area_struct *
486check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
38e35860 487 const nodemask_t *nodes, unsigned long flags, void *private)
1da177e4
LT
488{
489 int err;
490 struct vm_area_struct *first, *vma, *prev;
491
053837fc 492
1da177e4
LT
493 first = find_vma(mm, start);
494 if (!first)
495 return ERR_PTR(-EFAULT);
496 prev = NULL;
497 for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
dc9aa5b9
CL
498 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
499 if (!vma->vm_next && vma->vm_end < end)
500 return ERR_PTR(-EFAULT);
501 if (prev && prev->vm_end < vma->vm_start)
502 return ERR_PTR(-EFAULT);
503 }
504 if (!is_vm_hugetlb_page(vma) &&
505 ((flags & MPOL_MF_STRICT) ||
506 ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
507 vma_migratable(vma)))) {
5b952b3c 508 unsigned long endvma = vma->vm_end;
dc9aa5b9 509
5b952b3c
AK
510 if (endvma > end)
511 endvma = end;
512 if (vma->vm_start > start)
513 start = vma->vm_start;
dc9aa5b9 514 err = check_pgd_range(vma, start, endvma, nodes,
38e35860 515 flags, private);
1da177e4
LT
516 if (err) {
517 first = ERR_PTR(err);
518 break;
519 }
520 }
521 prev = vma;
522 }
523 return first;
524}
525
526/* Apply policy to a single VMA */
527static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
528{
529 int err = 0;
530 struct mempolicy *old = vma->vm_policy;
531
140d5a49 532 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
1da177e4
LT
533 vma->vm_start, vma->vm_end, vma->vm_pgoff,
534 vma->vm_ops, vma->vm_file,
535 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
536
537 if (vma->vm_ops && vma->vm_ops->set_policy)
538 err = vma->vm_ops->set_policy(vma, new);
539 if (!err) {
540 mpol_get(new);
541 vma->vm_policy = new;
f0be3d32 542 mpol_put(old);
1da177e4
LT
543 }
544 return err;
545}
546
547/* Step 2: apply policy to a range and do splits. */
548static int mbind_range(struct vm_area_struct *vma, unsigned long start,
549 unsigned long end, struct mempolicy *new)
550{
551 struct vm_area_struct *next;
552 int err;
553
554 err = 0;
555 for (; vma && vma->vm_start < end; vma = next) {
556 next = vma->vm_next;
557 if (vma->vm_start < start)
558 err = split_vma(vma->vm_mm, vma, start, 1);
559 if (!err && vma->vm_end > end)
560 err = split_vma(vma->vm_mm, vma, end, 0);
561 if (!err)
562 err = policy_vma(vma, new);
563 if (err)
564 break;
565 }
566 return err;
567}
568
c61afb18
PJ
569/*
570 * Update task->flags PF_MEMPOLICY bit: set iff non-default
571 * mempolicy. Allows more rapid checking of this (combined perhaps
572 * with other PF_* flag bits) on memory allocation hot code paths.
573 *
574 * If called from outside this file, the task 'p' should -only- be
575 * a newly forked child not yet visible on the task list, because
576 * manipulating the task flags of a visible task is not safe.
577 *
578 * The above limitation is why this routine has the funny name
579 * mpol_fix_fork_child_flag().
580 *
581 * It is also safe to call this with a task pointer of current,
582 * which the static wrapper mpol_set_task_struct_flag() does,
583 * for use within this file.
584 */
585
586void mpol_fix_fork_child_flag(struct task_struct *p)
587{
588 if (p->mempolicy)
589 p->flags |= PF_MEMPOLICY;
590 else
591 p->flags &= ~PF_MEMPOLICY;
592}
593
594static void mpol_set_task_struct_flag(void)
595{
596 mpol_fix_fork_child_flag(current);
597}
598
1da177e4 599/* Set the process memory policy */
028fec41
DR
600static long do_set_mempolicy(unsigned short mode, unsigned short flags,
601 nodemask_t *nodes)
1da177e4 602{
1da177e4 603 struct mempolicy *new;
f4e53d91 604 struct mm_struct *mm = current->mm;
1da177e4 605
028fec41 606 new = mpol_new(mode, flags, nodes);
1da177e4
LT
607 if (IS_ERR(new))
608 return PTR_ERR(new);
f4e53d91
LS
609
610 /*
611 * prevent changing our mempolicy while show_numa_maps()
612 * is using it.
613 * Note: do_set_mempolicy() can be called at init time
614 * with no 'mm'.
615 */
616 if (mm)
617 down_write(&mm->mmap_sem);
f0be3d32 618 mpol_put(current->mempolicy);
1da177e4 619 current->mempolicy = new;
c61afb18 620 mpol_set_task_struct_flag();
45c4745a 621 if (new && new->mode == MPOL_INTERLEAVE &&
f5b087b5 622 nodes_weight(new->v.nodes))
dfcd3c0d 623 current->il_next = first_node(new->v.nodes);
f4e53d91
LS
624 if (mm)
625 up_write(&mm->mmap_sem);
626
1da177e4
LT
627 return 0;
628}
629
bea904d5
LS
630/*
631 * Return nodemask for policy for get_mempolicy() query
632 */
633static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
1da177e4 634{
dfcd3c0d 635 nodes_clear(*nodes);
bea904d5
LS
636 if (p == &default_policy)
637 return;
638
45c4745a 639 switch (p->mode) {
19770b32
MG
640 case MPOL_BIND:
641 /* Fall through */
1da177e4 642 case MPOL_INTERLEAVE:
dfcd3c0d 643 *nodes = p->v.nodes;
1da177e4
LT
644 break;
645 case MPOL_PREFERRED:
fc36b8d3 646 if (!(p->flags & MPOL_F_LOCAL))
dfcd3c0d 647 node_set(p->v.preferred_node, *nodes);
53f2556b 648 /* else return empty node mask for local allocation */
1da177e4
LT
649 break;
650 default:
651 BUG();
652 }
653}
654
655static int lookup_node(struct mm_struct *mm, unsigned long addr)
656{
657 struct page *p;
658 int err;
659
660 err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
661 if (err >= 0) {
662 err = page_to_nid(p);
663 put_page(p);
664 }
665 return err;
666}
667
1da177e4 668/* Retrieve NUMA policy */
dbcb0f19
AB
669static long do_get_mempolicy(int *policy, nodemask_t *nmask,
670 unsigned long addr, unsigned long flags)
1da177e4 671{
8bccd85f 672 int err;
1da177e4
LT
673 struct mm_struct *mm = current->mm;
674 struct vm_area_struct *vma = NULL;
675 struct mempolicy *pol = current->mempolicy;
676
cf2a473c 677 cpuset_update_task_memory_state();
754af6f5
LS
678 if (flags &
679 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
1da177e4 680 return -EINVAL;
754af6f5
LS
681
682 if (flags & MPOL_F_MEMS_ALLOWED) {
683 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
684 return -EINVAL;
685 *policy = 0; /* just so it's initialized */
686 *nmask = cpuset_current_mems_allowed;
687 return 0;
688 }
689
1da177e4 690 if (flags & MPOL_F_ADDR) {
bea904d5
LS
691 /*
692 * Do NOT fall back to task policy if the
693 * vma/shared policy at addr is NULL. We
694 * want to return MPOL_DEFAULT in this case.
695 */
1da177e4
LT
696 down_read(&mm->mmap_sem);
697 vma = find_vma_intersection(mm, addr, addr+1);
698 if (!vma) {
699 up_read(&mm->mmap_sem);
700 return -EFAULT;
701 }
702 if (vma->vm_ops && vma->vm_ops->get_policy)
703 pol = vma->vm_ops->get_policy(vma, addr);
704 else
705 pol = vma->vm_policy;
706 } else if (addr)
707 return -EINVAL;
708
709 if (!pol)
bea904d5 710 pol = &default_policy; /* indicates default behavior */
1da177e4
LT
711
712 if (flags & MPOL_F_NODE) {
713 if (flags & MPOL_F_ADDR) {
714 err = lookup_node(mm, addr);
715 if (err < 0)
716 goto out;
8bccd85f 717 *policy = err;
1da177e4 718 } else if (pol == current->mempolicy &&
45c4745a 719 pol->mode == MPOL_INTERLEAVE) {
8bccd85f 720 *policy = current->il_next;
1da177e4
LT
721 } else {
722 err = -EINVAL;
723 goto out;
724 }
bea904d5
LS
725 } else {
726 *policy = pol == &default_policy ? MPOL_DEFAULT :
727 pol->mode;
d79df630
DR
728 /*
729 * Internal mempolicy flags must be masked off before exposing
730 * the policy to userspace.
731 */
732 *policy |= (pol->flags & MPOL_MODE_FLAGS);
bea904d5 733 }
1da177e4
LT
734
735 if (vma) {
736 up_read(&current->mm->mmap_sem);
737 vma = NULL;
738 }
739
1da177e4 740 err = 0;
8bccd85f 741 if (nmask)
bea904d5 742 get_policy_nodemask(pol, nmask);
1da177e4
LT
743
744 out:
52cd3b07 745 mpol_cond_put(pol);
1da177e4
LT
746 if (vma)
747 up_read(&current->mm->mmap_sem);
748 return err;
749}
750
b20a3503 751#ifdef CONFIG_MIGRATION
6ce3c4c0
CL
752/*
753 * page migration
754 */
fc301289
CL
755static void migrate_page_add(struct page *page, struct list_head *pagelist,
756 unsigned long flags)
6ce3c4c0
CL
757{
758 /*
fc301289 759 * Avoid migrating a page that is shared with others.
6ce3c4c0 760 */
62695a84
NP
761 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
762 if (!isolate_lru_page(page)) {
763 list_add_tail(&page->lru, pagelist);
764 }
765 }
7e2ab150 766}
6ce3c4c0 767
742755a1 768static struct page *new_node_page(struct page *page, unsigned long node, int **x)
95a402c3 769{
769848c0 770 return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
95a402c3
CL
771}
772
7e2ab150
CL
773/*
774 * Migrate pages from one node to a target node.
775 * Returns error or the number of pages not migrated.
776 */
dbcb0f19
AB
777static int migrate_to_node(struct mm_struct *mm, int source, int dest,
778 int flags)
7e2ab150
CL
779{
780 nodemask_t nmask;
781 LIST_HEAD(pagelist);
782 int err = 0;
783
784 nodes_clear(nmask);
785 node_set(source, nmask);
6ce3c4c0 786
7e2ab150
CL
787 check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
788 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
789
aaa994b3 790 if (!list_empty(&pagelist))
95a402c3
CL
791 err = migrate_pages(&pagelist, new_node_page, dest);
792
7e2ab150 793 return err;
6ce3c4c0
CL
794}
795
/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
	if (err)
		goto out;

/*
 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
 * bit in 'tmp', and return that <source, dest> pair for migration.
 * The pair of nodemasks 'to' and 'from' define the map.
 *
 * If no pair of bits is found that way, fallback to picking some
 * pair of 'source' and 'dest' bits that are not the same.  If the
 * 'source' and 'dest' bits are the same, this represents a node
 * that will be migrating to itself, so no pages need move.
 *
 * If no bits are left in 'tmp', or if all remaining bits left
 * in 'tmp' correspond to the same bit in 'to', return false
 * (nothing left to migrate).
 *
 * This lets us pick a pair of nodes to migrate between, such that
 * if possible the dest node is not already occupied by some other
 * source node, minimizing the risk of overloading the memory on a
 * node that would happen if we migrated incoming memory to a node
 * before migrating outgoing memory source that same node.
 *
 * A single scan of tmp is sufficient.  As we go, we remember the
 * most recent <s, d> pair that moved (s != d).  If we find a pair
 * that not only moved, but what's better, moved to an empty slot
 * (d is not set in tmp), then we break out then, with that pair.
 * Otherwise when we finish scanning from_tmp, we at least have the
 * most recent <s, d> pair that moved.  If we get all the way through
 * the scan of tmp without finding any node that moved, much less
 * moved to an empty node, then there is nothing left worth migrating.
 */
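/*
 * Worked example (illustrative): with from_nodes = {0,1} and to_nodes =
 * {1,2}, the first scan sees 0 -> 1 (dest 1 is still a remaining source,
 * so keep looking) and then 1 -> 2 (dest 2 is not a remaining source), so
 * node 1 is drained into node 2 first; only then are node 0's pages moved
 * to node 1, instead of piling them onto node 1 before it has been emptied.
 */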
d4984711 849
7e2ab150
CL
850 tmp = *from_nodes;
851 while (!nodes_empty(tmp)) {
852 int s,d;
853 int source = -1;
854 int dest = 0;
855
856 for_each_node_mask(s, tmp) {
857 d = node_remap(s, *from_nodes, *to_nodes);
858 if (s == d)
859 continue;
860
861 source = s; /* Node moved. Memorize */
862 dest = d;
863
864 /* dest not in remaining from nodes? */
865 if (!node_isset(dest, tmp))
866 break;
867 }
868 if (source == -1)
869 break;
870
871 node_clear(source, tmp);
872 err = migrate_to_node(mm, source, dest, flags);
873 if (err > 0)
874 busy += err;
875 if (err < 0)
876 break;
39743889 877 }
7b2259b3 878out:
39743889 879 up_read(&mm->mmap_sem);
7e2ab150
CL
880 if (err < 0)
881 return err;
882 return busy;
b20a3503
CL
883
884}
885
3ad33b24
LS
886/*
887 * Allocate a new page for page migration based on vma policy.
888 * Start assuming that page is mapped by vma pointed to by @private.
889 * Search forward from there, if not. N.B., this assumes that the
890 * list of pages handed to migrate_pages()--which is how we get here--
891 * is in virtual address order.
892 */
742755a1 893static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
95a402c3
CL
894{
895 struct vm_area_struct *vma = (struct vm_area_struct *)private;
3ad33b24 896 unsigned long uninitialized_var(address);
95a402c3 897
3ad33b24
LS
898 while (vma) {
899 address = page_address_in_vma(page, vma);
900 if (address != -EFAULT)
901 break;
902 vma = vma->vm_next;
903 }
904
905 /*
906 * if !vma, alloc_page_vma() will use task or system default policy
907 */
908 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
95a402c3 909}
b20a3503
CL
910#else
911
912static void migrate_page_add(struct page *page, struct list_head *pagelist,
913 unsigned long flags)
914{
39743889
CL
915}
916
b20a3503
CL
917int do_migrate_pages(struct mm_struct *mm,
918 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
919{
920 return -ENOSYS;
921}
95a402c3 922
69939749 923static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
95a402c3
CL
924{
925 return NULL;
926}
b20a3503
CL
927#endif
928
dbcb0f19 929static long do_mbind(unsigned long start, unsigned long len,
028fec41
DR
930 unsigned short mode, unsigned short mode_flags,
931 nodemask_t *nmask, unsigned long flags)
6ce3c4c0
CL
932{
933 struct vm_area_struct *vma;
934 struct mm_struct *mm = current->mm;
935 struct mempolicy *new;
936 unsigned long end;
937 int err;
938 LIST_HEAD(pagelist);
939
a3b51e01
DR
940 if (flags & ~(unsigned long)(MPOL_MF_STRICT |
941 MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
6ce3c4c0 942 return -EINVAL;
74c00241 943 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
6ce3c4c0
CL
944 return -EPERM;
945
946 if (start & ~PAGE_MASK)
947 return -EINVAL;
948
949 if (mode == MPOL_DEFAULT)
950 flags &= ~MPOL_MF_STRICT;
951
952 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
953 end = start + len;
954
955 if (end < start)
956 return -EINVAL;
957 if (end == start)
958 return 0;
959
028fec41 960 new = mpol_new(mode, mode_flags, nmask);
6ce3c4c0
CL
961 if (IS_ERR(new))
962 return PTR_ERR(new);
963
964 /*
965 * If we are using the default policy then operation
966 * on discontinuous address spaces is okay after all
967 */
968 if (!new)
969 flags |= MPOL_MF_DISCONTIG_OK;
970
028fec41
DR
971 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
972 start, start + len, mode, mode_flags,
973 nmask ? nodes_addr(*nmask)[0] : -1);
6ce3c4c0 974
0aedadf9
CL
975 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
976
977 err = migrate_prep();
978 if (err)
979 return err;
980 }
6ce3c4c0
CL
981 down_write(&mm->mmap_sem);
982 vma = check_range(mm, start, end, nmask,
983 flags | MPOL_MF_INVERT, &pagelist);
984
985 err = PTR_ERR(vma);
986 if (!IS_ERR(vma)) {
987 int nr_failed = 0;
988
989 err = mbind_range(vma, start, end, new);
7e2ab150 990
6ce3c4c0 991 if (!list_empty(&pagelist))
95a402c3
CL
992 nr_failed = migrate_pages(&pagelist, new_vma_page,
993 (unsigned long)vma);
6ce3c4c0
CL
994
995 if (!err && nr_failed && (flags & MPOL_MF_STRICT))
996 err = -EIO;
997 }
b20a3503 998
6ce3c4c0 999 up_write(&mm->mmap_sem);
f0be3d32 1000 mpol_put(new);
6ce3c4c0
CL
1001 return err;
1002}
1003
8bccd85f
CL
1004/*
1005 * User space interface with variable sized bitmaps for nodelists.
1006 */
1007
1008/* Copy a node mask from user space. */
39743889 1009static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
8bccd85f
CL
1010 unsigned long maxnode)
1011{
1012 unsigned long k;
1013 unsigned long nlongs;
1014 unsigned long endmask;
1015
1016 --maxnode;
1017 nodes_clear(*nodes);
1018 if (maxnode == 0 || !nmask)
1019 return 0;
a9c930ba 1020 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
636f13c1 1021 return -EINVAL;
8bccd85f
CL
1022
1023 nlongs = BITS_TO_LONGS(maxnode);
1024 if ((maxnode % BITS_PER_LONG) == 0)
1025 endmask = ~0UL;
1026 else
1027 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1028
1029 /* When the user specified more nodes than supported just check
1030 if the non supported part is all zero. */
1031 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1032 if (nlongs > PAGE_SIZE/sizeof(long))
1033 return -EINVAL;
1034 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1035 unsigned long t;
1036 if (get_user(t, nmask + k))
1037 return -EFAULT;
1038 if (k == nlongs - 1) {
1039 if (t & endmask)
1040 return -EINVAL;
1041 } else if (t)
1042 return -EINVAL;
1043 }
1044 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1045 endmask = ~0UL;
1046 }
1047
1048 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1049 return -EFAULT;
1050 nodes_addr(*nodes)[nlongs-1] &= endmask;
1051 return 0;
1052}
1053
1054/* Copy a kernel node mask to user space */
1055static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1056 nodemask_t *nodes)
1057{
1058 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1059 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1060
1061 if (copy > nbytes) {
1062 if (copy > PAGE_SIZE)
1063 return -EINVAL;
1064 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1065 return -EFAULT;
1066 copy = nbytes;
1067 }
1068 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1069}
1070
1071asmlinkage long sys_mbind(unsigned long start, unsigned long len,
1072 unsigned long mode,
1073 unsigned long __user *nmask, unsigned long maxnode,
1074 unsigned flags)
1075{
1076 nodemask_t nodes;
1077 int err;
028fec41 1078 unsigned short mode_flags;
8bccd85f 1079
028fec41
DR
1080 mode_flags = mode & MPOL_MODE_FLAGS;
1081 mode &= ~MPOL_MODE_FLAGS;
a3b51e01
DR
1082 if (mode >= MPOL_MAX)
1083 return -EINVAL;
4c50bc01
DR
1084 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1085 (mode_flags & MPOL_F_RELATIVE_NODES))
1086 return -EINVAL;
8bccd85f
CL
1087 err = get_nodes(&nodes, nmask, maxnode);
1088 if (err)
1089 return err;
028fec41 1090 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
8bccd85f
CL
1091}
1092
1093/* Set the process memory policy */
1094asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
1095 unsigned long maxnode)
1096{
1097 int err;
1098 nodemask_t nodes;
028fec41 1099 unsigned short flags;
8bccd85f 1100
028fec41
DR
1101 flags = mode & MPOL_MODE_FLAGS;
1102 mode &= ~MPOL_MODE_FLAGS;
1103 if ((unsigned int)mode >= MPOL_MAX)
8bccd85f 1104 return -EINVAL;
4c50bc01
DR
1105 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1106 return -EINVAL;
8bccd85f
CL
1107 err = get_nodes(&nodes, nmask, maxnode);
1108 if (err)
1109 return err;
028fec41 1110 return do_set_mempolicy(mode, flags, &nodes);
8bccd85f
CL
1111}
1112
39743889
CL
1113asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
1114 const unsigned long __user *old_nodes,
1115 const unsigned long __user *new_nodes)
1116{
1117 struct mm_struct *mm;
1118 struct task_struct *task;
1119 nodemask_t old;
1120 nodemask_t new;
1121 nodemask_t task_nodes;
1122 int err;
1123
1124 err = get_nodes(&old, old_nodes, maxnode);
1125 if (err)
1126 return err;
1127
1128 err = get_nodes(&new, new_nodes, maxnode);
1129 if (err)
1130 return err;
1131
1132 /* Find the mm_struct */
1133 read_lock(&tasklist_lock);
228ebcbe 1134 task = pid ? find_task_by_vpid(pid) : current;
39743889
CL
1135 if (!task) {
1136 read_unlock(&tasklist_lock);
1137 return -ESRCH;
1138 }
1139 mm = get_task_mm(task);
1140 read_unlock(&tasklist_lock);
1141
1142 if (!mm)
1143 return -EINVAL;
1144
1145 /*
1146 * Check if this process has the right to modify the specified
1147 * process. The right exists if the process has administrative
7f927fcc 1148 * capabilities, superuser privileges or the same
39743889
CL
1149 * userid as the target process.
1150 */
1151 if ((current->euid != task->suid) && (current->euid != task->uid) &&
1152 (current->uid != task->suid) && (current->uid != task->uid) &&
74c00241 1153 !capable(CAP_SYS_NICE)) {
39743889
CL
1154 err = -EPERM;
1155 goto out;
1156 }
1157
1158 task_nodes = cpuset_mems_allowed(task);
1159 /* Is the user allowed to access the target nodes? */
74c00241 1160 if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
39743889
CL
1161 err = -EPERM;
1162 goto out;
1163 }
1164
37b07e41 1165 if (!nodes_subset(new, node_states[N_HIGH_MEMORY])) {
3b42d28b
CL
1166 err = -EINVAL;
1167 goto out;
1168 }
1169
86c3a764
DQ
1170 err = security_task_movememory(task);
1171 if (err)
1172 goto out;
1173
511030bc 1174 err = do_migrate_pages(mm, &old, &new,
74c00241 1175 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
39743889
CL
1176out:
1177 mmput(mm);
1178 return err;
1179}
1180
1181
8bccd85f
CL
1182/* Retrieve NUMA policy */
1183asmlinkage long sys_get_mempolicy(int __user *policy,
1184 unsigned long __user *nmask,
1185 unsigned long maxnode,
1186 unsigned long addr, unsigned long flags)
1187{
dbcb0f19
AB
1188 int err;
1189 int uninitialized_var(pval);
8bccd85f
CL
1190 nodemask_t nodes;
1191
1192 if (nmask != NULL && maxnode < MAX_NUMNODES)
1193 return -EINVAL;
1194
1195 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1196
1197 if (err)
1198 return err;
1199
1200 if (policy && put_user(pval, policy))
1201 return -EFAULT;
1202
1203 if (nmask)
1204 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1205
1206 return err;
1207}
1208
1da177e4
LT
1209#ifdef CONFIG_COMPAT
1210
1211asmlinkage long compat_sys_get_mempolicy(int __user *policy,
1212 compat_ulong_t __user *nmask,
1213 compat_ulong_t maxnode,
1214 compat_ulong_t addr, compat_ulong_t flags)
1215{
1216 long err;
1217 unsigned long __user *nm = NULL;
1218 unsigned long nr_bits, alloc_size;
1219 DECLARE_BITMAP(bm, MAX_NUMNODES);
1220
1221 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1222 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1223
1224 if (nmask)
1225 nm = compat_alloc_user_space(alloc_size);
1226
1227 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1228
1229 if (!err && nmask) {
1230 err = copy_from_user(bm, nm, alloc_size);
1231 /* ensure entire bitmap is zeroed */
1232 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1233 err |= compat_put_bitmap(nmask, bm, nr_bits);
1234 }
1235
1236 return err;
1237}
1238
1239asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
1240 compat_ulong_t maxnode)
1241{
1242 long err = 0;
1243 unsigned long __user *nm = NULL;
1244 unsigned long nr_bits, alloc_size;
1245 DECLARE_BITMAP(bm, MAX_NUMNODES);
1246
1247 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1248 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1249
1250 if (nmask) {
1251 err = compat_get_bitmap(bm, nmask, nr_bits);
1252 nm = compat_alloc_user_space(alloc_size);
1253 err |= copy_to_user(nm, bm, alloc_size);
1254 }
1255
1256 if (err)
1257 return -EFAULT;
1258
1259 return sys_set_mempolicy(mode, nm, nr_bits+1);
1260}
1261
1262asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
1263 compat_ulong_t mode, compat_ulong_t __user *nmask,
1264 compat_ulong_t maxnode, compat_ulong_t flags)
1265{
1266 long err = 0;
1267 unsigned long __user *nm = NULL;
1268 unsigned long nr_bits, alloc_size;
dfcd3c0d 1269 nodemask_t bm;
1da177e4
LT
1270
1271 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1272 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1273
1274 if (nmask) {
dfcd3c0d 1275 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
1da177e4 1276 nm = compat_alloc_user_space(alloc_size);
dfcd3c0d 1277 err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
1da177e4
LT
1278 }
1279
1280 if (err)
1281 return -EFAULT;
1282
1283 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1284}
1285
1286#endif
1287
/*
 * get_vma_policy(@task, @vma, @addr)
 * @task - task for fallback if vma policy == default
 * @vma - virtual memory area whose policy is sought
 * @addr - address in @vma for shared policy lookup
 *
 * Returns effective policy for a VMA at specified address.
 * Falls back to @task or system default policy, as necessary.
 * Current or other task's task mempolicy and non-shared vma policies
 * are protected by the task's mmap_sem, which must be held for read by
 * the caller.
 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
 * count--added by the get_policy() vm_op, as appropriate--to protect against
 * freeing by another task.  It is the caller's responsibility to free the
 * extra reference for shared policies.
 */
ae4d8c16 1304static struct mempolicy *get_vma_policy(struct task_struct *task,
48fce342 1305 struct vm_area_struct *vma, unsigned long addr)
1da177e4 1306{
6e21c8f1 1307 struct mempolicy *pol = task->mempolicy;
1da177e4
LT
1308
1309 if (vma) {
480eccf9 1310 if (vma->vm_ops && vma->vm_ops->get_policy) {
ae4d8c16
LS
1311 struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
1312 addr);
1313 if (vpol)
1314 pol = vpol;
bea904d5 1315 } else if (vma->vm_policy)
1da177e4
LT
1316 pol = vma->vm_policy;
1317 }
1318 if (!pol)
1319 pol = &default_policy;
1320 return pol;
1321}
1322
52cd3b07
LS
1323/*
1324 * Return a nodemask representing a mempolicy for filtering nodes for
1325 * page allocation
1326 */
1327static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
19770b32
MG
1328{
1329 /* Lower zones don't get a nodemask applied for MPOL_BIND */
45c4745a 1330 if (unlikely(policy->mode == MPOL_BIND) &&
19770b32
MG
1331 gfp_zone(gfp) >= policy_zone &&
1332 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1333 return &policy->v.nodes;
1334
1335 return NULL;
1336}
1337
52cd3b07
LS
1338/* Return a zonelist indicated by gfp for node representing a mempolicy */
1339static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy)
1da177e4 1340{
fc36b8d3 1341 int nd = numa_node_id();
1da177e4 1342
45c4745a 1343 switch (policy->mode) {
1da177e4 1344 case MPOL_PREFERRED:
fc36b8d3
LS
1345 if (!(policy->flags & MPOL_F_LOCAL))
1346 nd = policy->v.preferred_node;
1da177e4
LT
1347 break;
1348 case MPOL_BIND:
19770b32 1349 /*
52cd3b07
LS
1350 * Normally, MPOL_BIND allocations are node-local within the
1351 * allowed nodemask. However, if __GFP_THISNODE is set and the
1352 * current node is part of the mask, we use the zonelist for
1353 * the first node in the mask instead.
19770b32 1354 */
19770b32
MG
1355 if (unlikely(gfp & __GFP_THISNODE) &&
1356 unlikely(!node_isset(nd, policy->v.nodes)))
1357 nd = first_node(policy->v.nodes);
1358 break;
1da177e4 1359 case MPOL_INTERLEAVE: /* should not happen */
1da177e4
LT
1360 break;
1361 default:
1da177e4
LT
1362 BUG();
1363 }
0e88460d 1364 return node_zonelist(nd, gfp);
1da177e4
LT
1365}
1366
1367/* Do dynamic interleaving for a process */
1368static unsigned interleave_nodes(struct mempolicy *policy)
1369{
1370 unsigned nid, next;
1371 struct task_struct *me = current;
1372
1373 nid = me->il_next;
dfcd3c0d 1374 next = next_node(nid, policy->v.nodes);
1da177e4 1375 if (next >= MAX_NUMNODES)
dfcd3c0d 1376 next = first_node(policy->v.nodes);
f5b087b5
DR
1377 if (next < MAX_NUMNODES)
1378 me->il_next = next;
1da177e4
LT
1379 return nid;
1380}
1381
/*
 * Depending on the memory policy provide a node from which to allocate the
 * next slab entry.
 * @policy must be protected from freeing by the caller.  If @policy is
 * the current task's mempolicy, this protection is implicit, as only the
 * task can change its policy.  The system default policy requires no
 * such protection.
 */
1390unsigned slab_node(struct mempolicy *policy)
1391{
fc36b8d3 1392 if (!policy || policy->flags & MPOL_F_LOCAL)
bea904d5
LS
1393 return numa_node_id();
1394
1395 switch (policy->mode) {
1396 case MPOL_PREFERRED:
fc36b8d3
LS
1397 /*
1398 * handled MPOL_F_LOCAL above
1399 */
1400 return policy->v.preferred_node;
765c4507 1401
dc85da15
CL
1402 case MPOL_INTERLEAVE:
1403 return interleave_nodes(policy);
1404
dd1a239f 1405 case MPOL_BIND: {
dc85da15
CL
1406 /*
1407 * Follow bind policy behavior and start allocation at the
1408 * first node.
1409 */
19770b32
MG
1410 struct zonelist *zonelist;
1411 struct zone *zone;
1412 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1413 zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
1414 (void)first_zones_zonelist(zonelist, highest_zoneidx,
1415 &policy->v.nodes,
1416 &zone);
1417 return zone->node;
dd1a239f 1418 }
dc85da15 1419
dc85da15 1420 default:
bea904d5 1421 BUG();
dc85da15
CL
1422 }
1423}
1424
1da177e4
LT
1425/* Do static interleaving for a VMA with known offset. */
1426static unsigned offset_il_node(struct mempolicy *pol,
1427 struct vm_area_struct *vma, unsigned long off)
1428{
dfcd3c0d 1429 unsigned nnodes = nodes_weight(pol->v.nodes);
f5b087b5 1430 unsigned target;
1da177e4
LT
1431 int c;
1432 int nid = -1;
1433
f5b087b5
DR
1434 if (!nnodes)
1435 return numa_node_id();
1436 target = (unsigned int)off % nnodes;
1da177e4
LT
1437 c = 0;
1438 do {
dfcd3c0d 1439 nid = next_node(nid, pol->v.nodes);
1da177e4
LT
1440 c++;
1441 } while (c <= target);
1da177e4
LT
1442 return nid;
1443}
1444
5da7ca86
CL
1445/* Determine a node number for interleave */
1446static inline unsigned interleave_nid(struct mempolicy *pol,
1447 struct vm_area_struct *vma, unsigned long addr, int shift)
1448{
1449 if (vma) {
1450 unsigned long off;
1451
3b98b087
NA
1452 /*
1453 * for small pages, there is no difference between
1454 * shift and PAGE_SHIFT, so the bit-shift is safe.
1455 * for huge pages, since vm_pgoff is in units of small
1456 * pages, we need to shift off the always 0 bits to get
1457 * a useful offset.
1458 */
1459 BUG_ON(shift < PAGE_SHIFT);
1460 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
5da7ca86
CL
1461 off += (addr - vma->vm_start) >> shift;
1462 return offset_il_node(pol, vma, off);
1463 } else
1464 return interleave_nodes(pol);
1465}
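
/*
 * Worked example (illustrative): for a VMA interleave policy over nodes
 * {0,2,5} (weight 3), a page at interleave offset 7 maps to 7 % 3 == 1,
 * i.e. the second set node counting from zero, so it is allocated on
 * node 2.  The same offset always yields the same node, which is what
 * makes VMA interleaving stable across faults.
 */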
1466
00ac59ad 1467#ifdef CONFIG_HUGETLBFS
480eccf9
LS
1468/*
1469 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1470 * @vma = virtual memory area whose policy is sought
1471 * @addr = address in @vma for shared policy lookup and interleave policy
1472 * @gfp_flags = for requested zone
19770b32
MG
1473 * @mpol = pointer to mempolicy pointer for reference counted mempolicy
1474 * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
480eccf9 1475 *
52cd3b07
LS
1476 * Returns a zonelist suitable for a huge page allocation and a pointer
1477 * to the struct mempolicy for conditional unref after allocation.
1478 * If the effective policy is 'BIND, returns a pointer to the mempolicy's
1479 * @nodemask for filtering the zonelist.
480eccf9 1480 */
396faf03 1481struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
19770b32
MG
1482 gfp_t gfp_flags, struct mempolicy **mpol,
1483 nodemask_t **nodemask)
5da7ca86 1484{
480eccf9 1485 struct zonelist *zl;
5da7ca86 1486
52cd3b07 1487 *mpol = get_vma_policy(current, vma, addr);
19770b32 1488 *nodemask = NULL; /* assume !MPOL_BIND */
5da7ca86 1489
52cd3b07
LS
1490 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1491 zl = node_zonelist(interleave_nid(*mpol, vma, addr,
a5516438 1492 huge_page_shift(hstate_vma(vma))), gfp_flags);
52cd3b07
LS
1493 } else {
1494 zl = policy_zonelist(gfp_flags, *mpol);
1495 if ((*mpol)->mode == MPOL_BIND)
1496 *nodemask = &(*mpol)->v.nodes;
480eccf9
LS
1497 }
1498 return zl;
5da7ca86 1499}
00ac59ad 1500#endif
5da7ca86 1501
1da177e4
LT
1502/* Allocate a page in interleaved policy.
1503 Own path because it needs to do special accounting. */
662f3a0b
AK
1504static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1505 unsigned nid)
1da177e4
LT
1506{
1507 struct zonelist *zl;
1508 struct page *page;
1509
0e88460d 1510 zl = node_zonelist(nid, gfp);
1da177e4 1511 page = __alloc_pages(gfp, order, zl);
dd1a239f 1512 if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
ca889e6c 1513 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1da177e4
LT
1514 return page;
1515}
1516
/**
 * alloc_page_vma - Allocate a page for a VMA.
 *
 * @gfp:
 *      %GFP_USER    user allocation.
 *      %GFP_KERNEL  kernel allocations,
 *      %GFP_HIGHMEM highmem/user allocations,
 *      %GFP_FS      allocation should not call back into a file system.
 *      %GFP_ATOMIC  don't sleep.
 *
 * @vma:  Pointer to VMA or NULL if not available.
 * @addr: Virtual Address of the allocation. Must be inside the VMA.
 *
 * This function allocates a page from the kernel page pool and applies
 * a NUMA policy associated with the VMA or the current process.
 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
 * mm_struct of the VMA to prevent it from going away.  Should be used for
 * all allocations for pages that will be mapped into user space.  Returns
 * NULL when no page can be allocated.
 *
 * Should be called with the mmap_sem of the vma held.
 */
1539struct page *
dd0fc66f 1540alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
1da177e4 1541{
6e21c8f1 1542 struct mempolicy *pol = get_vma_policy(current, vma, addr);
480eccf9 1543 struct zonelist *zl;
1da177e4 1544
cf2a473c 1545 cpuset_update_task_memory_state();
1da177e4 1546
45c4745a 1547 if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
1da177e4 1548 unsigned nid;
5da7ca86
CL
1549
1550 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
52cd3b07 1551 mpol_cond_put(pol);
1da177e4
LT
1552 return alloc_page_interleave(gfp, 0, nid);
1553 }
52cd3b07
LS
1554 zl = policy_zonelist(gfp, pol);
1555 if (unlikely(mpol_needs_cond_ref(pol))) {
480eccf9 1556 /*
52cd3b07 1557 * slow path: ref counted shared policy
480eccf9 1558 */
19770b32 1559 struct page *page = __alloc_pages_nodemask(gfp, 0,
52cd3b07 1560 zl, policy_nodemask(gfp, pol));
f0be3d32 1561 __mpol_put(pol);
480eccf9
LS
1562 return page;
1563 }
1564 /*
1565 * fast path: default or task policy
1566 */
52cd3b07 1567 return __alloc_pages_nodemask(gfp, 0, zl, policy_nodemask(gfp, pol));
1da177e4
LT
1568}
1569
/**
 * alloc_pages_current - Allocate pages.
 *
 * @gfp:
 *      %GFP_USER    user allocation,
 *      %GFP_KERNEL  kernel allocation,
 *      %GFP_HIGHMEM highmem allocation,
 *      %GFP_FS      don't call back into a file system.
 *      %GFP_ATOMIC  don't sleep.
 * @order: Power of two of allocation size in pages. 0 is a single page.
 *
 * Allocate a page from the kernel page pool.  When not in interrupt
 * context, apply the current process' NUMA policy.
 * Returns NULL when no page can be allocated.
 *
 * Don't call cpuset_update_task_memory_state() unless
 * 1) it's ok to take cpuset_sem (can WAIT), and
 * 2) allocating for current task (not interrupt).
 */
dd0fc66f 1589struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1da177e4
LT
1590{
1591 struct mempolicy *pol = current->mempolicy;
1592
1593 if ((gfp & __GFP_WAIT) && !in_interrupt())
cf2a473c 1594 cpuset_update_task_memory_state();
9b819d20 1595 if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
1da177e4 1596 pol = &default_policy;
52cd3b07
LS
1597
1598 /*
1599 * No reference counting needed for current->mempolicy
1600 * nor system default_policy
1601 */
45c4745a 1602 if (pol->mode == MPOL_INTERLEAVE)
1da177e4 1603 return alloc_page_interleave(gfp, order, interleave_nodes(pol));
19770b32 1604 return __alloc_pages_nodemask(gfp, order,
52cd3b07 1605 policy_zonelist(gfp, pol), policy_nodemask(gfp, pol));
1da177e4
LT
1606}
1607EXPORT_SYMBOL(alloc_pages_current);
1608
4225399a 1609/*
846a16bf 1610 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
4225399a
PJ
1611 * rebinds the mempolicy its copying by calling mpol_rebind_policy()
1612 * with the mems_allowed returned by cpuset_mems_allowed(). This
1613 * keeps mempolicies cpuset relative after its cpuset moves. See
1614 * further kernel/cpuset.c update_nodemask().
1615 */
4225399a 1616
846a16bf
LS
1617/* Slow path of a mempolicy duplicate */
1618struct mempolicy *__mpol_dup(struct mempolicy *old)
1da177e4
LT
1619{
1620 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
1621
1622 if (!new)
1623 return ERR_PTR(-ENOMEM);
4225399a
PJ
1624 if (current_cpuset_is_being_rebound()) {
1625 nodemask_t mems = cpuset_mems_allowed(current);
1626 mpol_rebind_policy(old, &mems);
1627 }
1da177e4
LT
1628 *new = *old;
1629 atomic_set(&new->refcnt, 1);
1da177e4
LT
1630 return new;
1631}
1632
52cd3b07
LS
1633/*
1634 * If *frompol needs [has] an extra ref, copy *frompol to *tompol ,
1635 * eliminate the * MPOL_F_* flags that require conditional ref and
1636 * [NOTE!!!] drop the extra ref. Not safe to reference *frompol directly
1637 * after return. Use the returned value.
1638 *
1639 * Allows use of a mempolicy for, e.g., multiple allocations with a single
1640 * policy lookup, even if the policy needs/has extra ref on lookup.
1641 * shmem_readahead needs this.
1642 */
1643struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
1644 struct mempolicy *frompol)
1645{
1646 if (!mpol_needs_cond_ref(frompol))
1647 return frompol;
1648
1649 *tompol = *frompol;
1650 tompol->flags &= ~MPOL_F_SHARED; /* copy doesn't need unref */
1651 __mpol_put(frompol);
1652 return tompol;
1653}
1654
f5b087b5
DR
1655static int mpol_match_intent(const struct mempolicy *a,
1656 const struct mempolicy *b)
1657{
1658 if (a->flags != b->flags)
1659 return 0;
1660 if (!mpol_store_user_nodemask(a))
1661 return 1;
1662 return nodes_equal(a->w.user_nodemask, b->w.user_nodemask);
1663}
1664
1da177e4
LT
1665/* Slow path of a mempolicy comparison */
1666int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1667{
1668 if (!a || !b)
1669 return 0;
45c4745a 1670 if (a->mode != b->mode)
1da177e4 1671 return 0;
45c4745a 1672 if (a->mode != MPOL_DEFAULT && !mpol_match_intent(a, b))
f5b087b5 1673 return 0;
45c4745a 1674 switch (a->mode) {
19770b32
MG
1675 case MPOL_BIND:
1676 /* Fall through */
1da177e4 1677 case MPOL_INTERLEAVE:
dfcd3c0d 1678 return nodes_equal(a->v.nodes, b->v.nodes);
1da177e4 1679 case MPOL_PREFERRED:
fc36b8d3
LS
1680 return a->v.preferred_node == b->v.preferred_node &&
1681 a->flags == b->flags;
1da177e4
LT
1682 default:
1683 BUG();
1684 return 0;
1685 }
1686}
1687
1da177e4
LT
1688/*
1689 * Shared memory backing store policy support.
1690 *
1691 * Remember policies even when nobody has shared memory mapped.
1692 * The policies are kept in Red-Black tree linked from the inode.
1693 * They are protected by the sp->lock spinlock, which should be held
1694 * for any accesses to the tree.
1695 */
1696
1697/* lookup first element intersecting start-end */
1698/* Caller holds sp->lock */
1699static struct sp_node *
1700sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
1701{
1702 struct rb_node *n = sp->root.rb_node;
1703
1704 while (n) {
1705 struct sp_node *p = rb_entry(n, struct sp_node, nd);
1706
1707 if (start >= p->end)
1708 n = n->rb_right;
1709 else if (end <= p->start)
1710 n = n->rb_left;
1711 else
1712 break;
1713 }
1714 if (!n)
1715 return NULL;
1716 for (;;) {
1717 struct sp_node *w = NULL;
1718 struct rb_node *prev = rb_prev(n);
1719 if (!prev)
1720 break;
1721 w = rb_entry(prev, struct sp_node, nd);
1722 if (w->end <= start)
1723 break;
1724 n = prev;
1725 }
1726 return rb_entry(n, struct sp_node, nd);
1727}
1728
1729/* Insert a new shared policy into the list. */
1730/* Caller holds sp->lock */
1731static void sp_insert(struct shared_policy *sp, struct sp_node *new)
1732{
1733 struct rb_node **p = &sp->root.rb_node;
1734 struct rb_node *parent = NULL;
1735 struct sp_node *nd;
1736
1737 while (*p) {
1738 parent = *p;
1739 nd = rb_entry(parent, struct sp_node, nd);
1740 if (new->start < nd->start)
1741 p = &(*p)->rb_left;
1742 else if (new->end > nd->end)
1743 p = &(*p)->rb_right;
1744 else
1745 BUG();
1746 }
1747 rb_link_node(&new->nd, parent, p);
1748 rb_insert_color(&new->nd, &sp->root);
140d5a49 1749 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
45c4745a 1750 new->policy ? new->policy->mode : 0);
1da177e4
LT
1751}
1752
1753/* Find shared policy intersecting idx */
1754struct mempolicy *
1755mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
1756{
1757 struct mempolicy *pol = NULL;
1758 struct sp_node *sn;
1759
1760 if (!sp->root.rb_node)
1761 return NULL;
1762 spin_lock(&sp->lock);
1763 sn = sp_lookup(sp, idx, idx+1);
1764 if (sn) {
1765 mpol_get(sn->policy);
1766 pol = sn->policy;
1767 }
1768 spin_unlock(&sp->lock);
1769 return pol;
1770}
1771
1772static void sp_delete(struct shared_policy *sp, struct sp_node *n)
1773{
140d5a49 1774	pr_debug("deleting %lx-%lx\n", n->start, n->end);
1da177e4 1775 rb_erase(&n->nd, &sp->root);
f0be3d32 1776 mpol_put(n->policy);
1777 kmem_cache_free(sn_cache, n);
1778}
1779
1780static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
1781 struct mempolicy *pol)
1782{
1783 struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
1784
1785 if (!n)
1786 return NULL;
1787 n->start = start;
1788 n->end = end;
1789 mpol_get(pol);
aab0b102 1790 pol->flags |= MPOL_F_SHARED; /* for unref */
1791 n->policy = pol;
1792 return n;
1793}
1794
1795/* Replace a policy range.  sp_alloc() may sleep, so when an existing node must be split, sp->lock is dropped, new2 is allocated outside the lock and the scan restarts. */
1796static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
1797 unsigned long end, struct sp_node *new)
1798{
1799 struct sp_node *n, *new2 = NULL;
1800
1801restart:
1802 spin_lock(&sp->lock);
1803 n = sp_lookup(sp, start, end);
1804 /* Take care of old policies in the same range. */
1805 while (n && n->start < end) {
1806 struct rb_node *next = rb_next(&n->nd);
1807 if (n->start >= start) {
1808 if (n->end <= end)
1809 sp_delete(sp, n);
1810 else
1811 n->start = end;
1812 } else {
1813 /* Old policy spanning whole new range. */
1814 if (n->end > end) {
1815 if (!new2) {
1816 spin_unlock(&sp->lock);
1817 new2 = sp_alloc(end, n->end, n->policy);
1818 if (!new2)
1819 return -ENOMEM;
1820 goto restart;
1821 }
1822 n->end = start;
1823 sp_insert(sp, new2);
1824 new2 = NULL;
1825 break;
1826 } else
1827 n->end = start;
1828 }
1829 if (!next)
1830 break;
1831 n = rb_entry(next, struct sp_node, nd);
1832 }
1833 if (new)
1834 sp_insert(sp, new);
1835 spin_unlock(&sp->lock);
1836 if (new2) {
f0be3d32 1837 mpol_put(new2->policy);
1838 kmem_cache_free(sn_cache, new2);
1839 }
1840 return 0;
1841}
1842
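/*
 * Worked example (editor's note, not in the original file): with a single
 * node [0,10)->A in the tree, shared_policy_replace(sp, 3, 6, new) where
 * new is [3,6)->B first trims the old node to [0,3), inserts a second
 * node [6,10)->A (new2, allocated outside sp->lock via the restart path),
 * and finally inserts [3,6)->B, leaving [0,3)->A, [3,6)->B, [6,10)->A.
 */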
1843/**
1844 * mpol_shared_policy_init - initialize shared policy for inode
1845 * @sp: pointer to inode shared policy
1846 * @mpol: struct mempolicy to install
1847 *
1848 * Install non-NULL @mpol in inode's shared policy rb-tree.
1849 * On entry, the current task has a reference on a non-NULL @mpol.
1850 * This must be released on exit.
1851 */
1852void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
1853{
1854 sp->root = RB_ROOT; /* empty tree == default mempolicy */
1855 spin_lock_init(&sp->lock);
1856
1857 if (mpol) {
1858 struct vm_area_struct pvma;
1859 struct mempolicy *new;
1860
1861 /* contextualize the tmpfs mount point mempolicy */
1862 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
1863 mpol_put(mpol); /* drop our ref on sb mpol */
1864 if (IS_ERR(new))
1865 return; /* no valid nodemask intersection */
1866
1867 /* Create pseudo-vma that contains just the policy */
1868 memset(&pvma, 0, sizeof(struct vm_area_struct));
1869 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
1870 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
1871 mpol_put(new); /* drop initial ref */
1872 }
1873}
1874
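/*
 * Editor's illustrative sketch, not part of the original file: a tmpfs-like
 * filesystem would hand its mount-option mempolicy to each new inode
 * roughly like this.  example_sb_info, example_inode_info and
 * example_init_inode_policy() are hypothetical stand-ins for the
 * filesystem's own structures.
 */
struct example_sb_info {
	struct mempolicy *mpol;		/* from the "mpol=" mount option */
};

struct example_inode_info {
	struct shared_policy policy;
};

static void example_init_inode_policy(struct example_inode_info *info,
				      struct example_sb_info *sbinfo)
{
	/* take the reference that mpol_shared_policy_init() will consume */
	mpol_get(sbinfo->mpol);
	mpol_shared_policy_init(&info->policy, sbinfo->mpol);
}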
1875int mpol_set_shared_policy(struct shared_policy *info,
1876 struct vm_area_struct *vma, struct mempolicy *npol)
1877{
1878 int err;
1879 struct sp_node *new = NULL;
1880 unsigned long sz = vma_pages(vma);
1881
028fec41 1882 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
1da177e4 1883 vma->vm_pgoff,
45c4745a 1884 sz, npol ? npol->mode : -1,
028fec41 1885 npol ? npol->flags : -1,
140d5a49 1886 npol ? nodes_addr(npol->v.nodes)[0] : -1);
1887
1888 if (npol) {
1889 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
1890 if (!new)
1891 return -ENOMEM;
1892 }
1893 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
1894 if (err && new)
1895 kmem_cache_free(sn_cache, new);
1896 return err;
1897}
1898
1899/* Free a backing policy store on inode delete. */
1900void mpol_free_shared_policy(struct shared_policy *p)
1901{
1902 struct sp_node *n;
1903 struct rb_node *next;
1904
1905 if (!p->root.rb_node)
1906 return;
1907 spin_lock(&p->lock);
1908 next = rb_first(&p->root);
1909 while (next) {
1910 n = rb_entry(next, struct sp_node, nd);
1911 next = rb_next(&n->nd);
90c5029e 1912 rb_erase(&n->nd, &p->root);
f0be3d32 1913 mpol_put(n->policy);
1914 kmem_cache_free(sn_cache, n);
1915 }
1916 spin_unlock(&p->lock);
1917}
1918
1919/* assumes fs == KERNEL_DS */
1920void __init numa_policy_init(void)
1921{
1922 nodemask_t interleave_nodes;
1923 unsigned long largest = 0;
1924 int nid, prefer = 0;
1925
1926 policy_cache = kmem_cache_create("numa_policy",
1927 sizeof(struct mempolicy),
20c2df83 1928 0, SLAB_PANIC, NULL);
1929
1930 sn_cache = kmem_cache_create("shared_policy_node",
1931 sizeof(struct sp_node),
20c2df83 1932 0, SLAB_PANIC, NULL);
1da177e4 1933
1934 /*
1935 * Set interleaving policy for system init. Interleaving is only
1936 * enabled across suitably sized nodes (default is >= 16MB), or
1937 * fall back to the largest node if they're all smaller.
1938 */
1939 nodes_clear(interleave_nodes);
56bbd65d 1940 for_each_node_state(nid, N_HIGH_MEMORY) {
1941 unsigned long total_pages = node_present_pages(nid);
1942
1943 /* Preserve the largest node */
1944 if (largest < total_pages) {
1945 largest = total_pages;
1946 prefer = nid;
1947 }
1948
1949 /* Interleave this node? */
1950 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
1951 node_set(nid, interleave_nodes);
1952 }
1953
1954 /* All too small, use the largest */
1955 if (unlikely(nodes_empty(interleave_nodes)))
1956 node_set(prefer, interleave_nodes);
1da177e4 1957
028fec41 1958 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
 1959		printk(KERN_WARNING "numa_policy_init: interleaving failed\n");
1960}
1961
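/*
 * Worked example (editor's note, not in the original file): on a machine
 * whose nodes have 2 GB, 64 MB and 8 MB of present memory, only the first
 * two pass the 16 MB threshold, so init runs with MPOL_INTERLEAVE over
 * those two nodes; if every node were below 16 MB, the single largest node
 * would be used instead.
 */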
8bccd85f 1962/* Reset policy of current process to default */
1963void numa_default_policy(void)
1964{
028fec41 1965 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
1da177e4 1966}
68860ec1 1967
1968/*
1969 * Parse and format mempolicy from/to strings
1970 */
1971
1a75a6c8 1972/*
fc36b8d3 1973 * "local" is a pseudo-policy: MPOL_PREFERRED with the MPOL_F_LOCAL flag
3f226aa1 1974 * Used only for mpol_parse_str() and mpol_to_str()
1a75a6c8 1975 */
53f2556b 1976#define MPOL_LOCAL (MPOL_INTERLEAVE + 1)
15ad7cdc 1977static const char * const policy_types[] =
53f2556b 1978 { "default", "prefer", "bind", "interleave", "local" };
1a75a6c8 1979
1980
1981#ifdef CONFIG_TMPFS
1982/**
1983 * mpol_parse_str - parse string to mempolicy
1984 * @str: string containing mempolicy to parse
1985 * @mpol: pointer to struct mempolicy pointer, returned on success.
1986 * @no_context: flag whether to "contextualize" the mempolicy
1987 *
1988 * Format of input:
1989 * <mode>[=<flags>][:<nodelist>]
1990 *
1991 * if @no_context is true, save the input nodemask in w.user_nodemask in
1992 * the returned mempolicy. This will be used to "clone" the mempolicy in
1993 * a specific context [cpuset] at a later time. Used to parse tmpfs mpol
1994 * mount option. Note that if 'static' or 'relative' mode flags were
1995 * specified, the input nodemask will already have been saved. Saving
1996 * it again is redundant, but safe.
1997 *
1998 * On success, returns 0, else 1
095f1fc4 1999 */
71fe804b 2000int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
095f1fc4 2001{
2002 struct mempolicy *new = NULL;
2003 unsigned short uninitialized_var(mode);
2004 unsigned short uninitialized_var(mode_flags);
2005 nodemask_t nodes;
2006 char *nodelist = strchr(str, ':');
2007 char *flags = strchr(str, '=');
2008 int i;
2009 int err = 1;
2010
2011 if (nodelist) {
2012 /* NUL-terminate mode or flags string */
2013 *nodelist++ = '\0';
71fe804b 2014 if (nodelist_parse(nodelist, nodes))
095f1fc4 2015 goto out;
71fe804b 2016 if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY]))
095f1fc4 2017 goto out;
2018 } else
2019 nodes_clear(nodes);
2020
2021 if (flags)
2022 *flags++ = '\0'; /* terminate mode string */
2023
3f226aa1 2024 for (i = 0; i <= MPOL_LOCAL; i++) {
095f1fc4 2025 if (!strcmp(str, policy_types[i])) {
71fe804b 2026 mode = i;
2027 break;
2028 }
2029 }
3f226aa1 2030 if (i > MPOL_LOCAL)
2031 goto out;
2032
71fe804b 2033 switch (mode) {
095f1fc4 2034 case MPOL_PREFERRED:
2035 /*
2036 * Insist on a nodelist of one node only
2037 */
2038 if (nodelist) {
2039 char *rest = nodelist;
2040 while (isdigit(*rest))
2041 rest++;
2042 if (!*rest)
2043 err = 0;
2044 }
2045 break;
2046 case MPOL_INTERLEAVE:
2047 /*
2048 * Default to online nodes with memory if no nodelist
2049 */
2050 if (!nodelist)
71fe804b 2051 nodes = node_states[N_HIGH_MEMORY];
095f1fc4 2052 err = 0;
3f226aa1 2053 break;
71fe804b 2054 case MPOL_LOCAL:
3f226aa1 2055 /*
71fe804b 2056 * Don't allow a nodelist; mpol_new() checks flags
3f226aa1 2057 */
71fe804b 2058 if (nodelist)
3f226aa1 2059 goto out;
71fe804b 2060 mode = MPOL_PREFERRED;
3f226aa1 2061 break;
2062
2063 /*
2064 * case MPOL_BIND: mpol_new() enforces non-empty nodemask.
2065 * case MPOL_DEFAULT: mpol_new() enforces empty nodemask, ignores flags.
2066 */
2067 }
2068
71fe804b 2069 mode_flags = 0;
2070 if (flags) {
2071 /*
2072 * Currently, we only support two mutually exclusive
2073 * mode flags.
2074 */
2075 if (!strcmp(flags, "static"))
71fe804b 2076 mode_flags |= MPOL_F_STATIC_NODES;
095f1fc4 2077 else if (!strcmp(flags, "relative"))
71fe804b 2078 mode_flags |= MPOL_F_RELATIVE_NODES;
2079 else
2080 err = 1;
2081 }
2082
2083 new = mpol_new(mode, mode_flags, &nodes);
2084 if (IS_ERR(new))
2085 err = 1;
2086 else if (no_context)
2087 new->w.user_nodemask = nodes; /* save for contextualization */
2088
2089out:
2090 /* Restore string for error message */
2091 if (nodelist)
2092 *--nodelist = ':';
2093 if (flags)
2094 *--flags = '=';
2095 if (!err)
2096 *mpol = new;
2097 return err;
2098}
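/*
 * Editor's illustrative sketch, not part of the original file: parsing a
 * tmpfs-style "mpol=" mount-option value.  The buffer must be writable
 * because parsing temporarily NUL-terminates the mode/flags substrings
 * (they are restored before returning).  example_parse_mount_option() is a
 * hypothetical name.
 */
static int example_parse_mount_option(struct mempolicy **mpolp)
{
	char str[] = "interleave=static:0-3";

	/* no_context == 1: keep the user nodemask for later contextualization */
	return mpol_parse_str(str, mpolp, 1);	/* 0 on success, 1 on error */
}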
2099#endif /* CONFIG_TMPFS */
2100
2101/**
2102 * mpol_to_str - format a mempolicy structure for printing
2103 * @buffer: to contain formatted mempolicy string
2104 * @maxlen: length of @buffer
2105 * @pol: pointer to mempolicy to be formatted
2106 * @no_context: "context free" mempolicy - use nodemask in w.user_nodemask
2107 *
2108 * Convert a mempolicy into a string.
2109 * Returns the number of characters in buffer (if positive)
2110 * or an error (negative)
2111 */
71fe804b 2112int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
2113{
2114 char *p = buffer;
2115 int l;
2116 nodemask_t nodes;
bea904d5 2117 unsigned short mode;
f5b087b5 2118 unsigned short flags = pol ? pol->flags : 0;
1a75a6c8 2119
2120 /*
2121 * Sanity check: room for longest mode, flag and some nodes
2122 */
2123 VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
2124
2125 if (!pol || pol == &default_policy)
2126 mode = MPOL_DEFAULT;
2127 else
2128 mode = pol->mode;
2129
2130 switch (mode) {
2131 case MPOL_DEFAULT:
2132 nodes_clear(nodes);
2133 break;
2134
2135 case MPOL_PREFERRED:
2136 nodes_clear(nodes);
fc36b8d3 2137 if (flags & MPOL_F_LOCAL)
2138 mode = MPOL_LOCAL; /* pseudo-policy */
2139 else
fc36b8d3 2140 node_set(pol->v.preferred_node, nodes);
2141 break;
2142
2143 case MPOL_BIND:
19770b32 2144 /* Fall through */
1a75a6c8 2145 case MPOL_INTERLEAVE:
2146 if (no_context)
2147 nodes = pol->w.user_nodemask;
2148 else
2149 nodes = pol->v.nodes;
2150 break;
2151
2152 default:
2153 BUG();
2154 }
2155
2156 l = strlen(policy_types[mode]);
2157 if (buffer + maxlen < p + l + 1)
2158 return -ENOSPC;
2159
2160 strcpy(p, policy_types[mode]);
2161 p += l;
2162
fc36b8d3 2163 if (flags & MPOL_MODE_FLAGS) {
2164 if (buffer + maxlen < p + 2)
2165 return -ENOSPC;
2166 *p++ = '=';
2167
2168 /*
2169 * Currently, the only defined flags are mutually exclusive
2170 */
f5b087b5 2171 if (flags & MPOL_F_STATIC_NODES)
2172 p += snprintf(p, buffer + maxlen - p, "static");
2173 else if (flags & MPOL_F_RELATIVE_NODES)
2174 p += snprintf(p, buffer + maxlen - p, "relative");
2175 }
2176
2177 if (!nodes_empty(nodes)) {
2178 if (buffer + maxlen < p + 2)
2179 return -ENOSPC;
095f1fc4 2180 *p++ = ':';
2181 p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
2182 }
2183 return p - buffer;
2184}
2185
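/*
 * Illustrative examples (editor's note, not in the original file) of
 * strings produced by mpol_to_str():
 *
 *	"default"
 *	"prefer:3"
 *	"local"
 *	"bind:0-1"
 *	"interleave=relative:0,2-3"
 */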
2186struct numa_maps {
2187 unsigned long pages;
2188 unsigned long anon;
2189 unsigned long active;
2190 unsigned long writeback;
1a75a6c8 2191 unsigned long mapcount_max;
2192 unsigned long dirty;
2193 unsigned long swapcache;
2194 unsigned long node[MAX_NUMNODES];
2195};
2196
397874df 2197static void gather_stats(struct page *page, void *private, int pte_dirty)
2198{
2199 struct numa_maps *md = private;
2200 int count = page_mapcount(page);
2201
2202 md->pages++;
2203 if (pte_dirty || PageDirty(page))
2204 md->dirty++;
1a75a6c8 2205
2206 if (PageSwapCache(page))
2207 md->swapcache++;
1a75a6c8 2208
894bc310 2209 if (PageActive(page) || PageUnevictable(page))
2210 md->active++;
2211
2212 if (PageWriteback(page))
2213 md->writeback++;
2214
2215 if (PageAnon(page))
2216 md->anon++;
2217
2218 if (count > md->mapcount_max)
2219 md->mapcount_max = count;
2220
1a75a6c8 2221 md->node[page_to_nid(page)]++;
2222}
2223
7f709ed0 2224#ifdef CONFIG_HUGETLB_PAGE
2225static void check_huge_range(struct vm_area_struct *vma,
2226 unsigned long start, unsigned long end,
2227 struct numa_maps *md)
2228{
2229 unsigned long addr;
2230 struct page *page;
2231 struct hstate *h = hstate_vma(vma);
2232 unsigned long sz = huge_page_size(h);
397874df 2233
2234 for (addr = start; addr < end; addr += sz) {
2235 pte_t *ptep = huge_pte_offset(vma->vm_mm,
2236 addr & huge_page_mask(h));
2237 pte_t pte;
2238
2239 if (!ptep)
2240 continue;
2241
2242 pte = *ptep;
2243 if (pte_none(pte))
2244 continue;
2245
2246 page = pte_page(pte);
2247 if (!page)
2248 continue;
2249
2250 gather_stats(page, md, pte_dirty(*ptep));
2251 }
2252}
2253#else
2254static inline void check_huge_range(struct vm_area_struct *vma,
2255 unsigned long start, unsigned long end,
2256 struct numa_maps *md)
2257{
2258}
2259#endif
397874df 2260
2261/*
2262 * Display pages allocated per node and memory policy via /proc.
2263 */
2264int show_numa_map(struct seq_file *m, void *v)
2265{
99f89551 2266 struct proc_maps_private *priv = m->private;
2267 struct vm_area_struct *vma = v;
2268 struct numa_maps *md;
2269 struct file *file = vma->vm_file;
2270 struct mm_struct *mm = vma->vm_mm;
480eccf9 2271 struct mempolicy *pol;
2272 int n;
2273 char buffer[50];
2274
397874df 2275 if (!mm)
2276 return 0;
2277
2278 md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
2279 if (!md)
2280 return 0;
2281
480eccf9 2282 pol = get_vma_policy(priv->task, vma, vma->vm_start);
71fe804b 2283 mpol_to_str(buffer, sizeof(buffer), pol, 0);
52cd3b07 2284 mpol_cond_put(pol);
2285
2286 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
2287
2288 if (file) {
2289 seq_printf(m, " file=");
c32c2f63 2290 seq_path(m, &file->f_path, "\n\t= ");
2291 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
2292 seq_printf(m, " heap");
2293 } else if (vma->vm_start <= mm->start_stack &&
2294 vma->vm_end >= mm->start_stack) {
2295 seq_printf(m, " stack");
2296 }
2297
2298 if (is_vm_hugetlb_page(vma)) {
2299 check_huge_range(vma, vma->vm_start, vma->vm_end, md);
2300 seq_printf(m, " huge");
2301 } else {
a57ebfdb 2302 check_pgd_range(vma, vma->vm_start, vma->vm_end,
56bbd65d 2303 &node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
2304 }
2305
2306 if (!md->pages)
2307 goto out;
1a75a6c8 2308
2309 if (md->anon)
2310 seq_printf(m," anon=%lu",md->anon);
1a75a6c8 2311
2312 if (md->dirty)
2313 seq_printf(m," dirty=%lu",md->dirty);
1a75a6c8 2314
2315 if (md->pages != md->anon && md->pages != md->dirty)
2316 seq_printf(m, " mapped=%lu", md->pages);
1a75a6c8 2317
2318 if (md->mapcount_max > 1)
2319 seq_printf(m, " mapmax=%lu", md->mapcount_max);
1a75a6c8 2320
2321 if (md->swapcache)
2322 seq_printf(m," swapcache=%lu", md->swapcache);
2323
2324 if (md->active < md->pages && !is_vm_hugetlb_page(vma))
2325 seq_printf(m," active=%lu", md->active);
2326
2327 if (md->writeback)
2328 seq_printf(m," writeback=%lu", md->writeback);
2329
56bbd65d 2330 for_each_node_state(n, N_HIGH_MEMORY)
2331 if (md->node[n])
2332 seq_printf(m, " N%d=%lu", n, md->node[n]);
2333out:
2334 seq_putc(m, '\n');
2335 kfree(md);
2336
2337 if (m->count < m->size)
99f89551 2338 m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
2339 return 0;
2340}
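/*
 * Illustrative example (editor's note, not in the original file) of a line
 * emitted through /proc/<pid>/numa_maps by show_numa_map():
 *
 *	7f60a2000000 interleave=static:0-3 anon=512 dirty=512 active=384
 *		N0=128 N1=128 N2=128 N3=128
 *
 * (shown wrapped here; the real output is a single line)
 */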