/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case node -1 here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
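
/*
 * Illustrative user-space sketch (not part of this file): how the policies
 * described above are typically requested.  This assumes the set_mempolicy()
 * and mbind() wrappers declared in <numaif.h> (libnuma); the mode constants
 * and nodemask layout match the syscall interface implemented further down.
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	// Process policy: interleave future allocations across nodes 0 and 1.
 *	unsigned long mask = (1UL << 0) | (1UL << 1);
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, sizeof(mask) * 8);
 *
 *	// VMA policy: bind one anonymous mapping to node 0 only.
 *	void *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned long node0 = 1UL << 0;
 *	mbind(p, 1 << 20, MPOL_BIND, &node0, sizeof(node0) * 8, 0);
 */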

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
   could replace all the switch()es with a mempolicy_ops structure.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/rmap.h>
#include <linux/security.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)		/* Gather statistics */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

#define PDprintk(fmt...)

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.policy = MPOL_DEFAULT,
};

/* Do sanity checking on a policy */
static int mpol_check_policy(int mode, nodemask_t *nodes)
{
	int empty = nodes_empty(*nodes);

	switch (mode) {
	case MPOL_DEFAULT:
		if (!empty)
			return -EINVAL;
		break;
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		/* Preferred will only use the first bit, but allow
		   more for now. */
		if (empty)
			return -EINVAL;
		break;
	}
	return nodes_subset(*nodes, node_online_map) ? 0 : -EINVAL;
}

/* Generate a custom zonelist for the BIND policy. */
static struct zonelist *bind_zonelist(nodemask_t *nodes)
{
	struct zonelist *zl;
	int num, max, nd;
	enum zone_type k;

	max = 1 + MAX_NR_ZONES * nodes_weight(*nodes);
	max++;			/* space for zlcache_ptr (see mmzone.h) */
	zl = kmalloc(sizeof(struct zone *) * max, GFP_KERNEL);
	if (!zl)
		return ERR_PTR(-ENOMEM);
	zl->zlcache_ptr = NULL;
	num = 0;
	/* First put in the highest zones from all nodes, then all the next
	   lower zones etc. Avoid empty zones because the memory allocator
	   doesn't like them. If you implement node hot removal you
	   have to fix that. */
	k = policy_zone;
	while (1) {
		for_each_node_mask(nd, *nodes) {
			struct zone *z = &NODE_DATA(nd)->node_zones[k];
			if (z->present_pages > 0)
				zl->zones[num++] = z;
		}
		if (k == 0)
			break;
		k--;
	}
	if (num == 0) {
		kfree(zl);
		return ERR_PTR(-EINVAL);
	}
	zl->zones[num] = NULL;
	return zl;
}

/* Create a new policy */
static struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
{
	struct mempolicy *policy;

	PDprintk("setting mode %d nodes[0] %lx\n", mode, nodes_addr(*nodes)[0]);
	if (mode == MPOL_DEFAULT)
		return NULL;
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	switch (mode) {
	case MPOL_INTERLEAVE:
		policy->v.nodes = *nodes;
		if (nodes_weight(*nodes) == 0) {
			kmem_cache_free(policy_cache, policy);
			return ERR_PTR(-EINVAL);
		}
		break;
	case MPOL_PREFERRED:
		policy->v.preferred_node = first_node(*nodes);
		if (policy->v.preferred_node >= MAX_NUMNODES)
			policy->v.preferred_node = -1;
		break;
	case MPOL_BIND:
		policy->v.zonelist = bind_zonelist(nodes);
		if (IS_ERR(policy->v.zonelist)) {
			void *error_code = policy->v.zonelist;
			kmem_cache_free(policy_cache, policy);
			return error_code;
		}
		break;
	}
	policy->policy = mode;
	policy->cpuset_mems_allowed = cpuset_mems_allowed(current);
	return policy;
}

static void gather_stats(struct page *, void *, int pte_dirty);
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

/* Scan through pages checking if pages follow certain conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * The check for PageReserved here is important to avoid
		 * handling zero pages and other pages that may have been
		 * marked special by the system.
		 *
		 * If the PageReserved would not be checked here then f.e.
		 * the location of the zero page could have an influence
		 * on MPOL_MF_STRICT, zero pages would be counted for
		 * the per node stats, and there would be useless attempts
		 * to put zero pages on the migration list.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & MPOL_MF_STATS)
			gather_stats(page, private, pte_dirty(*pte));
		else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		if (check_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (check_pmd_range(vma, pud, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int check_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (check_pud_range(vma, pgd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

		err = migrate_prep();
		if (err)
			return ERR_PTR(err);
	}

	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return ERR_PTR(-EFAULT);
			if (prev && prev->vm_end < vma->vm_start)
				return ERR_PTR(-EFAULT);
		}
		if (!is_vm_hugetlb_page(vma) &&
		    ((flags & MPOL_MF_STRICT) ||
		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
		      vma_migratable(vma)))) {
			unsigned long endvma = vma->vm_end;

			if (endvma > end)
				endvma = end;
			if (vma->vm_start > start)
				start = vma->vm_start;
			err = check_pgd_range(vma, start, endvma, nodes,
						flags, private);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
		prev = vma;
	}
	return first;
}

/* Apply policy to a single VMA */
static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
{
	int err = 0;
	struct mempolicy *old = vma->vm_policy;

	PDprintk("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	if (vma->vm_ops && vma->vm_ops->set_policy)
		err = vma->vm_ops->set_policy(vma, new);
	if (!err) {
		mpol_get(new);
		vma->vm_policy = new;
		mpol_free(old);
	}
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, struct mempolicy *new)
{
	struct vm_area_struct *next;
	int err;

	err = 0;
	for (; vma && vma->vm_start < end; vma = next) {
		next = vma->vm_next;
		if (vma->vm_start < start)
			err = split_vma(vma->vm_mm, vma, start, 1);
		if (!err && vma->vm_end > end)
			err = split_vma(vma->vm_mm, vma, end, 0);
		if (!err)
			err = policy_vma(vma, new);
		if (err)
			break;
	}
	return err;
}
418
8bccd85f
CL
419static int contextualize_policy(int mode, nodemask_t *nodes)
420{
421 if (!nodes)
422 return 0;
423
cf2a473c 424 cpuset_update_task_memory_state();
5966514d
PJ
425 if (!cpuset_nodes_subset_current_mems_allowed(*nodes))
426 return -EINVAL;
8bccd85f
CL
427 return mpol_check_policy(mode, nodes);
428}
429
c61afb18
PJ
430
431/*
432 * Update task->flags PF_MEMPOLICY bit: set iff non-default
433 * mempolicy. Allows more rapid checking of this (combined perhaps
434 * with other PF_* flag bits) on memory allocation hot code paths.
435 *
436 * If called from outside this file, the task 'p' should -only- be
437 * a newly forked child not yet visible on the task list, because
438 * manipulating the task flags of a visible task is not safe.
439 *
440 * The above limitation is why this routine has the funny name
441 * mpol_fix_fork_child_flag().
442 *
443 * It is also safe to call this with a task pointer of current,
444 * which the static wrapper mpol_set_task_struct_flag() does,
445 * for use within this file.
446 */
447
448void mpol_fix_fork_child_flag(struct task_struct *p)
449{
450 if (p->mempolicy)
451 p->flags |= PF_MEMPOLICY;
452 else
453 p->flags &= ~PF_MEMPOLICY;
454}
455
456static void mpol_set_task_struct_flag(void)
457{
458 mpol_fix_fork_child_flag(current);
459}
460
1da177e4 461/* Set the process memory policy */
8bccd85f 462long do_set_mempolicy(int mode, nodemask_t *nodes)
1da177e4 463{
1da177e4 464 struct mempolicy *new;
1da177e4 465
8bccd85f 466 if (contextualize_policy(mode, nodes))
1da177e4 467 return -EINVAL;
8bccd85f 468 new = mpol_new(mode, nodes);
1da177e4
LT
469 if (IS_ERR(new))
470 return PTR_ERR(new);
471 mpol_free(current->mempolicy);
472 current->mempolicy = new;
c61afb18 473 mpol_set_task_struct_flag();
1da177e4 474 if (new && new->policy == MPOL_INTERLEAVE)
dfcd3c0d 475 current->il_next = first_node(new->v.nodes);
1da177e4
LT
476 return 0;
477}

/* Fill a zone bitmap for a policy */
static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
{
	int i;

	nodes_clear(*nodes);
	switch (p->policy) {
	case MPOL_BIND:
		for (i = 0; p->v.zonelist->zones[i]; i++)
			node_set(zone_to_nid(p->v.zonelist->zones[i]),
				*nodes);
		break;
	case MPOL_DEFAULT:
		break;
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		/* or use current node instead of online map? */
		if (p->v.preferred_node < 0)
			*nodes = node_online_map;
		else
			node_set(p->v.preferred_node, *nodes);
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
long do_get_mempolicy(int *policy, nodemask_t *nmask,
			unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	cpuset_update_task_memory_state();
	if (flags & ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR))
		return -EINVAL;
	if (flags & MPOL_F_ADDR) {
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->policy == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else
		*policy = pol->policy;

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask)
		get_zonemask(pol, nmask);

 out:
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1)
		isolate_lru_page(page, pagelist);
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	return alloc_pages_node(node, GFP_HIGHUSER, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
int migrate_to_node(struct mm_struct *mm, int source, int dest, int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_node_page, dest);

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	LIST_HEAD(pagelist);
	int busy = 0;
	int err = 0;
	nodemask_t tmp;

	down_read(&mm->mmap_sem);

	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
	if (err)
		goto out;

/*
 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
 * bit in 'tmp', and return that <source, dest> pair for migration.
 * The pair of nodemasks 'to' and 'from' define the map.
 *
 * If no pair of bits is found that way, fallback to picking some
 * pair of 'source' and 'dest' bits that are not the same.  If the
 * 'source' and 'dest' bits are the same, this represents a node
 * that will be migrating to itself, so no pages need move.
 *
 * If no bits are left in 'tmp', or if all remaining bits left
 * in 'tmp' correspond to the same bit in 'to', return false
 * (nothing left to migrate).
 *
 * This lets us pick a pair of nodes to migrate between, such that
 * if possible the dest node is not already occupied by some other
 * source node, minimizing the risk of overloading the memory on a
 * node that would happen if we migrated incoming memory to a node
 * before migrating outgoing memory source that same node.
 *
 * A single scan of tmp is sufficient.  As we go, we remember the
 * most recent <s, d> pair that moved (s != d).  If we find a pair
 * that not only moved, but what's better, moved to an empty slot
 * (d is not set in tmp), then we break out then, with that pair.
 * Otherwise when we finish scanning tmp, we at least have the
 * most recent <s, d> pair that moved.  If we get all the way through
 * the scan of tmp without finding any node that moved, much less
 * moved to an empty node, then there is nothing left worth migrating.
 * (A small worked example follows this function.)
 */

	tmp = *from_nodes;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = -1;
		int dest = 0;

		for_each_node_mask(s, tmp) {
			d = node_remap(s, *from_nodes, *to_nodes);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == -1)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
out:
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;

}
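
/*
 * Worked example for the pair selection above (illustrative only):
 * with from_nodes = {0,1} and to_nodes = {2,3}, node_remap() maps
 * 0 -> 2 and 1 -> 3.  The first scan of tmp = {0,1} picks s = 0,
 * d = 2; since node 2 is not in tmp it is an "empty" destination,
 * so pages move 0 -> 2 and node 0 is cleared from tmp.  The next
 * pass moves 1 -> 3, tmp becomes empty, and the migration ends.
 */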

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	struct vm_area_struct *vma = (struct vm_area_struct *)private;

	return alloc_page_vma(GFP_HIGHUSER, vma, page_address_in_vma(page, vma));
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	return -ENOSYS;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	return NULL;
}
#endif

long do_mbind(unsigned long start, unsigned long len,
		unsigned long mode, nodemask_t *nmask, unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	LIST_HEAD(pagelist);

	if ((flags & ~(unsigned long)(MPOL_MF_STRICT |
				      MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
	    || mode > MPOL_MAX)
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	if (mpol_check_policy(mode, nmask))
		return -EINVAL;

	new = mpol_new(mode, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	PDprintk("mbind %lx-%lx mode:%ld nodes:%lx\n", start, start + len,
		 mode, nodes_addr(*nmask)[0]);

	down_write(&mm->mmap_sem);
	vma = check_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);

	err = PTR_ERR(vma);
	if (!IS_ERR(vma)) {
		int nr_failed = 0;

		err = mbind_range(vma, start, end, new);

		if (!list_empty(&pagelist))
			nr_failed = migrate_pages(&pagelist, new_vma_page,
						(unsigned long)vma);

		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
			err = -EIO;
	}

	up_write(&mm->mmap_sem);
	mpol_free(new);
	return err;
}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
		     unsigned long maxnode)
{
	unsigned long k;
	unsigned long nlongs;
	unsigned long endmask;

	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;
	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
		return -EINVAL;

	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/* When the user specified more nodes than supported just check
	   if the non supported part is all zero. */
	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
		if (nlongs > PAGE_SIZE/sizeof(long))
			return -EINVAL;
		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
			unsigned long t;
			if (get_user(t, nmask + k))
				return -EFAULT;
			if (k == nlongs - 1) {
				if (t & endmask)
					return -EINVAL;
			} else if (t)
				return -EINVAL;
		}
		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
		endmask = ~0UL;
	}

	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
		return -EFAULT;
	nodes_addr(*nodes)[nlongs-1] &= endmask;
	return 0;
}
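
/*
 * Example of the mask handling above (illustrative only): on a 64-bit
 * kernel a caller passing maxnode = 65 supplies one unsigned long;
 * after --maxnode, nlongs = BITS_TO_LONGS(64) = 1 and endmask = ~0UL,
 * so all 64 bits are copied.  With maxnode = 3 only bits 0-1 are taken
 * from the user mask; endmask = 0x3 clears everything above them.
 */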

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
			      nodemask_t *nodes)
{
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
			return -EINVAL;
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
			return -EFAULT;
		copy = nbytes;
	}
	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}

asmlinkage long sys_mbind(unsigned long start, unsigned long len,
			unsigned long mode,
			unsigned long __user *nmask, unsigned long maxnode,
			unsigned flags)
{
	nodemask_t nodes;
	int err;

	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
#ifdef CONFIG_CPUSETS
	/* Restrict the nodes to the allowed nodes in the cpuset */
	nodes_and(nodes, nodes, current->mems_allowed);
#endif
	return do_mbind(start, len, mode, &nodes, flags);
}

/* Set the process memory policy */
asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
		unsigned long maxnode)
{
	int err;
	nodemask_t nodes;

	if (mode < 0 || mode > MPOL_MAX)
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_set_mempolicy(mode, &nodes);
}

asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
		const unsigned long __user *old_nodes,
		const unsigned long __user *new_nodes)
{
	struct mm_struct *mm;
	struct task_struct *task;
	nodemask_t old;
	nodemask_t new;
	nodemask_t task_nodes;
	int err;

	err = get_nodes(&old, old_nodes, maxnode);
	if (err)
		return err;

	err = get_nodes(&new, new_nodes, maxnode);
	if (err)
		return err;

	/* Find the mm_struct */
	read_lock(&tasklist_lock);
	task = pid ? find_task_by_pid(pid) : current;
	if (!task) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}
	mm = get_task_mm(task);
	read_unlock(&tasklist_lock);

	if (!mm)
		return -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	if ((current->euid != task->suid) && (current->euid != task->uid) &&
	    (current->uid != task->suid) && (current->uid != task->uid) &&
	    !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	task_nodes = cpuset_mems_allowed(task);
	/* Is the user allowed to access the target nodes? */
	if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	err = security_task_movememory(task);
	if (err)
		goto out;

	err = do_migrate_pages(mm, &old, &new,
		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
out:
	mmput(mm);
	return err;
}


/* Retrieve NUMA policy */
asmlinkage long sys_get_mempolicy(int __user *policy,
				unsigned long __user *nmask,
				unsigned long maxnode,
				unsigned long addr, unsigned long flags)
{
	int err, pval;
	nodemask_t nodes;

	if (nmask != NULL && maxnode < MAX_NUMNODES)
		return -EINVAL;

	err = do_get_mempolicy(&pval, &nodes, addr, flags);

	if (err)
		return err;

	if (policy && put_user(pval, policy))
		return -EFAULT;

	if (nmask)
		err = copy_nodes_to_user(nmask, maxnode, &nodes);

	return err;
}

#ifdef CONFIG_COMPAT

asmlinkage long compat_sys_get_mempolicy(int __user *policy,
				     compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode,
				     compat_ulong_t addr, compat_ulong_t flags)
{
	long err;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask)
		nm = compat_alloc_user_space(alloc_size);

	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);

	if (!err && nmask) {
		err = copy_from_user(bm, nm, alloc_size);
		/* ensure entire bitmap is zeroed */
		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
		err |= compat_put_bitmap(nmask, bm, nr_bits);
	}

	return err;
}

asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(bm, nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, bm, alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_set_mempolicy(mode, nm, nr_bits+1);
}

asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
			     compat_ulong_t mode, compat_ulong_t __user *nmask,
			     compat_ulong_t maxnode, compat_ulong_t flags)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	nodemask_t bm;

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}

#endif

/* Return effective policy for a VMA */
static struct mempolicy * get_vma_policy(struct task_struct *task,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = task->mempolicy;

	if (vma) {
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else if (vma->vm_policy &&
				vma->vm_policy->policy != MPOL_DEFAULT)
			pol = vma->vm_policy;
	}
	if (!pol)
		pol = &default_policy;
	return pol;
}

/* Return a zonelist representing a mempolicy */
static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
{
	int nd;

	switch (policy->policy) {
	case MPOL_PREFERRED:
		nd = policy->v.preferred_node;
		if (nd < 0)
			nd = numa_node_id();
		break;
	case MPOL_BIND:
		/* Lower zones don't get a policy applied */
		/* Careful: current->mems_allowed might have moved */
		if (gfp_zone(gfp) >= policy_zone)
			if (cpuset_zonelist_valid_mems_allowed(policy->v.zonelist))
				return policy->v.zonelist;
		/*FALL THROUGH*/
	case MPOL_INTERLEAVE: /* should not happen */
	case MPOL_DEFAULT:
		nd = numa_node_id();
		break;
	default:
		nd = 0;
		BUG();
	}
	return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp);
}

/* Do dynamic interleaving for a process */
static unsigned interleave_nodes(struct mempolicy *policy)
{
	unsigned nid, next;
	struct task_struct *me = current;

	nid = me->il_next;
	next = next_node(nid, policy->v.nodes);
	if (next >= MAX_NUMNODES)
		next = first_node(policy->v.nodes);
	me->il_next = next;
	return nid;
}

/*
 * Depending on the memory policy provide a node from which to allocate the
 * next slab entry.
 */
unsigned slab_node(struct mempolicy *policy)
{
	int pol = policy ? policy->policy : MPOL_DEFAULT;

	switch (pol) {
	case MPOL_INTERLEAVE:
		return interleave_nodes(policy);

	case MPOL_BIND:
		/*
		 * Follow bind policy behavior and start allocation at the
		 * first node.
		 */
		return zone_to_nid(policy->v.zonelist->zones[0]);

	case MPOL_PREFERRED:
		if (policy->v.preferred_node >= 0)
			return policy->v.preferred_node;
		/* Fall through */

	default:
		return numa_node_id();
	}
}

/* Do static interleaving for a VMA with known offset. */
static unsigned offset_il_node(struct mempolicy *pol,
		struct vm_area_struct *vma, unsigned long off)
{
	unsigned nnodes = nodes_weight(pol->v.nodes);
	unsigned target = (unsigned)off % nnodes;
	int c;
	int nid = -1;

	c = 0;
	do {
		nid = next_node(nid, pol->v.nodes);
		c++;
	} while (c <= target);
	return nid;
}

/* Determine a node number for interleave */
static inline unsigned interleave_nid(struct mempolicy *pol,
		 struct vm_area_struct *vma, unsigned long addr, int shift)
{
	if (vma) {
		unsigned long off;

		/*
		 * for small pages, there is no difference between
		 * shift and PAGE_SHIFT, so the bit-shift is safe.
		 * for huge pages, since vm_pgoff is in units of small
		 * pages, we need to shift off the always 0 bits to get
		 * a useful offset.
		 */
		BUG_ON(shift < PAGE_SHIFT);
		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
		off += (addr - vma->vm_start) >> shift;
		return offset_il_node(pol, vma, off);
	} else
		return interleave_nodes(pol);
}
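
/*
 * Worked example for the two helpers above (illustrative only): with an
 * interleave mask of nodes {0,2,5} a page at interleave offset 7 hits
 * target = 7 % 3 = 1, i.e. the second node of the mask, so
 * offset_il_node() returns node 2.  Consecutive offsets therefore cycle
 * through 0, 2, 5, 0, 2, 5, ... regardless of which CPU faults the page in.
 */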

#ifdef CONFIG_HUGETLBFS
/* Return a zonelist suitable for a huge page allocation. */
struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = get_vma_policy(current, vma, addr);

	if (pol->policy == MPOL_INTERLEAVE) {
		unsigned nid;

		nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
		return NODE_DATA(nid)->node_zonelists + gfp_zone(GFP_HIGHUSER);
	}
	return zonelist_policy(GFP_HIGHUSER, pol);
}
#endif

/* Allocate a page in interleaved policy.
   Own path because it needs to do special accounting. */
static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
					unsigned nid)
{
	struct zonelist *zl;
	struct page *page;

	zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp);
	page = __alloc_pages(gfp, order, zl);
	if (page && page_zone(page) == zl->zones[0])
		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
	return page;
}

/**
 * 	alloc_page_vma	- Allocate a page for a VMA.
 *
 * 	@gfp:
 *      %GFP_USER    user allocation.
 *      %GFP_KERNEL  kernel allocations,
 *      %GFP_HIGHMEM highmem/user allocations,
 *      %GFP_FS      allocation should not call back into a file system.
 *      %GFP_ATOMIC  don't sleep.
 *
 * 	@vma:  Pointer to VMA or NULL if not available.
 *	@addr: Virtual Address of the allocation. Must be inside the VMA.
 *
 * 	This function allocates a page from the kernel page pool and applies
 *	a NUMA policy associated with the VMA or the current process.
 *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
 *	mm_struct of the VMA to prevent it from going away. Should be used for
 *	all allocations for pages that will be mapped into
 * 	user space. Returns NULL when no page can be allocated.
 *
 *	Should be called with the mmap_sem of the vma held.
 */
struct page *
alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = get_vma_policy(current, vma, addr);

	cpuset_update_task_memory_state();

	if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
		unsigned nid;

		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
		return alloc_page_interleave(gfp, 0, nid);
	}
	return __alloc_pages(gfp, 0, zonelist_policy(gfp, pol));
}

/**
 * 	alloc_pages_current - Allocate pages.
 *
 *	@gfp:
 *		%GFP_USER   user allocation,
 *      	%GFP_KERNEL kernel allocation,
 *      	%GFP_HIGHMEM highmem allocation,
 *      	%GFP_FS     don't call back into a file system.
 *      	%GFP_ATOMIC don't sleep.
 *	@order: Power of two of allocation size in pages. 0 is a single page.
 *
 *	Allocate a page from the kernel page pool.  When not in
 *	interrupt context, apply the current process' NUMA policy.
 *	Returns NULL when no page can be allocated.
 *
 *	Don't call cpuset_update_task_memory_state() unless
 *	1) it's ok to take cpuset_sem (can WAIT), and
 *	2) allocating for current task (not interrupt).
 */
struct page *alloc_pages_current(gfp_t gfp, unsigned order)
{
	struct mempolicy *pol = current->mempolicy;

	if ((gfp & __GFP_WAIT) && !in_interrupt())
		cpuset_update_task_memory_state();
	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
		pol = &default_policy;
	if (pol->policy == MPOL_INTERLEAVE)
		return alloc_page_interleave(gfp, order, interleave_nodes(pol));
	return __alloc_pages(gfp, order, zonelist_policy(gfp, pol));
}
EXPORT_SYMBOL(alloc_pages_current);

/*
 * If mpol_copy() sees current->cpuset == cpuset_being_rebound, then it
 * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
 * with the mems_allowed returned by cpuset_mems_allowed(). This
 * keeps mempolicies cpuset relative after its cpuset moves. See
 * further kernel/cpuset.c update_nodemask().
 */
void *cpuset_being_rebound;

/* Slow path of a mempolicy copy */
struct mempolicy *__mpol_copy(struct mempolicy *old)
{
	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);

	if (!new)
		return ERR_PTR(-ENOMEM);
	if (current_cpuset_is_being_rebound()) {
		nodemask_t mems = cpuset_mems_allowed(current);
		mpol_rebind_policy(old, &mems);
	}
	*new = *old;
	atomic_set(&new->refcnt, 1);
	if (new->policy == MPOL_BIND) {
		int sz = ksize(old->v.zonelist);
		new->v.zonelist = kmemdup(old->v.zonelist, sz, GFP_KERNEL);
		if (!new->v.zonelist) {
			kmem_cache_free(policy_cache, new);
			return ERR_PTR(-ENOMEM);
		}
	}
	return new;
}

/* Slow path of a mempolicy comparison */
int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (!a || !b)
		return 0;
	if (a->policy != b->policy)
		return 0;
	switch (a->policy) {
	case MPOL_DEFAULT:
		return 1;
	case MPOL_INTERLEAVE:
		return nodes_equal(a->v.nodes, b->v.nodes);
	case MPOL_PREFERRED:
		return a->v.preferred_node == b->v.preferred_node;
	case MPOL_BIND: {
		int i;
		for (i = 0; a->v.zonelist->zones[i]; i++)
			if (a->v.zonelist->zones[i] != b->v.zonelist->zones[i])
				return 0;
		return b->v.zonelist->zones[i] == NULL;
	}
	default:
		BUG();
		return 0;
	}
}

/* Slow path of a mpol destructor. */
void __mpol_free(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	if (p->policy == MPOL_BIND)
		kfree(p->v.zonelist);
	p->policy = MPOL_DEFAULT;
	kmem_cache_free(policy_cache, p);
}

/*
 * Shared memory backing store policy support.
 *
 * Remember policies even when nobody has shared memory mapped.
 * The policies are kept in Red-Black tree linked from the inode.
 * They are protected by the sp->lock spinlock, which should be held
 * for any accesses to the tree.
 */

/* lookup first element intersecting start-end */
/* Caller holds sp->lock */
static struct sp_node *
sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
{
	struct rb_node *n = sp->root.rb_node;

	while (n) {
		struct sp_node *p = rb_entry(n, struct sp_node, nd);

		if (start >= p->end)
			n = n->rb_right;
		else if (end <= p->start)
			n = n->rb_left;
		else
			break;
	}
	if (!n)
		return NULL;
	for (;;) {
		struct sp_node *w = NULL;
		struct rb_node *prev = rb_prev(n);
		if (!prev)
			break;
		w = rb_entry(prev, struct sp_node, nd);
		if (w->end <= start)
			break;
		n = prev;
	}
	return rb_entry(n, struct sp_node, nd);
}

/* Insert a new shared policy into the list. */
/* Caller holds sp->lock */
static void sp_insert(struct shared_policy *sp, struct sp_node *new)
{
	struct rb_node **p = &sp->root.rb_node;
	struct rb_node *parent = NULL;
	struct sp_node *nd;

	while (*p) {
		parent = *p;
		nd = rb_entry(parent, struct sp_node, nd);
		if (new->start < nd->start)
			p = &(*p)->rb_left;
		else if (new->end > nd->end)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new->nd, parent, p);
	rb_insert_color(&new->nd, &sp->root);
	PDprintk("inserting %lx-%lx: %d\n", new->start, new->end,
		 new->policy ? new->policy->policy : 0);
}

/* Find shared policy intersecting idx */
struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	struct mempolicy *pol = NULL;
	struct sp_node *sn;

	if (!sp->root.rb_node)
		return NULL;
	spin_lock(&sp->lock);
	sn = sp_lookup(sp, idx, idx+1);
	if (sn) {
		mpol_get(sn->policy);
		pol = sn->policy;
	}
	spin_unlock(&sp->lock);
	return pol;
}

static void sp_delete(struct shared_policy *sp, struct sp_node *n)
{
	PDprintk("deleting %lx-%lx\n", n->start, n->end);
	rb_erase(&n->nd, &sp->root);
	mpol_free(n->policy);
	kmem_cache_free(sn_cache, n);
}

struct sp_node *
sp_alloc(unsigned long start, unsigned long end, struct mempolicy *pol)
{
	struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);

	if (!n)
		return NULL;
	n->start = start;
	n->end = end;
	mpol_get(pol);
	n->policy = pol;
	return n;
}
1482/* Replace a policy range. */
1483static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
1484 unsigned long end, struct sp_node *new)
1485{
1486 struct sp_node *n, *new2 = NULL;
1487
1488restart:
1489 spin_lock(&sp->lock);
1490 n = sp_lookup(sp, start, end);
1491 /* Take care of old policies in the same range. */
1492 while (n && n->start < end) {
1493 struct rb_node *next = rb_next(&n->nd);
1494 if (n->start >= start) {
1495 if (n->end <= end)
1496 sp_delete(sp, n);
1497 else
1498 n->start = end;
1499 } else {
1500 /* Old policy spanning whole new range. */
1501 if (n->end > end) {
1502 if (!new2) {
1503 spin_unlock(&sp->lock);
1504 new2 = sp_alloc(end, n->end, n->policy);
1505 if (!new2)
1506 return -ENOMEM;
1507 goto restart;
1508 }
1509 n->end = start;
1510 sp_insert(sp, new2);
1511 new2 = NULL;
1512 break;
1513 } else
1514 n->end = start;
1515 }
1516 if (!next)
1517 break;
1518 n = rb_entry(next, struct sp_node, nd);
1519 }
1520 if (new)
1521 sp_insert(sp, new);
1522 spin_unlock(&sp->lock);
1523 if (new2) {
1524 mpol_free(new2->policy);
1525 kmem_cache_free(sn_cache, new2);
1526 }
1527 return 0;
1528}
1529
7339ff83
RH
1530void mpol_shared_policy_init(struct shared_policy *info, int policy,
1531 nodemask_t *policy_nodes)
1532{
1533 info->root = RB_ROOT;
1534 spin_lock_init(&info->lock);
1535
1536 if (policy != MPOL_DEFAULT) {
1537 struct mempolicy *newpol;
1538
1539 /* Falls back to MPOL_DEFAULT on any error */
1540 newpol = mpol_new(policy, policy_nodes);
1541 if (!IS_ERR(newpol)) {
1542 /* Create pseudo-vma that contains just the policy */
1543 struct vm_area_struct pvma;
1544
1545 memset(&pvma, 0, sizeof(struct vm_area_struct));
1546 /* Policy covers entire file */
1547 pvma.vm_end = TASK_SIZE;
1548 mpol_set_shared_policy(info, &pvma, newpol);
1549 mpol_free(newpol);
1550 }
1551 }
1552}

int mpol_set_shared_policy(struct shared_policy *info,
			struct vm_area_struct *vma, struct mempolicy *npol)
{
	int err;
	struct sp_node *new = NULL;
	unsigned long sz = vma_pages(vma);

	PDprintk("set_shared_policy %lx sz %lu %d %lx\n",
		 vma->vm_pgoff,
		 sz, npol ? npol->policy : -1,
		 npol ? nodes_addr(npol->v.nodes)[0] : -1);

	if (npol) {
		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
		if (!new)
			return -ENOMEM;
	}
	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
	if (err && new)
		kmem_cache_free(sn_cache, new);
	return err;
}

/* Free a backing policy store on inode delete. */
void mpol_free_shared_policy(struct shared_policy *p)
{
	struct sp_node *n;
	struct rb_node *next;

	if (!p->root.rb_node)
		return;
	spin_lock(&p->lock);
	next = rb_first(&p->root);
	while (next) {
		n = rb_entry(next, struct sp_node, nd);
		next = rb_next(&n->nd);
		rb_erase(&n->nd, &p->root);
		mpol_free(n->policy);
		kmem_cache_free(sn_cache, n);
	}
	spin_unlock(&p->lock);
}

/* assumes fs == KERNEL_DS */
void __init numa_policy_init(void)
{
	nodemask_t interleave_nodes;
	unsigned long largest = 0;
	int nid, prefer = 0;

	policy_cache = kmem_cache_create("numa_policy",
					 sizeof(struct mempolicy),
					 0, SLAB_PANIC, NULL, NULL);

	sn_cache = kmem_cache_create("shared_policy_node",
				     sizeof(struct sp_node),
				     0, SLAB_PANIC, NULL, NULL);

	/*
	 * Set interleaving policy for system init. Interleaving is only
	 * enabled across suitably sized nodes (default is >= 16MB), or
	 * fall back to the largest node if they're all smaller.
	 */
	nodes_clear(interleave_nodes);
	for_each_online_node(nid) {
		unsigned long total_pages = node_present_pages(nid);

		/* Preserve the largest node */
		if (largest < total_pages) {
			largest = total_pages;
			prefer = nid;
		}

		/* Interleave this node? */
		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
			node_set(nid, interleave_nodes);
	}

	/* All too small, use the largest */
	if (unlikely(nodes_empty(interleave_nodes)))
		node_set(prefer, interleave_nodes);

	if (do_set_mempolicy(MPOL_INTERLEAVE, &interleave_nodes))
		printk("numa_policy_init: interleaving failed\n");
}

/* Reset policy of current process to default */
void numa_default_policy(void)
{
	do_set_mempolicy(MPOL_DEFAULT, NULL);
}

/* Migrate a policy to a different set of nodes */
void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
	nodemask_t *mpolmask;
	nodemask_t tmp;

	if (!pol)
		return;
	mpolmask = &pol->cpuset_mems_allowed;
	if (nodes_equal(*mpolmask, *newmask))
		return;

	switch (pol->policy) {
	case MPOL_DEFAULT:
		break;
	case MPOL_INTERLEAVE:
		nodes_remap(tmp, pol->v.nodes, *mpolmask, *newmask);
		pol->v.nodes = tmp;
		*mpolmask = *newmask;
		current->il_next = node_remap(current->il_next,
						*mpolmask, *newmask);
		break;
	case MPOL_PREFERRED:
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						*mpolmask, *newmask);
		*mpolmask = *newmask;
		break;
	case MPOL_BIND: {
		nodemask_t nodes;
		struct zone **z;
		struct zonelist *zonelist;

		nodes_clear(nodes);
		for (z = pol->v.zonelist->zones; *z; z++)
			node_set(zone_to_nid(*z), nodes);
		nodes_remap(tmp, nodes, *mpolmask, *newmask);
		nodes = tmp;

		zonelist = bind_zonelist(&nodes);

		/* If no mem, bind_zonelist() returns an error and we keep
		 * the old zonelist.  If that old zonelist has no remaining
		 * mems_allowed nodes, then zonelist_policy() will
		 * "FALL THROUGH" to MPOL_DEFAULT.
		 */

		if (!IS_ERR(zonelist)) {
			/* Good - got mem - substitute new zonelist */
			kfree(pol->v.zonelist);
			pol->v.zonelist = zonelist;
		}
		*mpolmask = *newmask;
		break;
	}
	default:
		BUG();
		break;
	}
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
	up_write(&mm->mmap_sem);
}

/*
 * Display pages allocated per node and memory policy via /proc.
 */

static const char * const policy_types[] =
	{ "default", "prefer", "bind", "interleave" };

/*
 * Convert a mempolicy into a string.
 * Returns the number of characters in buffer (if positive)
 * or an error (negative)
 */
static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
{
	char *p = buffer;
	int l;
	nodemask_t nodes;
	int mode = pol ? pol->policy : MPOL_DEFAULT;

	switch (mode) {
	case MPOL_DEFAULT:
		nodes_clear(nodes);
		break;

	case MPOL_PREFERRED:
		nodes_clear(nodes);
		node_set(pol->v.preferred_node, nodes);
		break;

	case MPOL_BIND:
		get_zonemask(pol, &nodes);
		break;

	case MPOL_INTERLEAVE:
		nodes = pol->v.nodes;
		break;

	default:
		BUG();
		return -EFAULT;
	}

	l = strlen(policy_types[mode]);
	if (buffer + maxlen < p + l + 1)
		return -ENOSPC;

	strcpy(p, policy_types[mode]);
	p += l;

	if (!nodes_empty(nodes)) {
		if (buffer + maxlen < p + 2)
			return -ENOSPC;
		*p++ = '=';
		p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
	}
	return p - buffer;
}
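
/*
 * For example (illustrative only), an interleave policy over nodes 0-3 is
 * rendered by mpol_to_str() as "interleave=0-3" and the default policy as
 * just "default"; show_numa_map() below prefixes this with the VMA start
 * address.
 */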

struct numa_maps {
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

static void gather_stats(struct page *page, void *private, int pte_dirty)
{
	struct numa_maps *md = private;
	int count = page_mapcount(page);

	md->pages++;
	if (pte_dirty || PageDirty(page))
		md->dirty++;

	if (PageSwapCache(page))
		md->swapcache++;

	if (PageActive(page))
		md->active++;

	if (PageWriteback(page))
		md->writeback++;

	if (PageAnon(page))
		md->anon++;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[page_to_nid(page)]++;
}

#ifdef CONFIG_HUGETLB_PAGE
static void check_huge_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct numa_maps *md)
{
	unsigned long addr;
	struct page *page;

	for (addr = start; addr < end; addr += HPAGE_SIZE) {
		pte_t *ptep = huge_pte_offset(vma->vm_mm, addr & HPAGE_MASK);
		pte_t pte;

		if (!ptep)
			continue;

		pte = *ptep;
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (!page)
			continue;

		gather_stats(page, md, pte_dirty(*ptep));
	}
}
#else
static inline void check_huge_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct numa_maps *md)
{
}
#endif

int show_numa_map(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct numa_maps *md;
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	int n;
	char buffer[50];

	if (!mm)
		return 0;

	md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
	if (!md)
		return 0;

	mpol_to_str(buffer, sizeof(buffer),
			    get_vma_policy(priv->task, vma, vma->vm_start));

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_printf(m, " file=");
		seq_path(m, file->f_path.mnt, file->f_path.dentry, "\n\t= ");
	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
		seq_printf(m, " heap");
	} else if (vma->vm_start <= mm->start_stack &&
			vma->vm_end >= mm->start_stack) {
		seq_printf(m, " stack");
	}

	if (is_vm_hugetlb_page(vma)) {
		check_huge_range(vma, vma->vm_start, vma->vm_end, md);
		seq_printf(m, " huge");
	} else {
		check_pgd_range(vma, vma->vm_start, vma->vm_end,
			&node_online_map, MPOL_MF_STATS, md);
	}

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_online_node(n)
		if (md->node[n])
			seq_printf(m, " N%d=%lu", n, md->node[n]);
out:
	seq_putc(m, '\n');
	kfree(md);

	if (m->count < m->size)
		m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
	return 0;
}