/*
 * Memory Migration functionality - linux/mm/migration.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/memcontrol.h>
#include <linux/syscalls.h>

#include "internal.h"

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page().
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

/*
 * Add isolated pages on the list back to the LRU under page lock
 * to avoid leaking evictable pages back onto unevictable list.
 *
 * returns the number of pages put back.
 */
int putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;
	int count = 0;

	list_for_each_entry_safe(page, page2, l, lru) {
		list_del(&page->lru);
		putback_lru_page(page);
		count++;
	}
	return count;
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static void remove_migration_pte(struct vm_area_struct *vma,
		struct page *old, struct page *new)
{
	struct mm_struct *mm = vma->vm_mm;
	swp_entry_t entry;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;
	unsigned long addr = page_address_in_vma(new, vma);

	if (addr == -EFAULT)
		return;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		return;

	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		return;

	ptep = pte_offset_map(pmd, addr);

	if (!is_swap_pte(*ptep)) {
		pte_unmap(ptep);
		return;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);

	if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
		goto out;

	/*
	 * Yes, ignore the return value from a GFP_ATOMIC mem_cgroup_charge.
	 * Failure is not an option here: we're now expected to remove every
	 * migration pte, and will cause crashes otherwise. Normally this
	 * is not an issue: mem_cgroup_prepare_migration bumped up the old
	 * page_cgroup count for safety, and that count is now attached to
	 * the new page, so this charge should just be another increment of
	 * the count, keeping it in balance with rmap.c's mem_cgroup
	 * uncharging. But if there's been a force_empty, those reference
	 * counts may no longer be reliable, and this charge can actually
	 * fail: oh well, we don't make the situation any worse by
	 * proceeding as if it had succeeded.
	 */
	mem_cgroup_charge(new, mm, GFP_ATOMIC);

	get_page(new);
	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
	if (is_write_migration_entry(entry))
		pte = pte_mkwrite(pte);
	flush_cache_page(vma, addr, pte_pfn(pte));
	set_pte_at(mm, addr, ptep, pte);

	if (PageAnon(new))
		page_add_anon_rmap(new, vma, addr);
	else
		page_add_file_rmap(new);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, pte);

out:
	pte_unmap_unlock(ptep, ptl);
}

/*
 * Note that remove_file_migration_ptes will only work on regular mappings;
 * nonlinear mappings do not use migration entries.
 */
static void remove_file_migration_ptes(struct page *old, struct page *new)
{
	struct vm_area_struct *vma;
	struct address_space *mapping = page_mapping(new);
	struct prio_tree_iter iter;
	pgoff_t pgoff = new->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	if (!mapping)
		return;

	spin_lock(&mapping->i_mmap_lock);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff)
		remove_migration_pte(vma, old, new);

	spin_unlock(&mapping->i_mmap_lock);
}

/*
 * Must hold mmap_sem lock on at least one of the vmas containing
 * the page so that the anon_vma cannot vanish.
 */
static void remove_anon_migration_ptes(struct page *old, struct page *new)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	unsigned long mapping;

	mapping = (unsigned long)new->mapping;

	if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)
		return;

	/*
	 * We hold the mmap_sem lock. So no need to call page_lock_anon_vma.
	 */
	anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
		remove_migration_pte(vma, old, new);

	spin_unlock(&anon_vma->lock);
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
{
	if (PageAnon(new))
		remove_anon_migration_ptes(old, new);
	else
		remove_file_migration_ptes(old, new);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 *
 * This function is called from do_swap_page().
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	pte_t *ptep, pte;
	spinlock_t *ptl;
	swp_entry_t entry;
	struct page *page;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	/*
	 * Once the radix-tree replacement step of page migration has started,
	 * page_count *must* be zero, and we don't want to call
	 * wait_on_page_locked() against a page without holding a reference.
	 * So we use get_page_unless_zero() here. Even if it fails, the page
	 * fault will simply occur again.
	 */
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

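/*
 * Example caller (a hedged sketch modelled on the fault path in
 * mm/memory.c's do_swap_page(); details of the fault handler are
 * simplified here):
 *
 *	entry = pte_to_swp_entry(orig_pte);
 *	if (is_migration_entry(entry)) {
 *		migration_entry_wait(mm, pmd, address);
 *		goto out;	(the fault is retried once migration is done)
 *	}
 */
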
/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate set.
 */
static int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != 1)
			return -EAGAIN;
		return 0;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + !!PagePrivate(page);
	if (page_count(page) != expected_count ||
			(struct page *)radix_tree_deref_slot(pslot) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 */
	get_page(newpage);	/* add cache reference */
#ifdef CONFIG_SWAP
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}
#endif

	radix_tree_replace_slot(pslot, newpage);

	page_unfreeze_refs(page, expected_count);
	/*
	 * Drop cache reference from old page.
	 * We know this isn't the last reference.
	 */
	__put_page(page);

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
	 * are mapped to swap space.
	 */
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__inc_zone_page_state(newpage, NR_FILE_PAGES);

	spin_unlock_irq(&mapping->tree_lock);

	return 0;
}

/*
 * Copy the page to its new location
 */
static void migrate_page_copy(struct page *newpage, struct page *page)
{
	int anon;

	copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (TestClearPageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		SetPageActive(newpage);
	} else
		unevictable_migrate_page(newpage, page);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		/*
		 * Want to mark the page and the radix tree as dirty, and
		 * redo the accounting that clear_page_dirty_for_io undid,
		 * but we can't use set_page_dirty because that function
		 * is actually a signal that all of the page has become dirty.
		 * Whereas only part of our page may be dirty.
		 */
		__set_page_dirty_nobuffers(newpage);
	}

	mlock_migrate_page(newpage, page);

#ifdef CONFIG_SWAP
	ClearPageSwapCache(page);
#endif
	ClearPagePrivate(page);
	set_page_private(page, 0);
	/* page->mapping contains a flag for PageAnon() */
	anon = PageAnon(page);
	page->mapping = NULL;

	if (!anon) /* This page was removed from radix-tree. */
		mem_cgroup_uncharge_cache_page(page);

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}

/************************************************************
 *                    Migration functions
 ***********************************************************/

/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

	migrate_page_copy(newpage, page);
	return 0;
}
EXPORT_SYMBOL(migrate_page);

#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

	bh = head;
	do {
		get_bh(bh);
		lock_buffer(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif

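/*
 * Hooking these helpers up to a filesystem is done through the
 * address_space_operations ->migratepage method.  A minimal, hedged
 * sketch (the struct name is illustrative and not from this file):
 *
 *	static const struct address_space_operations example_aops = {
 *		...
 *		.migratepage	= migrate_page,
 *	};
 *
 * Filesystems whose pages carry buffer_head private data would set
 * .migratepage = buffer_migrate_page instead (CONFIG_BLOCK only), and
 * mappings that must never be migrated can use fail_migrate_page.
 */
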
/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.nonblocking = 1,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page);

	rc = mapping->a_ops->writepage(page, &wbc);
	if (rc < 0)
		/* I/O Error writing */
		return -EIO;

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page)
{
	if (PageDirty(page))
		return writeout(mapping, page);

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (PagePrivate(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  == 0 - success
 */
static int move_to_new_page(struct page *newpage, struct page *page)
{
	struct address_space *mapping;
	int rc;

	/*
	 * Block others from accessing the page when we get around to
	 * establishing additional references. We are the only one
	 * holding a reference to the new page at this point.
	 */
	if (!trylock_page(newpage))
		BUG();

	/* Prepare mapping for the new page. */
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapBacked(page))
		SetPageSwapBacked(newpage);

	mapping = page_mapping(page);
	if (!mapping)
		rc = migrate_page(mapping, newpage, page);
	else if (mapping->a_ops->migratepage)
		/*
		 * Most pages have a mapping and most filesystems
		 * should provide a migration function. Anonymous
		 * pages are part of swap space which also has its
		 * own migration function. This is the most common
		 * path for page migration.
		 */
		rc = mapping->a_ops->migratepage(mapping,
						newpage, page);
	else
		rc = fallback_migrate_page(mapping, newpage, page);

	if (!rc) {
		remove_migration_ptes(page, newpage);
	} else
		newpage->mapping = NULL;

	unlock_page(newpage);

	return rc;
}

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
			struct page *page, int force)
{
	int rc = 0;
	int *result = NULL;
	struct page *newpage = get_new_page(page, private, &result);
	int rcu_locked = 0;
	int charge = 0;

	if (!newpage)
		return -ENOMEM;

	if (page_count(page) == 1) {
		/* page was freed from under us. So we are done. */
		goto move_newpage;
	}

	charge = mem_cgroup_prepare_migration(page, newpage);
	if (charge == -ENOMEM) {
		rc = -ENOMEM;
		goto move_newpage;
	}
	/* prepare cgroup just returns 0 or -ENOMEM */
	BUG_ON(charge);

	rc = -EAGAIN;
	if (!trylock_page(page)) {
		if (!force)
			goto move_newpage;
		lock_page(page);
	}

	if (PageWriteback(page)) {
		if (!force)
			goto unlock;
		wait_on_page_writeback(page);
	}
	/*
	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
	 * we cannot notice that the anon_vma is freed while we migrate a page.
	 * This rcu_read_lock() delays freeing of the anon_vma pointer until
	 * the end of migration. File cache pages are no problem because they
	 * are protected by the page lock (migration uses write_page() or
	 * lock_page() on them), so only anonymous pages need care here.
	 */
	if (PageAnon(page)) {
		rcu_read_lock();
		rcu_locked = 1;
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read in, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG. So handle it here.
	 * 2. An orphaned page (see truncate_complete_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining. Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated. So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		if (!PageAnon(page) && PagePrivate(page)) {
			/*
			 * Go direct to try_to_free_buffers() here because
			 * a) that's what try_to_release_page() would do anyway
			 * b) we may be under rcu_read_lock() here, so we can't
			 *    use GFP_KERNEL which is what try_to_release_page()
			 *    needs to be effective.
			 */
			try_to_free_buffers(page);
		}
		goto rcu_unlock;
	}

	/* Establish migration ptes or remove ptes */
	try_to_unmap(page, 1);

	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page);

	if (rc)
		remove_migration_ptes(page, page);
rcu_unlock:
	if (rcu_locked)
		rcu_read_unlock();

unlock:
	unlock_page(page);

	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be
		 * restored.
		 */
		list_del(&page->lru);
		putback_lru_page(page);
	}

move_newpage:
	if (!charge)
		mem_cgroup_end_migration(newpage);

	/*
	 * Move the new page to the LRU. If migration was not successful
	 * then this will free the page.
	 */
	putback_lru_page(newpage);

	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(newpage);
	}
	return rc;
}

/*
 * migrate_pages
 *
 * The function takes one list of pages to migrate and a function
 * that determines from the page to be migrated and the private data
 * the target of the move and allocates the page.
 *
 * The function returns after 10 attempts or if no pages
 * are movable any more because the list has become empty
 * or no retryable pages exist any more. All pages will be
 * returned to the LRU or freed.
 *
 * Return: Number of pages not migrated or error code.
 */
int migrate_pages(struct list_head *from,
		new_page_t get_new_page, unsigned long private)
{
	int retry = 1;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();

			rc = unmap_and_move(get_new_page, private,
						page, pass > 2);

			switch (rc) {
			case -ENOMEM:
				goto out;
			case -EAGAIN:
				retry++;
				break;
			case 0:
				break;
			default:
				/* Permanent failure */
				nr_failed++;
				break;
			}
		}
	}
	rc = 0;
out:
	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	putback_lru_pages(from);

	if (rc)
		return rc;

	return nr_failed + retry;
}

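/*
 * Typical use (a hedged sketch): the caller first isolates the pages it
 * wants to move onto a private list and then supplies an allocation
 * callback that picks each destination page.  new_page_node() below is
 * the in-tree example used by the move_pages() system call:
 *
 *	LIST_HEAD(pagelist);
 *
 *	migrate_prep();
 *	if (!isolate_lru_page(page))
 *		list_add_tail(&page->lru, &pagelist);
 *	err = migrate_pages(&pagelist, new_page_node, (unsigned long)pm);
 */
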
#ifdef CONFIG_NUMA
/*
 * Move a list of individual pages
 */
struct page_to_node {
	unsigned long addr;
	struct page *page;
	int node;
	int status;
};

static struct page *new_page_node(struct page *p, unsigned long private,
		int **result)
{
	struct page_to_node *pm = (struct page_to_node *)private;

	while (pm->node != MAX_NUMNODES && pm->page != p)
		pm++;

	if (pm->node == MAX_NUMNODES)
		return NULL;

	*result = &pm->status;

	return alloc_pages_node(pm->node,
				GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
}

/*
 * Move a set of pages as indicated in the pm array. The addr
 * field must be set to the virtual address of the page to be moved
 * and the node number must contain a valid target node.
 * The pm array ends with node = MAX_NUMNODES.
 */
static int do_move_page_to_node_array(struct mm_struct *mm,
				      struct page_to_node *pm,
				      int migrate_all)
{
	int err;
	struct page_to_node *pp;
	LIST_HEAD(pagelist);

	down_read(&mm->mmap_sem);

	/*
	 * Build a list of pages to migrate
	 */
	migrate_prep();
	for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
		struct vm_area_struct *vma;
		struct page *page;

		/*
		 * A valid page pointer that will not match any of the
		 * pages that will be moved.
		 */
		pp->page = ZERO_PAGE(0);

		err = -EFAULT;
		vma = find_vma(mm, pp->addr);
		if (!vma || !vma_migratable(vma))
			goto set_status;

		page = follow_page(vma, pp->addr, FOLL_GET);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		if (!page)
			goto set_status;

		if (PageReserved(page))		/* Check for zero page */
			goto put_and_set;

		pp->page = page;
		err = page_to_nid(page);

		if (err == pp->node)
			/*
			 * Node already in the right place
			 */
			goto put_and_set;

		err = -EACCES;
		if (page_mapcount(page) > 1 &&
				!migrate_all)
			goto put_and_set;

		err = isolate_lru_page(page);
		if (!err)
			list_add_tail(&page->lru, &pagelist);
put_and_set:
		/*
		 * Either remove the duplicate refcount from
		 * isolate_lru_page() or drop the page ref if it was
		 * not isolated.
		 */
		put_page(page);
set_status:
		pp->status = err;
	}

	err = 0;
	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_page_node,
				(unsigned long)pm);

	up_read(&mm->mmap_sem);
	return err;
}

/*
 * Migrate an array of page addresses onto an array of nodes and fill
 * in the corresponding array of status values.
 */
static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
			 unsigned long nr_pages,
			 const void __user * __user *pages,
			 const int __user *nodes,
			 int __user *status, int flags)
{
	struct page_to_node *pm = NULL;
	nodemask_t task_nodes;
	int err = 0;
	int i;

	task_nodes = cpuset_mems_allowed(task);

	/* Limit nr_pages so that the multiplication may not overflow */
	if (nr_pages >= ULONG_MAX / sizeof(struct page_to_node) - 1) {
		err = -E2BIG;
		goto out;
	}

	pm = vmalloc((nr_pages + 1) * sizeof(struct page_to_node));
	if (!pm) {
		err = -ENOMEM;
		goto out;
	}

	/*
	 * Get parameters from user space and initialize the pm
	 * array. Return various errors if the user did something wrong.
	 */
	for (i = 0; i < nr_pages; i++) {
		const void __user *p;

		err = -EFAULT;
		if (get_user(p, pages + i))
			goto out_pm;

		pm[i].addr = (unsigned long)p;
		if (nodes) {
			int node;

			if (get_user(node, nodes + i))
				goto out_pm;

			err = -ENODEV;
			if (!node_state(node, N_HIGH_MEMORY))
				goto out_pm;

			err = -EACCES;
			if (!node_isset(node, task_nodes))
				goto out_pm;

			pm[i].node = node;
		} else
			pm[i].node = 0;	/* anything to not match MAX_NUMNODES */
	}
	/* End marker */
	pm[nr_pages].node = MAX_NUMNODES;

	err = do_move_page_to_node_array(mm, pm, flags & MPOL_MF_MOVE_ALL);
	if (err >= 0)
		/* Return status information */
		for (i = 0; i < nr_pages; i++)
			if (put_user(pm[i].status, status + i))
				err = -EFAULT;

out_pm:
	vfree(pm);
out:
	return err;
}

/*
 * Determine the node of each page in an array of pages and store
 * it in an array of status values.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
			 const void __user * __user *pages,
			 int __user *status)
{
	unsigned long i;
	int err;

	down_read(&mm->mmap_sem);

	for (i = 0; i < nr_pages; i++) {
		const void __user *p;
		unsigned long addr;
		struct vm_area_struct *vma;
		struct page *page;

		err = -EFAULT;
		if (get_user(p, pages+i))
			goto out;
		addr = (unsigned long) p;

		vma = find_vma(mm, addr);
		if (!vma)
			goto set_status;

		page = follow_page(vma, addr, 0);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		/* Use PageReserved to check for zero page */
		if (!page || PageReserved(page))
			goto set_status;

		err = page_to_nid(page);
set_status:
		put_user(err, status+i);
	}
	err = 0;

out:
	up_read(&mm->mmap_sem);
	return err;
}

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
			const void __user * __user *pages,
			const int __user *nodes,
			int __user *status, int flags)
{
	struct task_struct *task;
	struct mm_struct *mm;
	int err;
	uid_t uid, euid;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	/* Find the mm_struct */
	read_lock(&tasklist_lock);
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}
	mm = get_task_mm(task);
	read_unlock(&tasklist_lock);

	if (!mm)
		return -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	uid = current_uid();
	euid = current_euid();
	if (euid != task->suid && euid != task->uid &&
	    uid != task->suid && uid != task->uid &&
	    !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	err = security_task_movememory(task);
	if (err)
		goto out;

	if (nodes) {
		err = do_pages_move(mm, task, nr_pages, pages, nodes, status,
				    flags);
	} else {
		err = do_pages_stat(mm, nr_pages, pages, status);
	}

out:
	mmput(mm);
	return err;
}

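/*
 * From userspace this is reached through the move_pages(2) system call,
 * typically via the libnuma wrapper declared in <numaif.h>.  A hedged
 * sketch (error handling omitted; pid 0 means the calling process):
 *
 *	void *pages[1] = { addr };
 *	int nodes[1] = { 1 };
 *	int status[1];
 *
 *	move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
 *
 * On return, status[0] holds the node the page now resides on, or a
 * negative error code.  Passing nodes == NULL turns the call into a
 * pure status query (see do_pages_stat() above).
 */
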
/*
 * Call the migration functions in the vma_ops that may prepare
 * memory in a vm for migration. The migration functions may perform
 * the migration for vmas that do not have an underlying page struct.
 */
int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
	const nodemask_t *from, unsigned long flags)
{
	struct vm_area_struct *vma;
	int err = 0;

	for (vma = mm->mmap; vma->vm_next && !err; vma = vma->vm_next) {
		if (vma->vm_ops && vma->vm_ops->migrate) {
			err = vma->vm_ops->migrate(vma, to, from, flags);
			if (err)
				break;
		}
	}
	return err;
}
#endif