/*
 * Memory Migration functionality - linux/mm/migration.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/memcontrol.h>
#include <linux/syscalls.h>

#include "internal.h"

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

/*
 * Isolate one page from the LRU lists. If successful put it onto
 * the indicated list with elevated page count.
 *
 * Result:
 * -EBUSY: page not on LRU list
 * 0: page removed from LRU list and added to the specified list.
 */
int isolate_lru_page(struct page *page, struct list_head *pagelist)
{
        int ret = -EBUSY;

        if (PageLRU(page)) {
                struct zone *zone = page_zone(page);

                spin_lock_irq(&zone->lru_lock);
                if (PageLRU(page) && get_page_unless_zero(page)) {
                        ret = 0;
                        ClearPageLRU(page);
                        if (PageActive(page))
                                del_page_from_active_list(zone, page);
                        else
                                del_page_from_inactive_list(zone, page);
                        list_add_tail(&page->lru, pagelist);
                }
                spin_unlock_irq(&zone->lru_lock);
        }
        return ret;
}

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page().
 */
int migrate_prep(void)
{
        /*
         * Clear the LRU lists so pages can be isolated.
         * Note that pages may be moved off the LRU after we have
         * drained them. Those pages will fail to migrate like other
         * pages that may be busy.
         */
        lru_add_drain_all();

        return 0;
}

static inline void move_to_lru(struct page *page)
{
        if (PageActive(page)) {
                /*
                 * lru_cache_add_active checks that
                 * the PG_active bit is off.
                 */
                ClearPageActive(page);
                lru_cache_add_active(page);
        } else {
                lru_cache_add(page);
        }
        put_page(page);
}

/*
 * Add isolated pages on the list back to the LRU.
 *
 * returns the number of pages put back.
 */
int putback_lru_pages(struct list_head *l)
{
        struct page *page;
        struct page *page2;
        int count = 0;

        list_for_each_entry_safe(page, page2, l, lru) {
                list_del(&page->lru);
                move_to_lru(page);
                count++;
        }
        return count;
}
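
/*
 * Usage sketch (illustrative only, not compiled): a hypothetical caller
 * that isolates a set of pages and hands them to migrate_pages() below.
 * The example_* name and the new_page_t allocator argument are assumptions
 * made for this sketch, not part of this file's API.
 */
#if 0
static int example_isolate_and_migrate(struct page **pages, int nr,
                                        new_page_t get_new_page,
                                        unsigned long private)
{
        LIST_HEAD(pagelist);
        int i;

        migrate_prep();                 /* drain the per-cpu LRU caches */

        for (i = 0; i < nr; i++)
                /* -EBUSY just means this page stays where it is */
                isolate_lru_page(pages[i], &pagelist);

        if (list_empty(&pagelist))
                return 0;

        /* migrate_pages() returns every page to the LRU or frees it */
        return migrate_pages(&pagelist, get_new_page, private);
}
#endif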

/*
 * Restore a potential migration pte to a working pte entry
 */
static void remove_migration_pte(struct vm_area_struct *vma,
                struct page *old, struct page *new)
{
        struct mm_struct *mm = vma->vm_mm;
        swp_entry_t entry;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;
        spinlock_t *ptl;
        unsigned long addr = page_address_in_vma(new, vma);

        if (addr == -EFAULT)
                return;

        pgd = pgd_offset(mm, addr);
        if (!pgd_present(*pgd))
                return;

        pud = pud_offset(pgd, addr);
        if (!pud_present(*pud))
                return;

        pmd = pmd_offset(pud, addr);
        if (!pmd_present(*pmd))
                return;

        ptep = pte_offset_map(pmd, addr);

        if (!is_swap_pte(*ptep)) {
                pte_unmap(ptep);
                return;
        }

        ptl = pte_lockptr(mm, pmd);
        spin_lock(ptl);
        pte = *ptep;
        if (!is_swap_pte(pte))
                goto out;

        entry = pte_to_swp_entry(pte);

        if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
                goto out;

        /*
         * Yes, ignore the return value from a GFP_ATOMIC mem_cgroup_charge.
         * Failure is not an option here: we're now expected to remove every
         * migration pte, and will cause crashes otherwise. Normally this
         * is not an issue: mem_cgroup_prepare_migration bumped up the old
         * page_cgroup count for safety, that's now attached to the new page,
         * so this charge should just be another incrementation of the count,
         * to keep in balance with rmap.c's mem_cgroup_uncharging. But if
         * there's been a force_empty, those reference counts may no longer
         * be reliable, and this charge can actually fail: oh well, we don't
         * make the situation any worse by proceeding as if it had succeeded.
         */
        mem_cgroup_charge(new, mm, GFP_ATOMIC);

        get_page(new);
        pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
        if (is_write_migration_entry(entry))
                pte = pte_mkwrite(pte);
        flush_cache_page(vma, addr, pte_pfn(pte));
        set_pte_at(mm, addr, ptep, pte);

        if (PageAnon(new))
                page_add_anon_rmap(new, vma, addr);
        else
                page_add_file_rmap(new);

        /* No need to invalidate - it was non-present before */
        update_mmu_cache(vma, addr, pte);

out:
        pte_unmap_unlock(ptep, ptl);
}

/*
 * Note that remove_file_migration_ptes will only work on regular mappings;
 * nonlinear mappings do not use migration entries.
 */
static void remove_file_migration_ptes(struct page *old, struct page *new)
{
        struct vm_area_struct *vma;
        struct address_space *mapping = page_mapping(new);
        struct prio_tree_iter iter;
        pgoff_t pgoff = new->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

        if (!mapping)
                return;

        spin_lock(&mapping->i_mmap_lock);

        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff)
                remove_migration_pte(vma, old, new);

        spin_unlock(&mapping->i_mmap_lock);
}

/*
 * Must hold mmap_sem lock on at least one of the vmas containing
 * the page so that the anon_vma cannot vanish.
 */
static void remove_anon_migration_ptes(struct page *old, struct page *new)
{
        struct anon_vma *anon_vma;
        struct vm_area_struct *vma;
        unsigned long mapping;

        mapping = (unsigned long)new->mapping;

        if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)
                return;

        /*
         * We hold the mmap_sem lock. So no need to call page_lock_anon_vma.
         */
        anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
        spin_lock(&anon_vma->lock);

        list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
                remove_migration_pte(vma, old, new);

        spin_unlock(&anon_vma->lock);
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
{
        if (PageAnon(new))
                remove_anon_migration_ptes(old, new);
        else
                remove_file_migration_ptes(old, new);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 *
 * This function is called from do_swap_page().
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
                                unsigned long address)
{
        pte_t *ptep, pte;
        spinlock_t *ptl;
        swp_entry_t entry;
        struct page *page;

        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
        pte = *ptep;
        if (!is_swap_pte(pte))
                goto out;

        entry = pte_to_swp_entry(pte);
        if (!is_migration_entry(entry))
                goto out;

        page = migration_entry_to_page(entry);

        get_page(page);
        pte_unmap_unlock(ptep, ptl);
        wait_on_page_locked(page);
        put_page(page);
        return;
out:
        pte_unmap_unlock(ptep, ptl);
}
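
/*
 * For context, the caller side in do_swap_page() looks roughly like the
 * sketch below (paraphrased from mm/memory.c of this era, not verbatim):
 * the fault path recognizes a migration entry, parks on the page lock,
 * and then retries the fault.
 */
#if 0
        entry = pte_to_swp_entry(orig_pte);
        if (is_migration_entry(entry)) {
                migration_entry_wait(mm, pmd, address);
                goto out;       /* fault is retried once migration is done */
        }
#endif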

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate set.
 */
static int migrate_page_move_mapping(struct address_space *mapping,
                struct page *newpage, struct page *page)
{
        void **pslot;

        if (!mapping) {
                /* Anonymous page without mapping */
                if (page_count(page) != 1)
                        return -EAGAIN;
                return 0;
        }

        write_lock_irq(&mapping->tree_lock);

        pslot = radix_tree_lookup_slot(&mapping->page_tree,
                                        page_index(page));

        if (page_count(page) != 2 + !!PagePrivate(page) ||
                        (struct page *)radix_tree_deref_slot(pslot) != page) {
                write_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }

        /*
         * Now we know that no one else is looking at the page.
         */
        get_page(newpage);      /* add cache reference */
#ifdef CONFIG_SWAP
        if (PageSwapCache(page)) {
                SetPageSwapCache(newpage);
                set_page_private(newpage, page_private(page));
        }
#endif

        radix_tree_replace_slot(pslot, newpage);

        /*
         * Drop cache reference from old page.
         * We know this isn't the last reference.
         */
        __put_page(page);

        /*
         * If moved to a different zone then also account
         * the page for that zone. Other VM counters will be
         * taken care of when we establish references to the
         * new page and drop references to the old page.
         *
         * Note that anonymous pages are accounted for
         * via NR_FILE_PAGES and NR_ANON_PAGES if they
         * are mapped to swap space.
         */
        __dec_zone_page_state(page, NR_FILE_PAGES);
        __inc_zone_page_state(newpage, NR_FILE_PAGES);

        write_unlock_irq(&mapping->tree_lock);
        if (!PageSwapCache(newpage)) {
                mem_cgroup_uncharge_cache_page(page);
        }

        return 0;
}

/*
 * Copy the page to its new location
 */
static void migrate_page_copy(struct page *newpage, struct page *page)
{
        copy_highpage(newpage, page);

        if (PageError(page))
                SetPageError(newpage);
        if (PageReferenced(page))
                SetPageReferenced(newpage);
        if (PageUptodate(page))
                SetPageUptodate(newpage);
        if (PageActive(page))
                SetPageActive(newpage);
        if (PageChecked(page))
                SetPageChecked(newpage);
        if (PageMappedToDisk(page))
                SetPageMappedToDisk(newpage);

        if (PageDirty(page)) {
                clear_page_dirty_for_io(page);
                /*
                 * Want to mark the page and the radix tree as dirty, and
                 * redo the accounting that clear_page_dirty_for_io undid,
                 * but we can't use set_page_dirty because that function
                 * is actually a signal that all of the page has become dirty.
                 * Whereas only part of our page may be dirty.
                 */
                __set_page_dirty_nobuffers(newpage);
        }

#ifdef CONFIG_SWAP
        ClearPageSwapCache(page);
#endif
        ClearPageActive(page);
        ClearPagePrivate(page);
        set_page_private(page, 0);
        page->mapping = NULL;

        /*
         * If any waiters have accumulated on the new page then
         * wake them up.
         */
        if (PageWriteback(newpage))
                end_page_writeback(newpage);
}

/************************************************************
 * Migration functions
 ***********************************************************/

/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct address_space *mapping,
                        struct page *newpage, struct page *page)
{
        return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
                struct page *newpage, struct page *page)
{
        int rc;

        BUG_ON(PageWriteback(page));    /* Writeback must be complete */

        rc = migrate_page_move_mapping(mapping, newpage, page);

        if (rc)
                return rc;

        migrate_page_copy(newpage, page);
        return 0;
}
EXPORT_SYMBOL(migrate_page);
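
/*
 * Typical wiring, as a hedged sketch: a filesystem whose pages carry no
 * fs-private state can point its address_space_operations directly at
 * migrate_page(). The example_* method names below are hypothetical.
 */
#if 0
static const struct address_space_operations example_aops = {
        .readpage       = example_readpage,     /* hypothetical */
        .writepage      = example_writepage,    /* hypothetical */
        .migratepage    = migrate_page,
};
#endif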

#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
                struct page *newpage, struct page *page)
{
        struct buffer_head *bh, *head;
        int rc;

        if (!page_has_buffers(page))
                return migrate_page(mapping, newpage, page);

        head = page_buffers(page);

        rc = migrate_page_move_mapping(mapping, newpage, page);

        if (rc)
                return rc;

        bh = head;
        do {
                get_bh(bh);
                lock_buffer(bh);
                bh = bh->b_this_page;

        } while (bh != head);

        ClearPagePrivate(page);
        set_page_private(newpage, page_private(page));
        set_page_private(page, 0);
        put_page(page);
        get_page(newpage);

        bh = head;
        do {
                set_bh_page(bh, newpage, bh_offset(bh));
                bh = bh->b_this_page;

        } while (bh != head);

        SetPagePrivate(newpage);

        migrate_page_copy(newpage, page);

        bh = head;
        do {
                unlock_buffer(bh);
                put_bh(bh);
                bh = bh->b_this_page;

        } while (bh != head);

        return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif
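
/*
 * Filesystems that keep buffer_heads on their pages hook up
 * buffer_migrate_page() instead; ext2, for instance, wires it into its
 * address_space_operations this way. A minimal sketch (illustrative only):
 */
#if 0
static const struct address_space_operations example_blkdev_aops = {
        .migratepage    = buffer_migrate_page,
};
#endif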

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_NONE,
                .nr_to_write = 1,
                .range_start = 0,
                .range_end = LLONG_MAX,
                .nonblocking = 1,
                .for_reclaim = 1
        };
        int rc;

        if (!mapping->a_ops->writepage)
                /* No write method for the address space */
                return -EINVAL;

        if (!clear_page_dirty_for_io(page))
                /* Someone else already triggered a write */
                return -EAGAIN;

        /*
         * A dirty page may imply that the underlying filesystem has
         * the page on some queue. So the page must be clean for
         * migration. Writeout may mean we lose the lock and the
         * page state is no longer what we checked for earlier.
         * At this point we know that the migration attempt cannot
         * be successful.
         */
        remove_migration_ptes(page, page);

        rc = mapping->a_ops->writepage(page, &wbc);
        if (rc < 0)
                /* I/O Error writing */
                return -EIO;

        if (rc != AOP_WRITEPAGE_ACTIVATE)
                /* unlocked. Relock */
                lock_page(page);

        return -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
        struct page *newpage, struct page *page)
{
        if (PageDirty(page))
                return writeout(mapping, page);

        /*
         * Buffers may be managed in a filesystem specific way.
         * We must have no buffers or drop them.
         */
        if (PagePrivate(page) &&
            !try_to_release_page(page, GFP_KERNEL))
                return -EAGAIN;

        return migrate_page(mapping, newpage, page);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 */
static int move_to_new_page(struct page *newpage, struct page *page)
{
        struct address_space *mapping;
        int rc;

        /*
         * Block others from accessing the page when we get around to
         * establishing additional references. We are the only one
         * holding a reference to the new page at this point.
         */
        if (TestSetPageLocked(newpage))
                BUG();

        /* Prepare mapping for the new page.*/
        newpage->index = page->index;
        newpage->mapping = page->mapping;

        mapping = page_mapping(page);
        if (!mapping)
                rc = migrate_page(mapping, newpage, page);
        else if (mapping->a_ops->migratepage)
                /*
                 * Most pages have a mapping and most filesystems
                 * should provide a migration function. Anonymous
                 * pages are part of swap space which also has its
                 * own migration function. This is the most common
                 * path for page migration.
                 */
                rc = mapping->a_ops->migratepage(mapping,
                                                newpage, page);
        else
                rc = fallback_migrate_page(mapping, newpage, page);

        if (!rc) {
                remove_migration_ptes(page, newpage);
        } else
                newpage->mapping = NULL;

        unlock_page(newpage);

        return rc;
}

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
                        struct page *page, int force)
{
        int rc = 0;
        int *result = NULL;
        struct page *newpage = get_new_page(page, private, &result);
        int rcu_locked = 0;
        int charge = 0;

        if (!newpage)
                return -ENOMEM;

        if (page_count(page) == 1)
                /* page was freed from under us. So we are done. */
                goto move_newpage;

        charge = mem_cgroup_prepare_migration(page, newpage);
        if (charge == -ENOMEM) {
                rc = -ENOMEM;
                goto move_newpage;
        }
        /* prepare cgroup just returns 0 or -ENOMEM */
        BUG_ON(charge);

        rc = -EAGAIN;
        if (TestSetPageLocked(page)) {
                if (!force)
                        goto move_newpage;
                lock_page(page);
        }

        if (PageWriteback(page)) {
                if (!force)
                        goto unlock;
                wait_on_page_writeback(page);
        }
        /*
         * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
         * we cannot notice that the anon_vma is freed while we migrate a page.
         * This rcu_read_lock() delays freeing the anon_vma pointer until the
         * end of migration. File cache pages are no problem because migration
         * holds the page lock (and may use writepage()), so only anonymous
         * pages need this care.
         */
        if (PageAnon(page)) {
                rcu_read_lock();
                rcu_locked = 1;
        }

        /*
         * Corner case handling:
         * 1. When a new swap-cache page is read in, it is added to the LRU
         * and treated as swapcache but it has no rmap yet.
         * Calling try_to_unmap() against a page->mapping==NULL page will
         * trigger a BUG. So handle it here.
         * 2. An orphaned page (see truncate_complete_page) might have
         * fs-private metadata. The page can be picked up due to memory
         * offlining. Everywhere else except page reclaim, the page is
         * invisible to the vm, so the page can not be migrated. So try to
         * free the metadata, so the page can be freed.
         */
        if (!page->mapping) {
                if (!PageAnon(page) && PagePrivate(page)) {
                        /*
                         * Go direct to try_to_free_buffers() here because
                         * a) that's what try_to_release_page() would do anyway
                         * b) we may be under rcu_read_lock() here, so we can't
                         * use GFP_KERNEL which is what try_to_release_page()
                         * needs to be effective.
                         */
                        try_to_free_buffers(page);
                }
                goto rcu_unlock;
        }

        /* Establish migration ptes or remove ptes */
        try_to_unmap(page, 1);

        if (!page_mapped(page))
                rc = move_to_new_page(newpage, page);

        if (rc)
                remove_migration_ptes(page, page);
rcu_unlock:
        if (rcu_locked)
                rcu_read_unlock();

unlock:

        unlock_page(page);

        if (rc != -EAGAIN) {
                /*
                 * A page that has been migrated has all references
                 * removed and will be freed. A page that has not been
                 * migrated will have kept its references and be
                 * restored.
                 */
                list_del(&page->lru);
                move_to_lru(page);
        }

move_newpage:
        if (!charge)
                mem_cgroup_end_migration(newpage);
        /*
         * Move the new page to the LRU. If migration was not successful
         * then this will free the page.
         */
        move_to_lru(newpage);
        if (result) {
                if (rc)
                        *result = rc;
                else
                        *result = page_to_nid(newpage);
        }
        return rc;
}

/*
 * migrate_pages
 *
 * The function takes one list of pages to migrate and a function
 * that determines from the page to be migrated and the private data
 * the target of the move and allocates the page.
 *
 * The function returns after 10 attempts or if no pages
 * are movable anymore because the list has become empty
 * or no retryable pages exist anymore. All pages will be
 * returned to the LRU or freed.
 *
 * Return: Number of pages not migrated or error code.
 */
int migrate_pages(struct list_head *from,
                new_page_t get_new_page, unsigned long private)
{
        int retry = 1;
        int nr_failed = 0;
        int pass = 0;
        struct page *page;
        struct page *page2;
        int swapwrite = current->flags & PF_SWAPWRITE;
        int rc;

        if (!swapwrite)
                current->flags |= PF_SWAPWRITE;

        for (pass = 0; pass < 10 && retry; pass++) {
                retry = 0;

                list_for_each_entry_safe(page, page2, from, lru) {
                        cond_resched();

                        rc = unmap_and_move(get_new_page, private,
                                                page, pass > 2);

                        switch (rc) {
                        case -ENOMEM:
                                goto out;
                        case -EAGAIN:
                                retry++;
                                break;
                        case 0:
                                break;
                        default:
                                /* Permanent failure */
                                nr_failed++;
                                break;
                        }
                }
        }
        rc = 0;
out:
        if (!swapwrite)
                current->flags &= ~PF_SWAPWRITE;

        putback_lru_pages(from);

        if (rc)
                return rc;

        return nr_failed + retry;
}
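
/*
 * Putting it together, a hedged sketch of a complete caller: the allocator
 * callback matches the new_page_t signature used above and simply allocates
 * on a fixed node passed through the private argument. alloc_on_node() and
 * example_migrate_list() are assumptions made for this sketch.
 */
#if 0
static struct page *alloc_on_node(struct page *p, unsigned long private,
                                int **result)
{
        /* *result may be left untouched; unmap_and_move() checks for NULL */
        return alloc_pages_node((int)private, GFP_HIGHUSER_MOVABLE, 0);
}

static int example_migrate_list(struct list_head *pagelist, int target_nid)
{
        /* 0 on success, the number of pages that failed, or an error code */
        return migrate_pages(pagelist, alloc_on_node,
                                (unsigned long)target_nid);
}
#endif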

#ifdef CONFIG_NUMA
/*
 * Move a list of individual pages
 */
struct page_to_node {
        unsigned long addr;
        struct page *page;
        int node;
        int status;
};

static struct page *new_page_node(struct page *p, unsigned long private,
                int **result)
{
        struct page_to_node *pm = (struct page_to_node *)private;

        while (pm->node != MAX_NUMNODES && pm->page != p)
                pm++;

        if (pm->node == MAX_NUMNODES)
                return NULL;

        *result = &pm->status;

        return alloc_pages_node(pm->node,
                                GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
}

/*
 * Move a set of pages as indicated in the pm array. The addr
 * field must be set to the virtual address of the page to be moved
 * and the node number must contain a valid target node.
 */
static int do_move_pages(struct mm_struct *mm, struct page_to_node *pm,
                                int migrate_all)
{
        int err;
        struct page_to_node *pp;
        LIST_HEAD(pagelist);

        down_read(&mm->mmap_sem);

        /*
         * Build a list of pages to migrate
         */
        migrate_prep();
        for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
                struct vm_area_struct *vma;
                struct page *page;

                /*
                 * A valid page pointer that will not match any of the
                 * pages that will be moved.
                 */
                pp->page = ZERO_PAGE(0);

                err = -EFAULT;
                vma = find_vma(mm, pp->addr);
                if (!vma || !vma_migratable(vma))
                        goto set_status;

                page = follow_page(vma, pp->addr, FOLL_GET);

                err = PTR_ERR(page);
                if (IS_ERR(page))
                        goto set_status;

                err = -ENOENT;
                if (!page)
                        goto set_status;

                if (PageReserved(page))         /* Check for zero page */
                        goto put_and_set;

                pp->page = page;
                err = page_to_nid(page);

                if (err == pp->node)
                        /*
                         * Node already in the right place
                         */
                        goto put_and_set;

                err = -EACCES;
                if (page_mapcount(page) > 1 &&
                                !migrate_all)
                        goto put_and_set;

                err = isolate_lru_page(page, &pagelist);
put_and_set:
                /*
                 * Either remove the duplicate refcount from
                 * isolate_lru_page() or drop the page ref if it was
                 * not isolated.
                 */
                put_page(page);
set_status:
                pp->status = err;
        }

        if (!list_empty(&pagelist))
                err = migrate_pages(&pagelist, new_page_node,
                                (unsigned long)pm);
        else
                err = -ENOENT;

        up_read(&mm->mmap_sem);
        return err;
}

/*
 * Determine the nodes of a list of pages. The addr in the pm array
 * must have been set to the virtual address whose node number we want
 * to determine.
 */
static int do_pages_stat(struct mm_struct *mm, struct page_to_node *pm)
{
        down_read(&mm->mmap_sem);

        for ( ; pm->node != MAX_NUMNODES; pm++) {
                struct vm_area_struct *vma;
                struct page *page;
                int err;

                err = -EFAULT;
                vma = find_vma(mm, pm->addr);
                if (!vma)
                        goto set_status;

                page = follow_page(vma, pm->addr, 0);

                err = PTR_ERR(page);
                if (IS_ERR(page))
                        goto set_status;

                err = -ENOENT;
                /* Use PageReserved to check for zero page */
                if (!page || PageReserved(page))
                        goto set_status;

                err = page_to_nid(page);
set_status:
                pm->status = err;
        }

        up_read(&mm->mmap_sem);
        return 0;
}

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
                        const void __user * __user *pages,
                        const int __user *nodes,
                        int __user *status, int flags)
{
        int err = 0;
        int i;
        struct task_struct *task;
        nodemask_t task_nodes;
        struct mm_struct *mm;
        struct page_to_node *pm = NULL;

        /* Check flags */
        if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
                return -EINVAL;

        if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
                return -EPERM;

        /* Find the mm_struct */
        read_lock(&tasklist_lock);
        task = pid ? find_task_by_vpid(pid) : current;
        if (!task) {
                read_unlock(&tasklist_lock);
                return -ESRCH;
        }
        mm = get_task_mm(task);
        read_unlock(&tasklist_lock);

        if (!mm)
                return -EINVAL;

        /*
         * Check if this process has the right to modify the specified
         * process. The right exists if the process has administrative
         * capabilities, superuser privileges or the same
         * userid as the target process.
         */
        if ((current->euid != task->suid) && (current->euid != task->uid) &&
            (current->uid != task->suid) && (current->uid != task->uid) &&
            !capable(CAP_SYS_NICE)) {
                err = -EPERM;
                goto out2;
        }

        err = security_task_movememory(task);
        if (err)
                goto out2;

        task_nodes = cpuset_mems_allowed(task);

        /* Limit nr_pages so that the multiplication may not overflow */
        if (nr_pages >= ULONG_MAX / sizeof(struct page_to_node) - 1) {
                err = -E2BIG;
                goto out2;
        }

        pm = vmalloc((nr_pages + 1) * sizeof(struct page_to_node));
        if (!pm) {
                err = -ENOMEM;
                goto out2;
        }

        /*
         * Get parameters from user space and initialize the pm
         * array. Return various errors if the user did something wrong.
         */
        for (i = 0; i < nr_pages; i++) {
                const void __user *p;

                err = -EFAULT;
                if (get_user(p, pages + i))
                        goto out;

                pm[i].addr = (unsigned long)p;
                if (nodes) {
                        int node;

                        if (get_user(node, nodes + i))
                                goto out;

                        err = -ENODEV;
                        if (!node_state(node, N_HIGH_MEMORY))
                                goto out;

                        err = -EACCES;
                        if (!node_isset(node, task_nodes))
                                goto out;

                        pm[i].node = node;
                } else
                        pm[i].node = 0; /* anything to not match MAX_NUMNODES */
        }
        /* End marker */
        pm[nr_pages].node = MAX_NUMNODES;

        if (nodes)
                err = do_move_pages(mm, pm, flags & MPOL_MF_MOVE_ALL);
        else
                err = do_pages_stat(mm, pm);

        if (err >= 0)
                /* Return status information */
                for (i = 0; i < nr_pages; i++)
                        if (put_user(pm[i].status, status + i))
                                err = -EFAULT;

out:
        vfree(pm);
out2:
        mmput(mm);
        return err;
}
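
/*
 * For reference, the matching userspace call goes through libnuma's
 * numaif.h wrapper. A hedged sketch of moving one page of the calling
 * process (ordinary user C, link with -lnuma; not kernel code):
 */
#if 0
#include <numaif.h>
#include <stdio.h>

int example_move_one_page(void *addr, int target_node)
{
        void *pages[1] = { addr };
        int nodes[1] = { target_node };
        int status[1];

        /* pid 0 means the calling process; cf. sys_move_pages() above */
        if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) < 0) {
                perror("move_pages");
                return -1;
        }
        /* status[0] is the node the page now lives on, or a -errno */
        printf("page now on node %d\n", status[0]);
        return 0;
}
#endif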

/*
 * Call migration functions in the vma_ops that may prepare
 * memory in a vm for migration. Migration functions may perform
 * the migration for vmas that do not have an underlying page struct.
 */
int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
        const nodemask_t *from, unsigned long flags)
{
        struct vm_area_struct *vma;
        int err = 0;

        for (vma = mm->mmap; vma->vm_next && !err; vma = vma->vm_next) {
                if (vma->vm_ops && vma->vm_ops->migrate) {
                        err = vma->vm_ops->migrate(vma, to, from, flags);
                        if (err)
                                break;
                }
        }
        return err;
}
#endif