/*
 * Memory Migration functionality - linux/mm/migration.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/memcontrol.h>
#include <linux/syscalls.h>

#include "internal.h"

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page().
 */
int migrate_prep(void)
{
        /*
         * Clear the LRU lists so pages can be isolated.
         * Note that pages may be moved off the LRU after we have
         * drained them. Those pages will fail to migrate like other
         * pages that may be busy.
         */
        lru_add_drain_all();

        return 0;
}
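/*
 * Typical calling sequence for a migration user (a sketch, not lifted from
 * any one in-tree caller): drain the pagevecs, isolate the pages of
 * interest onto a private list, then hand the list to migrate_pages():
 *
 *      LIST_HEAD(pagelist);
 *
 *      migrate_prep();
 *      if (!isolate_lru_page(page))
 *              list_add_tail(&page->lru, &pagelist);
 *      migrate_pages(&pagelist, get_target_page, private);
 *
 * get_target_page stands in for whatever new_page_t allocation callback
 * the caller supplies; see do_move_page_to_node_array() below for an
 * in-tree example of this pattern.
 */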

/*
 * Add isolated pages on the list back to the LRU under page lock
 * to avoid leaking evictable pages back onto unevictable list.
 *
 * returns the number of pages put back.
 */
int putback_lru_pages(struct list_head *l)
{
        struct page *page;
        struct page *page2;
        int count = 0;

        list_for_each_entry_safe(page, page2, l, lru) {
                list_del(&page->lru);
                putback_lru_page(page);
                count++;
        }
        return count;
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static void remove_migration_pte(struct vm_area_struct *vma,
                struct page *old, struct page *new)
{
        struct mm_struct *mm = vma->vm_mm;
        swp_entry_t entry;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;
        spinlock_t *ptl;
        unsigned long addr = page_address_in_vma(new, vma);

        if (addr == -EFAULT)
                return;

        pgd = pgd_offset(mm, addr);
        if (!pgd_present(*pgd))
                return;

        pud = pud_offset(pgd, addr);
        if (!pud_present(*pud))
                return;

        pmd = pmd_offset(pud, addr);
        if (!pmd_present(*pmd))
                return;

        ptep = pte_offset_map(pmd, addr);

        if (!is_swap_pte(*ptep)) {
                pte_unmap(ptep);
                return;
        }

        ptl = pte_lockptr(mm, pmd);
        spin_lock(ptl);
        pte = *ptep;
        if (!is_swap_pte(pte))
                goto out;

        entry = pte_to_swp_entry(pte);

        if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
                goto out;

        /*
         * Yes, ignore the return value from a GFP_ATOMIC mem_cgroup_charge.
         * Failure is not an option here: we're now expected to remove every
         * migration pte, and will cause crashes otherwise. Normally this
         * is not an issue: mem_cgroup_prepare_migration bumped up the old
         * page_cgroup count for safety, that's now attached to the new page,
         * so this charge should just be another increment of the count,
         * to keep in balance with rmap.c's mem_cgroup_uncharging. But if
         * there's been a force_empty, those reference counts may no longer
         * be reliable, and this charge can actually fail: oh well, we don't
         * make the situation any worse by proceeding as if it had succeeded.
         */
        mem_cgroup_charge(new, mm, GFP_ATOMIC);

        get_page(new);
        pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
        if (is_write_migration_entry(entry))
                pte = pte_mkwrite(pte);
        flush_cache_page(vma, addr, pte_pfn(pte));
        set_pte_at(mm, addr, ptep, pte);

        if (PageAnon(new))
                page_add_anon_rmap(new, vma, addr);
        else
                page_add_file_rmap(new);

        /* No need to invalidate - it was non-present before */
        update_mmu_cache(vma, addr, pte);

out:
        pte_unmap_unlock(ptep, ptl);
}

/*
 * Note that remove_file_migration_ptes will only work on regular mappings;
 * nonlinear mappings do not use migration entries.
 */
static void remove_file_migration_ptes(struct page *old, struct page *new)
{
        struct vm_area_struct *vma;
        struct address_space *mapping = page_mapping(new);
        struct prio_tree_iter iter;
        pgoff_t pgoff = new->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

        if (!mapping)
                return;

        spin_lock(&mapping->i_mmap_lock);

        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff)
                remove_migration_pte(vma, old, new);

        spin_unlock(&mapping->i_mmap_lock);
}

/*
 * Must hold mmap_sem lock on at least one of the vmas containing
 * the page so that the anon_vma cannot vanish.
 */
static void remove_anon_migration_ptes(struct page *old, struct page *new)
{
        struct anon_vma *anon_vma;
        struct vm_area_struct *vma;
        unsigned long mapping;

        mapping = (unsigned long)new->mapping;

        if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)
                return;

        /*
         * We hold the mmap_sem lock. So no need to call page_lock_anon_vma.
         */
        anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
        spin_lock(&anon_vma->lock);

        list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
                remove_migration_pte(vma, old, new);

        spin_unlock(&anon_vma->lock);
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
{
        if (PageAnon(new))
                remove_anon_migration_ptes(old, new);
        else
                remove_file_migration_ptes(old, new);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 *
 * This function is called from do_swap_page().
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
                                unsigned long address)
{
        pte_t *ptep, pte;
        spinlock_t *ptl;
        swp_entry_t entry;
        struct page *page;

        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
        pte = *ptep;
        if (!is_swap_pte(pte))
                goto out;

        entry = pte_to_swp_entry(pte);
        if (!is_migration_entry(entry))
                goto out;

        page = migration_entry_to_page(entry);

        /*
         * Once the radix-tree replacement step of page migration has started,
         * page_count *must* be zero, and we don't want to call
         * wait_on_page_locked() against a page without holding a reference.
         * So we use get_page_unless_zero() here. Even if it fails, the page
         * fault will simply occur again.
         */
        if (!get_page_unless_zero(page))
                goto out;
        pte_unmap_unlock(ptep, ptl);
        wait_on_page_locked(page);
        put_page(page);
        return;
out:
        pte_unmap_unlock(ptep, ptl);
}

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 *   1 for anonymous pages without a mapping
 *   2 for pages with a mapping
 *   3 for pages with a mapping and PagePrivate set.
 */
static int migrate_page_move_mapping(struct address_space *mapping,
                struct page *newpage, struct page *page)
{
        int expected_count;
        void **pslot;

        if (!mapping) {
                /* Anonymous page without mapping */
                if (page_count(page) != 1)
                        return -EAGAIN;
                return 0;
        }

        spin_lock_irq(&mapping->tree_lock);

        pslot = radix_tree_lookup_slot(&mapping->page_tree,
                                        page_index(page));

        expected_count = 2 + !!PagePrivate(page);
        if (page_count(page) != expected_count ||
                        (struct page *)radix_tree_deref_slot(pslot) != page) {
                spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }

        if (!page_freeze_refs(page, expected_count)) {
                spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }

        /*
         * Now we know that no one else is looking at the page.
         */
        get_page(newpage);      /* add cache reference */
#ifdef CONFIG_SWAP
        if (PageSwapCache(page)) {
                SetPageSwapCache(newpage);
                set_page_private(newpage, page_private(page));
        }
#endif

        radix_tree_replace_slot(pslot, newpage);

        page_unfreeze_refs(page, expected_count);
        /*
         * Drop cache reference from old page.
         * We know this isn't the last reference.
         */
        __put_page(page);

        /*
         * If moved to a different zone then also account
         * the page for that zone. Other VM counters will be
         * taken care of when we establish references to the
         * new page and drop references to the old page.
         *
         * Note that anonymous pages are accounted for
         * via NR_FILE_PAGES and NR_ANON_PAGES if they
         * are mapped to swap space.
         */
        __dec_zone_page_state(page, NR_FILE_PAGES);
        __inc_zone_page_state(newpage, NR_FILE_PAGES);

        spin_unlock_irq(&mapping->tree_lock);

        return 0;
}

/*
 * Copy the page to its new location
 */
static void migrate_page_copy(struct page *newpage, struct page *page)
{
        int anon;

        copy_highpage(newpage, page);

        if (PageError(page))
                SetPageError(newpage);
        if (PageReferenced(page))
                SetPageReferenced(newpage);
        if (PageUptodate(page))
                SetPageUptodate(newpage);
        if (TestClearPageActive(page)) {
                VM_BUG_ON(PageUnevictable(page));
                SetPageActive(newpage);
        } else
                unevictable_migrate_page(newpage, page);
        if (PageChecked(page))
                SetPageChecked(newpage);
        if (PageMappedToDisk(page))
                SetPageMappedToDisk(newpage);

        if (PageDirty(page)) {
                clear_page_dirty_for_io(page);
                /*
                 * We want to mark the page and the radix tree as dirty, and
                 * redo the accounting that clear_page_dirty_for_io undid,
                 * but we can't use set_page_dirty because that function
                 * is actually a signal that all of the page has become dirty,
                 * whereas only part of our page may be dirty.
                 */
                __set_page_dirty_nobuffers(newpage);
        }

        mlock_migrate_page(newpage, page);

#ifdef CONFIG_SWAP
        ClearPageSwapCache(page);
#endif
        ClearPagePrivate(page);
        set_page_private(page, 0);
        /* page->mapping contains a flag for PageAnon() */
        anon = PageAnon(page);
        page->mapping = NULL;

        if (!anon)              /* This page was removed from radix-tree. */
                mem_cgroup_uncharge_cache_page(page);

        /*
         * If any waiters have accumulated on the new page then
         * wake them up.
         */
        if (PageWriteback(newpage))
                end_page_writeback(newpage);
}

/************************************************************
 *                    Migration functions
 ***********************************************************/

/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct address_space *mapping,
                        struct page *newpage, struct page *page)
{
        return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
                struct page *newpage, struct page *page)
{
        int rc;

        BUG_ON(PageWriteback(page));    /* Writeback must be complete */

        rc = migrate_page_move_mapping(mapping, newpage, page);

        if (rc)
                return rc;

        migrate_page_copy(newpage, page);
        return 0;
}
EXPORT_SYMBOL(migrate_page);
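/*
 * Address spaces whose pages carry no fs-private buffer state can point
 * their address_space_operations at this helper directly; swap space is
 * one example. A sketch of such a wiring (all other fields omitted):
 *
 *      static const struct address_space_operations example_aops = {
 *              .migratepage    = migrate_page,
 *      };
 */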

#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
                struct page *newpage, struct page *page)
{
        struct buffer_head *bh, *head;
        int rc;

        if (!page_has_buffers(page))
                return migrate_page(mapping, newpage, page);

        head = page_buffers(page);

        rc = migrate_page_move_mapping(mapping, newpage, page);

        if (rc)
                return rc;

        bh = head;
        do {
                get_bh(bh);
                lock_buffer(bh);
                bh = bh->b_this_page;

        } while (bh != head);

        ClearPagePrivate(page);
        set_page_private(newpage, page_private(page));
        set_page_private(page, 0);
        put_page(page);
        get_page(newpage);

        bh = head;
        do {
                set_bh_page(bh, newpage, bh_offset(bh));
                bh = bh->b_this_page;

        } while (bh != head);

        SetPagePrivate(newpage);

        migrate_page_copy(newpage, page);

        bh = head;
        do {
                unlock_buffer(bh);
                put_bh(bh);
                bh = bh->b_this_page;

        } while (bh != head);

        return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif
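/*
 * Block-backed filesystems that keep buffer_heads attached to their page
 * cache pages (ext2, for example) typically select this helper in their
 * address_space_operations, along the lines of:
 *
 *      static const struct address_space_operations example_fs_aops = {
 *              .migratepage    = buffer_migrate_page,
 *      };
 *
 * This is only a sketch; consult the individual filesystem for its aops.
 */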

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_NONE,
                .nr_to_write = 1,
                .range_start = 0,
                .range_end = LLONG_MAX,
                .nonblocking = 1,
                .for_reclaim = 1
        };
        int rc;

        if (!mapping->a_ops->writepage)
                /* No write method for the address space */
                return -EINVAL;

        if (!clear_page_dirty_for_io(page))
                /* Someone else already triggered a write */
                return -EAGAIN;

        /*
         * A dirty page may imply that the underlying filesystem has
         * the page on some queue. So the page must be clean for
         * migration. Writeout may mean we lose the lock and the
         * page state is no longer what we checked for earlier.
         * At this point we know that the migration attempt cannot
         * be successful.
         */
        remove_migration_ptes(page, page);

        rc = mapping->a_ops->writepage(page, &wbc);
        if (rc < 0)
                /* I/O Error writing */
                return -EIO;

        if (rc != AOP_WRITEPAGE_ACTIVATE)
                /* unlocked. Relock */
                lock_page(page);

        return -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
        struct page *newpage, struct page *page)
{
        if (PageDirty(page))
                return writeout(mapping, page);

        /*
         * Buffers may be managed in a filesystem specific way.
         * We must have no buffers or drop them.
         */
        if (PagePrivate(page) &&
            !try_to_release_page(page, GFP_KERNEL))
                return -EAGAIN;

        return migrate_page(mapping, newpage, page);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  == 0 - success
 */
static int move_to_new_page(struct page *newpage, struct page *page)
{
        struct address_space *mapping;
        int rc;

        /*
         * Block others from accessing the page when we get around to
         * establishing additional references. We are the only one
         * holding a reference to the new page at this point.
         */
        if (!trylock_page(newpage))
                BUG();

        /* Prepare mapping for the new page.*/
        newpage->index = page->index;
        newpage->mapping = page->mapping;
        if (PageSwapBacked(page))
                SetPageSwapBacked(newpage);

        mapping = page_mapping(page);
        if (!mapping)
                rc = migrate_page(mapping, newpage, page);
        else if (mapping->a_ops->migratepage)
                /*
                 * Most pages have a mapping and most filesystems
                 * should provide a migration function. Anonymous
                 * pages are part of swap space which also has its
                 * own migration function. This is the most common
                 * path for page migration.
                 */
                rc = mapping->a_ops->migratepage(mapping,
                                                newpage, page);
        else
                rc = fallback_migrate_page(mapping, newpage, page);

        if (!rc) {
                remove_migration_ptes(page, newpage);
        } else
                newpage->mapping = NULL;

        unlock_page(newpage);

        return rc;
}

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
                        struct page *page, int force)
{
        int rc = 0;
        int *result = NULL;
        struct page *newpage = get_new_page(page, private, &result);
        int rcu_locked = 0;
        int charge = 0;

        if (!newpage)
                return -ENOMEM;

        if (page_count(page) == 1) {
                /* page was freed from under us. So we are done. */
                goto move_newpage;
        }

        charge = mem_cgroup_prepare_migration(page, newpage);
        if (charge == -ENOMEM) {
                rc = -ENOMEM;
                goto move_newpage;
        }
        /* prepare cgroup just returns 0 or -ENOMEM */
        BUG_ON(charge);

        rc = -EAGAIN;
        if (!trylock_page(page)) {
                if (!force)
                        goto move_newpage;
                lock_page(page);
        }

        if (PageWriteback(page)) {
                if (!force)
                        goto unlock;
                wait_on_page_writeback(page);
        }
        /*
         * Once try_to_unmap() has run, page->mapcount may drop to 0, and
         * then we cannot notice that the anon_vma is freed while we migrate
         * a page. This rcu_read_lock() delays freeing the anon_vma pointer
         * until the end of migration. File cache pages are no problem
         * because of page_lock(): file caches may use write_page() or
         * lock_page() during migration, so only anonymous pages need this
         * care here.
         */
        if (PageAnon(page)) {
                rcu_read_lock();
                rcu_locked = 1;
        }

        /*
         * Corner case handling:
         * 1. When a new swap-cache page is read into, it is added to the LRU
         * and treated as swapcache but it has no rmap yet.
         * Calling try_to_unmap() against a page->mapping==NULL page will
         * trigger a BUG. So handle it here.
         * 2. An orphaned page (see truncate_complete_page) might have
         * fs-private metadata. The page can be picked up due to memory
         * offlining. Everywhere else except page reclaim, the page is
         * invisible to the vm, so the page can not be migrated. So try to
         * free the metadata, so the page can be freed.
         */
        if (!page->mapping) {
                if (!PageAnon(page) && PagePrivate(page)) {
                        /*
                         * Go direct to try_to_free_buffers() here because
                         * a) that's what try_to_release_page() would do anyway
                         * b) we may be under rcu_read_lock() here, so we can't
                         *    use GFP_KERNEL which is what try_to_release_page()
                         *    needs to be effective.
                         */
                        try_to_free_buffers(page);
                }
                goto rcu_unlock;
        }

        /* Establish migration ptes or remove ptes */
        try_to_unmap(page, 1);

        if (!page_mapped(page))
                rc = move_to_new_page(newpage, page);

        if (rc)
                remove_migration_ptes(page, page);
rcu_unlock:
        if (rcu_locked)
                rcu_read_unlock();

unlock:
        unlock_page(page);

        if (rc != -EAGAIN) {
                /*
                 * A page that has been migrated has all references
                 * removed and will be freed. A page that has not been
                 * migrated will have kept its references and be
                 * restored.
                 */
                list_del(&page->lru);
                putback_lru_page(page);
        }

move_newpage:
        if (!charge)
                mem_cgroup_end_migration(newpage);

        /*
         * Move the new page to the LRU. If migration was not successful
         * then this will free the page.
         */
        putback_lru_page(newpage);

        if (result) {
                if (rc)
                        *result = rc;
                else
                        *result = page_to_nid(newpage);
        }
        return rc;
}

/*
 * migrate_pages
 *
 * The function takes one list of pages to migrate and a function
 * that determines from the page to be migrated and the private data
 * the target of the move and allocates the page.
 *
 * The function returns after 10 attempts or if no pages
 * are movable anymore because the list has become empty
 * or no retryable pages exist anymore. All pages will be
 * returned to the LRU or freed.
 *
 * Return: Number of pages not migrated or error code.
 */
int migrate_pages(struct list_head *from,
                new_page_t get_new_page, unsigned long private)
{
        int retry = 1;
        int nr_failed = 0;
        int pass = 0;
        struct page *page;
        struct page *page2;
        int swapwrite = current->flags & PF_SWAPWRITE;
        int rc;

        if (!swapwrite)
                current->flags |= PF_SWAPWRITE;

        for (pass = 0; pass < 10 && retry; pass++) {
                retry = 0;

                list_for_each_entry_safe(page, page2, from, lru) {
                        cond_resched();

                        rc = unmap_and_move(get_new_page, private,
                                                page, pass > 2);

                        switch (rc) {
                        case -ENOMEM:
                                goto out;
                        case -EAGAIN:
                                retry++;
                                break;
                        case 0:
                                break;
                        default:
                                /* Permanent failure */
                                nr_failed++;
                                break;
                        }
                }
        }
        rc = 0;
out:
        if (!swapwrite)
                current->flags &= ~PF_SWAPWRITE;

        putback_lru_pages(from);

        if (rc)
                return rc;

        return nr_failed + retry;
}
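/*
 * A new_page_t callback receives the page being migrated, the caller's
 * private cookie and a pointer used to report per-page status; it must
 * return a freshly allocated target page or NULL on failure. A minimal
 * sketch (hypothetical name, target node passed through 'private'):
 *
 *      static struct page *alloc_on_node(struct page *page,
 *                                      unsigned long private, int **result)
 *      {
 *              return alloc_pages_node((int)private,
 *                                      GFP_HIGHUSER_MOVABLE, 0);
 *      }
 *
 * new_page_node() below is the real callback used by sys_move_pages().
 */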

#ifdef CONFIG_NUMA
/*
 * Move a list of individual pages
 */
struct page_to_node {
        unsigned long addr;
        struct page *page;
        int node;
        int status;
};

static struct page *new_page_node(struct page *p, unsigned long private,
                int **result)
{
        struct page_to_node *pm = (struct page_to_node *)private;

        while (pm->node != MAX_NUMNODES && pm->page != p)
                pm++;

        if (pm->node == MAX_NUMNODES)
                return NULL;

        *result = &pm->status;

        return alloc_pages_node(pm->node,
                                GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
}

/*
 * Move a set of pages as indicated in the pm array. The addr
 * field must be set to the virtual address of the page to be moved
 * and the node number must contain a valid target node.
 * The pm array ends with node = MAX_NUMNODES.
 */
static int do_move_page_to_node_array(struct mm_struct *mm,
                                      struct page_to_node *pm,
                                      int migrate_all)
{
        int err;
        struct page_to_node *pp;
        LIST_HEAD(pagelist);

        down_read(&mm->mmap_sem);

        /*
         * Build a list of pages to migrate
         */
        migrate_prep();
        for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
                struct vm_area_struct *vma;
                struct page *page;

                /*
                 * A valid page pointer that will not match any of the
                 * pages that will be moved.
                 */
                pp->page = ZERO_PAGE(0);

                err = -EFAULT;
                vma = find_vma(mm, pp->addr);
                if (!vma || !vma_migratable(vma))
                        goto set_status;

                page = follow_page(vma, pp->addr, FOLL_GET);

                err = PTR_ERR(page);
                if (IS_ERR(page))
                        goto set_status;

                err = -ENOENT;
                if (!page)
                        goto set_status;

                if (PageReserved(page))         /* Check for zero page */
                        goto put_and_set;

                pp->page = page;
                err = page_to_nid(page);

                if (err == pp->node)
                        /*
                         * Node already in the right place
                         */
                        goto put_and_set;

                err = -EACCES;
                if (page_mapcount(page) > 1 &&
                                !migrate_all)
                        goto put_and_set;

                err = isolate_lru_page(page);
                if (!err)
                        list_add_tail(&page->lru, &pagelist);
put_and_set:
                /*
                 * Either remove the duplicate refcount from
                 * isolate_lru_page() or drop the page ref if it was
                 * not isolated.
                 */
                put_page(page);
set_status:
                pp->status = err;
        }

        err = 0;
        if (!list_empty(&pagelist))
                err = migrate_pages(&pagelist, new_page_node,
                                (unsigned long)pm);

        up_read(&mm->mmap_sem);
        return err;
}

/*
 * Migrate an array of page addresses onto an array of nodes and fill
 * the corresponding array of status.
 */
static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
                         unsigned long nr_pages,
                         const void __user * __user *pages,
                         const int __user *nodes,
                         int __user *status, int flags)
{
        struct page_to_node *pm = NULL;
        nodemask_t task_nodes;
        int err = 0;
        int i;

        task_nodes = cpuset_mems_allowed(task);

        /* Limit nr_pages so that the multiplication may not overflow */
        if (nr_pages >= ULONG_MAX / sizeof(struct page_to_node) - 1) {
                err = -E2BIG;
                goto out;
        }

        pm = vmalloc((nr_pages + 1) * sizeof(struct page_to_node));
        if (!pm) {
                err = -ENOMEM;
                goto out;
        }

        /*
         * Get parameters from user space and initialize the pm
         * array. Return various errors if the user did something wrong.
         */
        for (i = 0; i < nr_pages; i++) {
                const void __user *p;

                err = -EFAULT;
                if (get_user(p, pages + i))
                        goto out_pm;

                pm[i].addr = (unsigned long)p;
                if (nodes) {
                        int node;

                        if (get_user(node, nodes + i))
                                goto out_pm;

                        err = -ENODEV;
                        if (!node_state(node, N_HIGH_MEMORY))
                                goto out_pm;

                        err = -EACCES;
                        if (!node_isset(node, task_nodes))
                                goto out_pm;

                        pm[i].node = node;
                } else
                        pm[i].node = 0; /* anything to not match MAX_NUMNODES */
        }
        /* End marker */
        pm[nr_pages].node = MAX_NUMNODES;

        err = do_move_page_to_node_array(mm, pm, flags & MPOL_MF_MOVE_ALL);
        if (err >= 0)
                /* Return status information */
                for (i = 0; i < nr_pages; i++)
                        if (put_user(pm[i].status, status + i))
                                err = -EFAULT;

out_pm:
        vfree(pm);
out:
        return err;
}

/*
 * Determine the nodes of an array of pages and store them in an array
 * of status values.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
                         const void __user * __user *pages,
                         int __user *status)
{
        unsigned long i;
        int err;

        down_read(&mm->mmap_sem);

        for (i = 0; i < nr_pages; i++) {
                const void __user *p;
                unsigned long addr;
                struct vm_area_struct *vma;
                struct page *page;

                err = -EFAULT;
                if (get_user(p, pages+i))
                        goto out;
                addr = (unsigned long) p;

                vma = find_vma(mm, addr);
                if (!vma)
                        goto set_status;

                page = follow_page(vma, addr, 0);

                err = PTR_ERR(page);
                if (IS_ERR(page))
                        goto set_status;

                err = -ENOENT;
                /* Use PageReserved to check for zero page */
                if (!page || PageReserved(page))
                        goto set_status;

                err = page_to_nid(page);
set_status:
                put_user(err, status+i);
        }
        err = 0;

out:
        up_read(&mm->mmap_sem);
        return err;
}

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
                        const void __user * __user *pages,
                        const int __user *nodes,
                        int __user *status, int flags)
{
        const struct cred *cred, *tcred;
        struct task_struct *task;
        struct mm_struct *mm;
        int err;

        /* Check flags */
        if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
                return -EINVAL;

        if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
                return -EPERM;

        /* Find the mm_struct */
        read_lock(&tasklist_lock);
        task = pid ? find_task_by_vpid(pid) : current;
        if (!task) {
                read_unlock(&tasklist_lock);
                return -ESRCH;
        }
        mm = get_task_mm(task);
        read_unlock(&tasklist_lock);

        if (!mm)
                return -EINVAL;

        /*
         * Check if this process has the right to modify the specified
         * process. The right exists if the process has administrative
         * capabilities, superuser privileges or the same
         * userid as the target process.
         */
        cred = current->cred;
        tcred = task->cred;
        if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
            cred->uid  != tcred->suid && cred->uid  != tcred->uid &&
            !capable(CAP_SYS_NICE)) {
                err = -EPERM;
                goto out;
        }

        err = security_task_movememory(task);
        if (err)
                goto out;

        if (nodes) {
                err = do_pages_move(mm, task, nr_pages, pages, nodes, status,
                                    flags);
        } else {
                err = do_pages_stat(mm, nr_pages, pages, status);
        }

out:
        mmput(mm);
        return err;
}
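/*
 * From user space this entry point is normally reached through the
 * move_pages(2) wrapper provided by libnuma (<numaif.h>, link with
 * -lnuma). A minimal sketch, assuming one page-aligned buffer 'buf'
 * should move to node 1:
 *
 *      void *pages[1] = { buf };
 *      int nodes[1] = { 1 };
 *      int status[1];
 *
 *      move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
 *
 * Passing nodes == NULL moves nothing and merely reports the node of
 * each page in status[] (the do_pages_stat() path above).
 */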

/*
 * Call migration functions in the vma_ops that may prepare
 * memory in a vm for migration. Migration functions may perform
 * the migration for vmas that do not have an underlying page struct.
 */
int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
        const nodemask_t *from, unsigned long flags)
{
        struct vm_area_struct *vma;
        int err = 0;

        for (vma = mm->mmap; vma->vm_next && !err; vma = vma->vm_next) {
                if (vma->vm_ops && vma->vm_ops->migrate) {
                        err = vma->vm_ops->migrate(vma, to, from, flags);
                        if (err)
                                break;
                }
        }
        return err;
}
#endif