/*
 * linux/mm/memory_hotplug.c
 *
 * Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>

#include <asm/tlbflush.h>

#include "internal.h"

/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size)
{
	struct resource *res;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(!res);

	res->name = "System RAM";
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	if (request_resource(&iomem_resource, res) < 0) {
		printk(KERN_ERR "System RAM resource %llx - %llx cannot be added\n",
		       (unsigned long long)res->start,
		       (unsigned long long)res->end);
		kfree(res);
		res = NULL;
	}
	return res;
}

static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
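/*
 * Bootmem-allocated pages that back the memmap and node data cannot go
 * through the normal allocator when freed, so we track them by hand:
 * the page type (SECTION_INFO, MIX_SECTION_INFO or NODE_INFO) is stashed
 * in page->_mapcount, the owning section or node in page->private, and
 * page->_count is used as the reference count.
 */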
static void get_page_bootmem(unsigned long info, struct page *page, int type)
{
	atomic_set(&page->_mapcount, type);
	SetPagePrivate(page);
	set_page_private(page, info);
	atomic_inc(&page->_count);
}

/* reference to __meminit __free_pages_bootmem is valid
 * so use __ref to tell modpost not to generate a warning */
void __ref put_page_bootmem(struct page *page)
{
	int type;

	type = atomic_read(&page->_mapcount);
	BUG_ON(type >= -1);

	if (atomic_dec_return(&page->_count) == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		reset_page_mapcount(page);
		__free_pages_bootmem(page, 0);
	}
}

static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	if (!pfn_valid(start_pfn))
		return;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	/* Get section's memmap address */
	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	/*
	 * Get page for the memmap's phys address
	 * XXX: need more consideration for sparse_vmemmap...
	 */
	page = virt_to_page(memmap);
	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

	/* remember memmap's page */
	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, SECTION_INFO);

	usemap = __nr_to_section(section_nr)->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}

void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
	unsigned long i, pfn, end_pfn, nr_pages;
	int node = pgdat->node_id;
	struct page *page;
	struct zone *zone;

	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
	page = virt_to_page(pgdat);

	for (i = 0; i < nr_pages; i++, page++)
		get_page_bootmem(node, page, NODE_INFO);

	zone = &pgdat->node_zones[0];
	for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
		if (zone->wait_table) {
			nr_pages = zone->wait_table_hash_nr_entries
				* sizeof(wait_queue_head_t);
			nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
			page = virt_to_page(zone->wait_table);

			for (i = 0; i < nr_pages; i++, page++)
				get_page_bootmem(node, page, NODE_INFO);
		}
	}

	pfn = pgdat->node_start_pfn;
	end_pfn = pfn + pgdat->node_spanned_pages;

	/* register section info */
	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION)
		register_page_bootmem_info_section(pfn);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

static void grow_zone_span(struct zone *zone, unsigned long start_pfn,
			   unsigned long end_pfn)
{
	unsigned long old_zone_end_pfn;

	zone_span_writelock(zone);

	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	if (start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
				zone->zone_start_pfn;

	zone_span_writeunlock(zone);
}

static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
			    unsigned long end_pfn)
{
	unsigned long old_pgdat_end_pfn =
		pgdat->node_start_pfn + pgdat->node_spanned_pages;

	if (start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
					pgdat->node_start_pfn;
}

static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int nid = pgdat->node_id;
	int zone_type;
	unsigned long flags;

	zone_type = zone - pgdat->node_zones;
	if (!zone->wait_table) {
		int ret;

		ret = init_currently_empty_zone(zone, phys_start_pfn,
						nr_pages, MEMMAP_HOTPLUG);
		if (ret)
			return ret;
	}
	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
			phys_start_pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
	memmap_init_zone(nr_pages, nid, zone_type,
			 phys_start_pfn, MEMMAP_HOTPLUG);
	return 0;
}

static int __meminit __add_section(int nid, struct zone *zone,
				   unsigned long phys_start_pfn)
{
	int nr_pages = PAGES_PER_SECTION;
	int ret;

	if (pfn_valid(phys_start_pfn))
		return -EEXIST;

	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);

	if (ret < 0)
		return ret;

	ret = __add_zone(zone, phys_start_pfn);

	if (ret < 0)
		return ret;

	return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
	/*
	 * XXX: Freeing the memmap with vmemmap is not implemented yet.
	 * This should be removed later.
	 */
	return -EBUSY;
}
#else
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
	unsigned long flags;
	struct pglist_data *pgdat = zone->zone_pgdat;
	int ret = -EINVAL;

	if (!valid_section(ms))
		return ret;

	ret = unregister_memory_section(ms);
	if (ret)
		return ret;

	pgdat_resize_lock(pgdat, &flags);
	sparse_remove_one_section(zone, ms);
	pgdat_resize_unlock(pgdat, &flags);
	return 0;
}
#endif

/*
 * Reasonably generic function for adding memory. It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
		      unsigned long nr_pages)
{
	unsigned long i;
	int err = 0;
	int start_sec, end_sec;
	/* during memmap initialization, align the hot-added range to sections */
	start_sec = pfn_to_section_nr(phys_start_pfn);
	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

	for (i = start_sec; i <= end_sec; i++) {
		err = __add_section(nid, zone, i << PFN_SECTION_SHIFT);

		/*
		 * -EEXIST is finally dealt with by the ioresource collision
		 * check; see add_memory() => register_memory_resource().
		 * A warning is printed if there is a collision.
		 */
		if (err && (err != -EEXIST))
			break;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(__add_pages);

/**
 * __remove_pages() - remove sections of pages from a zone
 * @zone: zone from which pages need to be removed
 * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
		   unsigned long nr_pages)
{
	unsigned long i;
	int sections_to_remove;
	int ret = 0;

	/*
	 * We can only remove entire sections
	 */
	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
	BUG_ON(nr_pages % PAGES_PER_SECTION);

	sections_to_remove = nr_pages / PAGES_PER_SECTION;
	for (i = 0; i < sections_to_remove; i++) {
		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
		release_mem_region(pfn << PAGE_SHIFT,
				   PAGES_PER_SECTION << PAGE_SHIFT);
		ret = __remove_section(zone, __pfn_to_section(pfn));
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(__remove_pages);

void online_page(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);

	totalram_pages++;
	if (pfn >= num_physpages)
		num_physpages = pfn + 1;

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page))
		totalhigh_pages++;
#endif

#ifdef CONFIG_FLATMEM
	max_mapnr = max(page_to_pfn(page), max_mapnr);
#endif

	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
}

static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
			      void *arg)
{
	unsigned long i;
	unsigned long onlined_pages = *(unsigned long *)arg;
	struct page *page;

	if (PageReserved(pfn_to_page(start_pfn)))
		for (i = 0; i < nr_pages; i++) {
			page = pfn_to_page(start_pfn + i);
			online_page(page);
			onlined_pages++;
		}
	*(unsigned long *)arg = onlined_pages;
	return 0;
}

int online_pages(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long onlined_pages = 0;
	struct zone *zone;
	int need_zonelists_rebuild = 0;
	int nid;
	int ret;
	struct memory_notify arg;

	arg.start_pfn = pfn;
	arg.nr_pages = nr_pages;
	arg.status_change_nid = -1;

	nid = page_to_nid(pfn_to_page(pfn));
	if (node_present_pages(nid) == 0)
		arg.status_change_nid = nid;

	ret = memory_notify(MEM_GOING_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret) {
		memory_notify(MEM_CANCEL_ONLINE, &arg);
		return ret;
	}
	/*
	 * This doesn't need a lock to do pfn_to_page().
	 * The section can't be removed here because of the
	 * memory_block->state_mutex.
	 */
	zone = page_zone(pfn_to_page(pfn));
	/*
	 * If this zone is not populated, then it is not in the zonelist.
	 * This means the page allocator ignores this zone.
	 * So, the zonelist must be updated after onlining.
	 */
	mutex_lock(&zonelists_mutex);
	if (!populated_zone(zone))
		need_zonelists_rebuild = 1;

	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
		online_pages_range);
	if (ret) {
		mutex_unlock(&zonelists_mutex);
		printk(KERN_DEBUG "online_pages %lx at %lx failed\n",
			nr_pages, pfn);
		memory_notify(MEM_CANCEL_ONLINE, &arg);
		return ret;
	}

	zone->present_pages += onlined_pages;
	zone->zone_pgdat->node_present_pages += onlined_pages;
	if (need_zonelists_rebuild)
		build_all_zonelists(zone);
	else
		zone_pcp_update(zone);

	mutex_unlock(&zonelists_mutex);
	setup_per_zone_wmarks();
	calculate_zone_inactive_ratio(zone);
	if (onlined_pages) {
		kswapd_run(zone_to_nid(zone));
		node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
	}

	vm_total_pages = nr_free_pagecache_pages();

	writeback_set_ratelimit();

	if (onlined_pages)
		memory_notify(MEM_ONLINE, &arg);

	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};
	unsigned long start_pfn = start >> PAGE_SHIFT;

	pgdat = arch_alloc_nodedata(nid);
	if (!pgdat)
		return NULL;

	arch_refresh_nodedata(nid, pgdat);

	/* we can use NODE_DATA(nid) from here */

	/* init the node's zones as empty zones; we don't have any present pages */
	free_area_init_node(nid, zones_size, start_pfn, zholes_size);

	return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
	arch_refresh_nodedata(nid, NULL);
	arch_free_nodedata(pgdat);
}

/*
 * called by cpu_up() to online a node without onlined memory.
 */
int mem_online_node(int nid)
{
	pg_data_t *pgdat;
	int ret;

	lock_system_sleep();
	pgdat = hotadd_new_pgdat(nid, 0);
	if (!pgdat) {
		ret = -ENOMEM;
		goto out;
	}
	node_set_online(nid);
	ret = register_one_node(nid);
	BUG_ON(ret);

out:
	unlock_system_sleep();
	return ret;
}

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
int __ref add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat = NULL;
	int new_pgdat = 0;
	struct resource *res;
	int ret;

	lock_system_sleep();

	res = register_memory_resource(start, size);
	ret = -EEXIST;
	if (!res)
		goto out;

	if (!node_online(nid)) {
		pgdat = hotadd_new_pgdat(nid, start);
		ret = -ENOMEM;
		if (!pgdat)
			goto out;
		new_pgdat = 1;
	}

	/* call arch's memory hotadd */
	ret = arch_add_memory(nid, start, size);

	if (ret < 0)
		goto error;

	/* we online the node here; we can't roll back from here */
	node_set_online(nid);

	if (new_pgdat) {
		ret = register_one_node(nid);
		/*
		 * If the sysfs file of the new node can't be created, CPUs
		 * on the node can't be hot-added. There is no way to roll
		 * back now, so check with BUG_ON() to catch it, reluctantly.
		 */
		BUG_ON(ret);
	}

	/* create new memmap entry */
	firmware_map_add_hotplug(start, start + size, "System RAM");

	goto out;

error:
	/* rollback pgdat allocation and others */
	if (new_pgdat)
		rollback_node_hotadd(nid, pgdat);
	if (res)
		release_memory_resource(res);

out:
	unlock_system_sleep();
	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
 * set and the size of the free page is given by page_order(). Using this,
 * the function determines if the pageblock contains only free pages.
 * Due to buddy constraints, a free page at least the size of a pageblock
 * will be located at the start of the pageblock.
 */
static inline int pageblock_free(struct page *page)
{
	return PageBuddy(page) && page_order(page) >= pageblock_order;
}

/* Return the start of the next active pageblock after a given page */
static struct page *next_active_pageblock(struct page *page)
{
	/* Ensure the starting page is pageblock-aligned */
	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));

	/* If the entire pageblock is free, move to the end of the free page */
	if (pageblock_free(page)) {
		int order;
		/* be careful: we don't hold any locks, so page_order can change */
		order = page_order(page);
		if ((order < MAX_ORDER) && (order >= pageblock_order))
			return page + (1 << order);
	}

	return page + pageblock_nr_pages;
}

/* Checks if this range of memory is likely to be hot-removable. */
int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
{
	struct page *page = pfn_to_page(start_pfn);
	struct page *end_page = page + nr_pages;

	/* Check the starting page of each pageblock within the range */
	for (; page < end_page; page = next_active_pageblock(page)) {
		if (!is_pageblock_removable_nolock(page))
			return 0;
		cond_resched();
	}

	/* All pageblocks in the memory block are likely to be hot-removable */
	return 1;
}

/*
 * Confirm that all pages in the range [start, end) belong to the same zone.
 */
static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct zone *zone = NULL;
	struct page *page;
	int i;
	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += MAX_ORDER_NR_PAGES) {
		i = 0;
		/* This is just a CONFIG_HOLES_IN_ZONE check. */
		while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
			i++;
		if (i == MAX_ORDER_NR_PAGES)
			continue;
		page = pfn_to_page(pfn + i);
		if (zone && page_zone(page) != zone)
			return 0;
		zone = page_zone(page);
	}
	return 1;
}

/*
 * Scanning pfns is much easier than scanning the LRU lists.
 * Scan pfns from start to end and return the pfn of the first LRU page found.
 */
static unsigned long scan_lru_pages(unsigned long start, unsigned long end)
{
	unsigned long pfn;
	struct page *page;
	for (pfn = start; pfn < end; pfn++) {
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (PageLRU(page))
				return pfn;
		}
	}
	return 0;
}

static struct page *
hotremove_migrate_alloc(struct page *page, unsigned long private, int **x)
{
	/* This allocation policy should be improved. */
	return alloc_page(GFP_HIGHUSER_MOVABLE);
}

#define NR_OFFLINE_AT_ONCE_PAGES	(256)
static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
	int not_managed = 0;
	int ret = 0;
	LIST_HEAD(source);

	for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		if (!page_count(page))
			continue;
		/*
		 * We can skip free pages. And we can only deal with pages
		 * on the LRU.
		 */
		ret = isolate_lru_page(page);
		if (!ret) { /* Success */
			list_add_tail(&page->lru, &source);
			move_pages--;
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		} else {
#ifdef CONFIG_DEBUG_VM
			printk(KERN_ALERT "removing pfn %lx from LRU failed\n",
			       pfn);
			dump_page(page);
#endif
			/*
			 * Because we don't hold the big zone->lock, we
			 * should check this again here.
			 */
			if (page_count(page)) {
				not_managed++;
				ret = -EBUSY;
				break;
			}
		}
	}
	if (!list_empty(&source)) {
		if (not_managed) {
			putback_lru_pages(&source);
			goto out;
		}
		/* this function returns # of failed pages */
		ret = migrate_pages(&source, hotremove_migrate_alloc, 0, 1);
		if (ret)
			putback_lru_pages(&source);
	}
out:
	return ret;
}

/*
 * remove from free_area[] and mark all as Reserved.
 */
static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
			  void *data)
{
	__offline_isolated_pages(start, start + nr_pages);
	return 0;
}

static void
offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
			      offline_isolated_pages_cb);
}

/*
 * Check that all pages in the range, recorded as a memory resource,
 * are isolated.
 */
static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
			void *data)
{
	int ret;
	long offlined;

	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages);
	offlined = nr_pages;
	if (!ret)
		*(long *)data += offlined;
	return ret;
}

static long
check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
	long offlined = 0;
	int ret;

	ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
				    check_pages_isolated_cb);
	if (ret < 0)
		offlined = (long)ret;
	return offlined;
}

static int offline_pages(unsigned long start_pfn,
			 unsigned long end_pfn, unsigned long timeout)
{
	unsigned long pfn, nr_pages, expire;
	long offlined_pages;
	int ret, drain, retry_max, node;
	struct zone *zone;
	struct memory_notify arg;

	BUG_ON(start_pfn >= end_pfn);
	/* at least, alignment against pageblock is necessary */
	if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
		return -EINVAL;
	if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
		return -EINVAL;
	/*
	 * This makes hotplug much easier and more readable.
	 * We assume this for now.
	 */
	if (!test_pages_in_a_zone(start_pfn, end_pfn))
		return -EINVAL;

	lock_system_sleep();

	zone = page_zone(pfn_to_page(start_pfn));
	node = zone_to_nid(zone);
	nr_pages = end_pfn - start_pfn;

	/* set above range as isolated */
	ret = start_isolate_page_range(start_pfn, end_pfn);
	if (ret)
		goto out;

	arg.start_pfn = start_pfn;
	arg.nr_pages = nr_pages;
	arg.status_change_nid = -1;
	if (nr_pages >= node_present_pages(node))
		arg.status_change_nid = node;

	ret = memory_notify(MEM_GOING_OFFLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto failed_removal;

	pfn = start_pfn;
	expire = jiffies + timeout;
	drain = 0;
	retry_max = 5;
repeat:
	/* start memory hot removal */
	ret = -EAGAIN;
	if (time_after(jiffies, expire))
		goto failed_removal;
	ret = -EINTR;
	if (signal_pending(current))
		goto failed_removal;
	ret = 0;
	if (drain) {
		lru_add_drain_all();
		cond_resched();
		drain_all_pages();
	}

	pfn = scan_lru_pages(start_pfn, end_pfn);
	if (pfn) { /* We have a page on the LRU */
		ret = do_migrate_range(pfn, end_pfn);
		if (!ret) {
			drain = 1;
			goto repeat;
		} else {
			if (ret < 0)
				if (--retry_max == 0)
					goto failed_removal;
			yield();
			drain = 1;
			goto repeat;
		}
	}
	/* drain all zones' LRU pagevecs; this is asynchronous */
	lru_add_drain_all();
	yield();
	/* drain pcp pages; this is synchronous */
	drain_all_pages();
	/* check again */
	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
	if (offlined_pages < 0) {
		ret = -EBUSY;
		goto failed_removal;
	}
	printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
	/*
	 * OK, all of our target pages are isolated.
	 * We cannot do a rollback from this point on.
	 */
	offline_isolated_pages(start_pfn, end_pfn);
	/* reset pageblock flags; the migrate type becomes MOVABLE again */
	undo_isolate_page_range(start_pfn, end_pfn);
	/* removal success */
	zone->present_pages -= offlined_pages;
	zone->zone_pgdat->node_present_pages -= offlined_pages;
	totalram_pages -= offlined_pages;

	setup_per_zone_wmarks();
	calculate_zone_inactive_ratio(zone);
	if (!node_present_pages(node)) {
		node_clear_state(node, N_HIGH_MEMORY);
		kswapd_stop(node);
	}

	vm_total_pages = nr_free_pagecache_pages();
	writeback_set_ratelimit();

	memory_notify(MEM_OFFLINE, &arg);
	unlock_system_sleep();
	return 0;

failed_removal:
	printk(KERN_INFO "memory offlining %lx to %lx failed\n",
	       start_pfn, end_pfn);
	memory_notify(MEM_CANCEL_OFFLINE, &arg);
	/* pushback to free area */
	undo_isolate_page_range(start_pfn, end_pfn);

out:
	unlock_system_sleep();
	return ret;
}

int remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn, end_pfn;

	start_pfn = PFN_DOWN(start);
	end_pfn = start_pfn + PFN_DOWN(size);
	return offline_pages(start_pfn, end_pfn, 120 * HZ);
}
#else
int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
EXPORT_SYMBOL_GPL(remove_memory);