/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
	____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
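
/*
 * For illustration only (assuming common x86-64 values, 46 physical
 * address bits and 128MB sections, neither of which this file
 * guarantees): NR_MEM_SECTIONS is 2^19, so SPARSEMEM_EXTREME keeps
 * only the NR_SECTION_ROOTS pointers above and allocates each
 * page-sized root of SECTIONS_PER_ROOT mem_sections on demand,
 * rather than statically reserving half a million mem_section
 * entries.
 */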

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available()) {
		if (node_state(nid, N_HIGH_MEMORY))
			section = kmalloc_node(array_size, GFP_KERNEL, nid);
		else
			section = kmalloc(array_size, GFP_KERNEL);
	} else
		section = alloc_bootmem_node(NODE_DATA(nid), array_size);

	if (section)
		memset(section, 0, array_size);

	return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	static DEFINE_SPINLOCK(index_init_lock);
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;
	int ret = 0;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;
	/*
	 * This lock keeps two concurrent callers from installing an
	 * array for the same root index.
	 */
	spin_lock(&index_init_lock);

	if (mem_section[root]) {
		ret = -EEXIST;
		goto out;
	}

	mem_section[root] = section;
out:
	spin_unlock(&index_init_lock);
	return ret;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section* ms)
{
	unsigned long root_nr;
	struct mem_section* root;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
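
/*
 * A sketch of the reverse lookup above (values illustrative only):
 * each root covers a contiguous block of SECTIONS_PER_ROOT
 * mem_sections, so a simple bounds check identifies the root, e.g.
 *
 *	ms == &mem_section[2][5]  =>  root_nr == 2, ms - root == 5
 *	__section_nr(ms) == 2 * SECTIONS_PER_ROOT + 5
 */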

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}
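
/*
 * Round-trip example for the early-nid encoding (illustrative): after
 *
 *	ms->section_mem_map = sparse_encode_early_nid(nid) |
 *			      SECTION_MARKED_PRESENT;
 *
 * sparse_early_nid(ms) == nid, because SECTION_NID_SHIFT leaves the
 * low flag bits free and the right shift discards them on decode.
 */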

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}
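
/*
 * A worked example (architecture-dependent, for illustration): with
 * MAX_PHYSMEM_BITS == 46 and PAGE_SHIFT == 12, max_sparsemem_pfn is
 * 1UL << 34.  An end_pfn of 1UL << 35 is clamped to 1UL << 34 with a
 * one-time warning, while a start_pfn beyond the limit collapses the
 * whole range to an empty one.
 */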

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map)
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_MARKED_PRESENT;
	}
}
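
/*
 * Usage example (illustrative, assuming 128MB sections and 4K pages,
 * i.e. PAGES_PER_SECTION == 32768):
 *
 *	memory_present(0, 0, 3 * 32768);
 *
 * marks sections 0, 1 and 2 present on node 0; each section's
 * section_mem_map carries the encoded node id until sparse_init()
 * installs the real mem_map.
 */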

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						  unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_present(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}

/*
 * Subtle: we encode the section's first pfn into the mem_map pointer,
 * so that "page - section_mem_map" yields the page's actual physical
 * page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
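
/*
 * The encode/decode pair above forms an identity (illustrative):
 *
 *	coded = sparse_encode_mem_map(map, pnum);
 *	sparse_decode_mem_map(coded, pnum) == map;
 *
 * and because coded == map - section_nr_to_pfn(pnum), translating a
 * pfn within the section is a plain addition:
 *
 *	(struct page *)(coded & SECTION_MAP_MASK) + pfn
 *		== &map[pfn - section_nr_to_pfn(pnum)]
 */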

static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	if (!present_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;

	return 1;
}

unsigned long usemap_size(void)
{
	unsigned long size_bytes;
	size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
	size_bytes = roundup(size_bytes, sizeof(unsigned long));
	return size_bytes;
}
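
/*
 * Worked example (illustrative, using typical x86-64 values of the
 * era: 128MB sections, 2MB pageblocks, 3 bits per pageblock):
 * SECTION_BLOCKFLAGS_BITS == 64 * 3 == 192 bits, i.e. 24 bytes after
 * rounding -- the "24 bytes" figure quoted in sparse_init() below.
 */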

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long count)
{
	unsigned long section_nr;

	/*
	 * A page may contain usemaps for other sections, preventing that
	 * page from being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	section_nr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	return alloc_bootmem_section(usemap_size() * count, section_nr);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
	static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		printk(KERN_INFO
		       "node %d must be removed before removing section %ld\n",
		       nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency.
	 * Some platforms allow un-removable sections because they will just
	 * gather other removable sections for dynamic partitioning.
	 * Just report the un-removable section numbers here.
	 */
	printk(KERN_INFO "Section %ld and %ld (node %d)", usemap_snr,
	       pgdat_snr, nid);
	printk(KERN_CONT
	       " have a circular dependency on usemap and pgdat allocations\n");
}
#else
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long count)
{
	return NULL;
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

static void __init sparse_early_usemaps_alloc_node(unsigned long **usemap_map,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long usemap_count, int nodeid)
{
	void *usemap;
	unsigned long pnum;
	int size = usemap_size();

	usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
							  usemap_count);
	if (usemap) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			usemap_map[pnum] = usemap;
			usemap += size;
		}
		return;
	}

	usemap = alloc_bootmem_node(NODE_DATA(nodeid), size * usemap_count);
	if (usemap) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			usemap_map[pnum] = usemap;
			usemap += size;
			check_usemap_section_nr(nodeid, usemap_map[pnum]);
		}
		return;
	}

	printk(KERN_WARNING "%s: allocation failed\n", __func__);
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map;
	unsigned long size;

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
	map = __alloc_bootmem_node_high(NODE_DATA(nid), size,
					PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	return map;
}
void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	void *map;
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;

	map = alloc_remap(nodeid, size * map_count);
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	size = PAGE_ALIGN(size);
	map = __alloc_bootmem_node_high(NODE_DATA(nodeid), size * map_count,
					PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	/* fallback */
	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		printk(KERN_ERR "%s: sparsemem memory map backing failed, "
		       "some memory will not be available.\n", __func__);
		ms->section_mem_map = 0;
	}
}
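
/*
 * Summary of the strategy above: try one node-local allocation that
 * covers every present section's mem_map (first via alloc_remap(),
 * then via a single large bootmem block), and only fall back to
 * per-section sparse_mem_map_populate() calls when both bulk paths
 * fail, zeroing section_mem_map for any section that still cannot
 * be backed.
 */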
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
static void __init sparse_early_mem_maps_alloc_node(struct page **map_map,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long map_count, int nodeid)
{
	sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
				      map_count, nodeid);
}
#else
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = sparse_mem_map_populate(pnum, nid);
	if (map)
		return map;

	printk(KERN_ERR "%s: sparsemem memory map backing failed, "
	       "some memory will not be available.\n", __func__);
	ms->section_mem_map = 0;
	return NULL;
}
#endif

void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;
	unsigned long **usemap_map;
	int size;
	int nodeid_begin = 0;
	unsigned long pnum_begin = 0;
	unsigned long usemap_count;
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	unsigned long map_count;
	int size2;
	struct page **map_map;
#endif

	/*
	 * Each mem_map is a big page (2M on 64-bit x86) while each usemap
	 * is under one page (about 24 bytes), so allocating them
	 * alternately (a 2M-aligned map, then a tiny usemap) pushes every
	 * following map to the next 2M boundary and, on a big system,
	 * riddles memory with holes. Instead, try to allocate the 2M
	 * pages contiguously.
	 *
	 * powerpc needs to call sparse_init_one_section right after each
	 * sparse_early_mem_map_alloc, so allocate usemap_map first.
	 */
	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
	usemap_map = alloc_bootmem(size);
	if (!usemap_map)
		panic("can not allocate usemap_map\n");

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid_begin = sparse_early_nid(ms);
		pnum_begin = pnum;
		break;
	}
	usemap_count = 1;
	for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;
		int nodeid;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid = sparse_early_nid(ms);
		if (nodeid == nodeid_begin) {
			usemap_count++;
			continue;
		}
		/* ok, we need to take care of sections pnum_begin to pnum - 1 */
		sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, pnum,
						usemap_count, nodeid_begin);
		/* new start, update count etc. */
		nodeid_begin = nodeid;
		pnum_begin = pnum;
		usemap_count = 1;
	}
	/* ok, last chunk */
	sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, NR_MEM_SECTIONS,
					usemap_count, nodeid_begin);

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
	map_map = alloc_bootmem(size2);
	if (!map_map)
		panic("can not allocate map_map\n");

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid_begin = sparse_early_nid(ms);
		pnum_begin = pnum;
		break;
	}
	map_count = 1;
	for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;
		int nodeid;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid = sparse_early_nid(ms);
		if (nodeid == nodeid_begin) {
			map_count++;
			continue;
		}
		/* ok, we need to take care of sections pnum_begin to pnum - 1 */
		sparse_early_mem_maps_alloc_node(map_map, pnum_begin, pnum,
						 map_count, nodeid_begin);
		/* new start, update count etc. */
		nodeid_begin = nodeid;
		pnum_begin = pnum;
		map_count = 1;
	}
	/* ok, last chunk */
	sparse_early_mem_maps_alloc_node(map_map, pnum_begin, NR_MEM_SECTIONS,
					 map_count, nodeid_begin);
#endif

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;

		usemap = usemap_map[pnum];
		if (!usemap)
			continue;

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
		map = map_map[pnum];
#else
		map = sparse_early_mem_map_alloc(pnum);
#endif
		if (!map)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
					usemap);
	}

	vmemmap_populate_print_last();

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	free_bootmem(__pa(map_map), size2);
#endif
	free_bootmem(__pa(usemap_map), size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						  unsigned long nr_pages)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	return; /* XXX: Not implemented yet */
}
static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
}
#else
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * nr_pages;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
	memset(ret, 0, memmap_size);

	return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						  unsigned long nr_pages)
{
	return __kmalloc_section_memmap(nr_pages);
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	if (is_vmalloc_addr(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * nr_pages));
}
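
/*
 * Note on the pairing above: __kmalloc_section_memmap() has exactly
 * two success paths -- the page allocator first, vmalloc() as the
 * fallback -- so is_vmalloc_addr() suffices to route each free to
 * vfree() or free_pages() respectively.
 */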

static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	int magic;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = atomic_read(&page->_mapcount);

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page->private;

		/*
		 * When this function is called, the section being removed
		 * has already been logically offlined, so all of its pages
		 * are isolated from the page allocator. If the memmap of
		 * the section being removed lives in that same section, it
		 * must not be freed here: the page allocator could hand it
		 * out again just before the memory is removed physically.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
	struct page *usemap_page;
	unsigned long nr_pages;

	if (!usemap)
		return;

	usemap_page = virt_to_page(usemap);
	/*
	 * Check to see if the allocation came from hot-plug-add: a
	 * kmalloc'ed usemap is backed by a slab page, a boot-time
	 * one is not.
	 */
	if (PageSlab(usemap_page)) {
		kfree(usemap);
		if (memmap)
			__kfree_section_memmap(memmap, PAGES_PER_SECTION);
		return;
	}

	/*
	 * The usemap came from bootmem. It is packed together with other
	 * usemaps in the section that holds the pgdat at boot time, so
	 * just keep it as is for now.
	 */
722 | if (memmap) { | |
723 | struct page *memmap_page; | |
724 | memmap_page = virt_to_page(memmap); | |
725 | ||
726 | nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page)) | |
727 | >> PAGE_SHIFT; | |
728 | ||
729 | free_map_bootmem(memmap_page, nr_pages); | |
730 | } | |
731 | } | |
732 | ||
/*
 * Returns the number of sections whose mem_maps were properly set.
 * If this is <= 0, the passed-in map was not consumed and must be
 * freed.
 */
int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
				     int nr_pages)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	unsigned long flags;
	int ret;

	/*
	 * No locking here: sparse_index_init() does its own locking,
	 * and it may kmalloc.
	 */
	ret = sparse_index_init(section_nr, pgdat->node_id);
	if (ret < 0 && ret != -EEXIST)
		return ret;
	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
	if (!memmap)
		return -ENOMEM;
	usemap = __kmalloc_section_usemap();
	if (!usemap) {
		__kfree_section_memmap(memmap, nr_pages);
		return -ENOMEM;
	}

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

	ms->section_mem_map |= SECTION_MARKED_PRESENT;

	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0) {
		kfree(usemap);
		__kfree_section_memmap(memmap, nr_pages);
	}
	return ret;
}

void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
{
	struct page *memmap = NULL;
	unsigned long *usemap = NULL;

	if (ms->section_mem_map) {
		usemap = ms->pageblock_flags;
		memmap = sparse_decode_mem_map(ms->section_mem_map,
						__section_nr(ms));
		ms->section_mem_map = 0;
		ms->pageblock_flags = NULL;
	}

	free_section_usemap(memmap, usemap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */