/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
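/*
 * With SPARSEMEM_EXTREME the per-root arrays below are allocated on
 * demand, one root at a time; otherwise mem_section is a single
 * static two-dimensional array.
 */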
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
	____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
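/*
 * Allocate the per-root array of mem_section structures for @nid:
 * from the slab allocator once it is up, from bootmem before that.
 */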
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available())
		section = kmalloc_node(array_size, GFP_KERNEL, nid);
	else
		section = alloc_bootmem_node(NODE_DATA(nid), array_size);

	if (section)
		memset(section, 0, array_size);

	return section;
}
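
/*
 * Make sure mem_section[SECTION_NR_TO_ROOT(section_nr)] exists,
 * allocating it if necessary.  Returns -EEXIST when the root is
 * already set up, -ENOMEM when the allocation fails, 0 on success.
 */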
static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	static DEFINE_SPINLOCK(index_init_lock);
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;
	int ret = 0;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;
	/*
	 * This lock keeps two different sections from
	 * reallocating for the same index
	 */
	spin_lock(&index_init_lock);

	if (mem_section[root]) {
		ret = -EEXIST;
		goto out;
	}

	mem_section[root] = section;
out:
	spin_unlock(&index_init_lock);
	return ret;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section* ms)
{
	unsigned long root_nr;
	struct mem_section* root;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long max_arch_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);
	unsigned long pfn;

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (start >= max_arch_pfn)
		return;
	if (end >= max_arch_pfn)
		end = max_arch_pfn;

	start &= PAGE_SECTION_MASK;
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map)
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_MARKED_PRESENT;
	}
}

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
					    unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_present(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}

/*
 * Subtle: we encode the real pfn into the mem_map pointer such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}

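/*
 * Worked example (illustrative only; the section size is set by the
 * architecture): with 4 KiB pages and 128 MiB sections there are
 * 32768 pages per section, so section 3 starts at pfn 98304.
 * Encoding stores mem_map - 98304 (in units of struct page);
 * decoding adds 98304 back, and page_to_pfn() becomes the simple
 * identity page - coded_mem_map.
 */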
static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	if (!present_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;

	return 1;
}
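
/*
 * Size, in bytes, of one section's pageblock-flags bitmap:
 * SECTION_BLOCKFLAGS_BITS bits, rounded up to whole bytes and then
 * to the alignment of unsigned long.
 */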
unsigned long usemap_size(void)
{
	unsigned long size_bytes;
	size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
	size_bytes = roundup(size_bytes, sizeof(unsigned long));
	return size_bytes;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static unsigned long *__init sparse_early_usemap_alloc(unsigned long pnum)
{
	unsigned long *usemap, section_nr;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);
	struct pglist_data *pgdat = NODE_DATA(nid);

	/*
	 * The usemap's page can't be freed until every section using it
	 * has been freed, and the page holding the pgdat has the same
	 * property.  If section A holds the pgdat and section B holds
	 * usemaps for other sections (including A), neither section can
	 * ever be removed, because each depends on the other.  To avoid
	 * that deadlock, place all usemaps on the section that already
	 * holds the pgdat.
	 */
	section_nr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	usemap = alloc_bootmem_section(usemap_size(), section_nr);
	if (usemap)
		return usemap;

	/* Stupid: suppress gcc warning for SPARSEMEM && !NUMA */
	nid = 0;

	printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__);
	return NULL;
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
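/*
 * Allocate the mem_map for one section: try the architecture's remap
 * area first, then fall back to bootmem on the section's node.
 */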
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map;

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	map = alloc_bootmem_pages_node(NODE_DATA(nid),
			PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION));
	return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = sparse_mem_map_populate(pnum, nid);
	if (map)
		return map;

	printk(KERN_ERR "%s: sparsemem memory map backing failed, "
			"some memory will not be available.\n", __FUNCTION__);
	ms->section_mem_map = 0;
	return NULL;
}

c2b91e2e
YL
310void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
311{
312}
/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;
	unsigned long **usemap_map;
	int size;

	/*
	 * Each mem_map is allocated as a big page (2M on 64-bit x86),
	 * while a usemap is only about 24 bytes.  Allocating a 2M-aligned
	 * 2M map and then a tiny usemap in turn pushes the next 2M map to
	 * yet another 2M boundary, so a big system ends up with a lot of
	 * holes.  Instead, try to keep the 2M allocations contiguous by
	 * allocating all the usemaps first.
	 *
	 * powerpc needs to call sparse_init_one_section right after each
	 * sparse_early_mem_map_alloc, which is why usemap_map is
	 * allocated up front.
	 */
	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
	usemap_map = alloc_bootmem(size);
	if (!usemap_map)
		panic("can not allocate usemap_map\n");

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;
		usemap_map[pnum] = sparse_early_usemap_alloc(pnum);
	}

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;

		usemap = usemap_map[pnum];
		if (!usemap)
			continue;

		map = sparse_early_mem_map_alloc(pnum);
		if (!map)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
								usemap);
	}

	vmemmap_populate_print_last();

	free_bootmem(__pa(usemap_map), size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						  unsigned long nr_pages)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	return; /* XXX: Not implemented yet */
}
static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
}
#else
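/*
 * Try the page allocator first for a physically contiguous memmap;
 * fall back to vmalloc when the high-order allocation fails.
 */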
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * nr_pages;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
	memset(ret, 0, memmap_size);

	return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						  unsigned long nr_pages)
{
	return __kmalloc_section_memmap(nr_pages);
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	if (is_vmalloc_addr(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * nr_pages));
}

static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	int magic;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = atomic_read(&page->_mapcount);

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page->private;

		/*
		 * When this function is called, the section being
		 * removed is logically offline, so all of its pages are
		 * isolated from the page allocator.  If the section's
		 * memmap lives on that same section, it must not be
		 * freed here: the page allocator could otherwise hand
		 * it out again just before the memory is physically
		 * removed.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
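
/*
 * Free a section's usemap (and, if given, its memmap) at hot-remove
 * time.  Slab-backed allocations from an earlier hot-add are freed
 * outright; bootmem-backed ones are left in place or handed to
 * free_map_bootmem().
 */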
static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
	struct page *usemap_page;
	unsigned long nr_pages;

	if (!usemap)
		return;

	usemap_page = virt_to_page(usemap);
	/*
	 * Check to see if allocation came from hot-plug-add
	 */
	if (PageSlab(usemap_page)) {
		kfree(usemap);
		if (memmap)
			__kfree_section_memmap(memmap, PAGES_PER_SECTION);
		return;
	}

	/*
	 * The usemap came from bootmem.  It is packed together with the
	 * other usemaps on the section that holds the pgdat, so just
	 * leave it in place for now.
	 */

	if (memmap) {
		struct page *memmap_page;
		memmap_page = virt_to_page(memmap);

		nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
			>> PAGE_SHIFT;

		free_map_bootmem(memmap_page, nr_pages);
	}
}

/*
 * Returns the number of sections whose mem_maps were properly set.
 * If this is <= 0, the passed-in map was not consumed and must be
 * freed.
 */
int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
			   int nr_pages)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	unsigned long flags;
	int ret;

	/*
	 * No locking needed here: sparse_index_init does its own
	 * locking and may sleep in kmalloc.
	 */
	ret = sparse_index_init(section_nr, pgdat->node_id);
	if (ret < 0 && ret != -EEXIST)
		return ret;
	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
	if (!memmap)
		return -ENOMEM;
	usemap = __kmalloc_section_usemap();
	if (!usemap) {
		__kfree_section_memmap(memmap, nr_pages);
		return -ENOMEM;
	}

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

	ms->section_mem_map |= SECTION_MARKED_PRESENT;

	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0) {
		kfree(usemap);
		__kfree_section_memmap(memmap, nr_pages);
	}
	return ret;
}
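
/*
 * Detach and free one section's mem_map and usemap: decode the stored
 * pointers, clear the mem_section entry, then release both through
 * free_section_usemap().
 */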
void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
{
	struct page *memmap = NULL;
	unsigned long *usemap = NULL;

	if (ms->section_mem_map) {
		usemap = ms->pageblock_flags;
		memmap = sparse_decode_mem_map(ms->section_mem_map,
						__section_nr(ms));
		ms->section_mem_map = 0;
		ms->pageblock_flags = NULL;
	}

	free_section_usemap(memmap, usemap);
}
#endif