/*
 * bootmem - A boot-time physical memory allocator and configurator
 *
 * Copyright (C) 1999 Ingo Molnar
 *               1999 Kanoj Sarcar, SGI
 *               2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/bootmem.h>
#include <linux/module.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

static LIST_HEAD(bdata_list);
#ifdef CONFIG_CRASH_DUMP
/*
 * If we have booted due to a crash, max_pfn will be a very low value. We need
 * to know the amount of memory that the previous kernel used.
 */
unsigned long saved_max_pfn;
#endif

bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;

static int bootmem_debug;

static int __init bootmem_debug_setup(char *buf)
{
        bootmem_debug = 1;
        return 0;
}
early_param("bootmem_debug", bootmem_debug_setup);

#define bdebug(fmt, args...) ({                         \
        if (unlikely(bootmem_debug))                    \
                printk(KERN_INFO                        \
                        "bootmem::%s " fmt,             \
                        __FUNCTION__, ## args);         \
})
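
/*
 * Example (editor's illustration): booting with "bootmem_debug" on the
 * kernel command line turns the bdebug() calls below into KERN_INFO
 * lines such as
 *
 *      bootmem::reserve_bootmem_core nid=0 start=9f end=100 flags=0
 *
 * (the values here are made up; the format is whatever each call site
 * passes to bdebug()).
 */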

/*
 * Given an initialised bdata, return the size of the boot bitmap in bytes.
 */
static unsigned long __init get_mapsize(bootmem_data_t *bdata)
{
        unsigned long mapsize;
        unsigned long start = PFN_DOWN(bdata->node_boot_start);
        unsigned long end = bdata->node_low_pfn;

        mapsize = ((end - start) + 7) / 8;
        return ALIGN(mapsize, sizeof(long));
}

/**
 * bootmem_bootmap_pages - calculate bitmap size in pages
 * @pages: number of pages the bitmap has to represent
 */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
        unsigned long mapsize;

        mapsize = (pages + 7) / 8;
        mapsize = (mapsize + ~PAGE_MASK) & PAGE_MASK;
        mapsize >>= PAGE_SHIFT;

        return mapsize;
}
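
/*
 * Worked example (editor's note): with 4 KiB pages, a 128 MiB node has
 * 32768 page frames; the bitmap needs (32768 + 7) / 8 = 4096 bytes, so
 * bootmem_bootmap_pages(32768) returns 1 page.
 */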

/*
 * Link bdata into bdata_list, ordered by node_boot_start.
 */
static void __init link_bootmem(bootmem_data_t *bdata)
{
        bootmem_data_t *ent;

        if (list_empty(&bdata_list)) {
                list_add(&bdata->list, &bdata_list);
                return;
        }
        /* insert in order */
        list_for_each_entry(ent, &bdata_list, list) {
                if (bdata->node_boot_start < ent->node_boot_start) {
                        list_add_tail(&bdata->list, &ent->list);
                        return;
                }
        }
        list_add_tail(&bdata->list, &bdata_list);
}

/*
 * Called once per node to set up that node's allocator.
 */
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
        unsigned long mapstart, unsigned long start, unsigned long end)
{
        unsigned long mapsize;

        mminit_validate_memmodel_limits(&start, &end);
        bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
        bdata->node_boot_start = PFN_PHYS(start);
        bdata->node_low_pfn = end;
        link_bootmem(bdata);

        /*
         * Initially all pages are reserved - setup_arch() has to
         * register free RAM areas explicitly.
         */
        mapsize = get_mapsize(bdata);
        memset(bdata->node_bootmem_map, 0xff, mapsize);

        bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
                bdata - bootmem_node_data, start, mapstart, end, mapsize);

        return mapsize;
}

/**
 * init_bootmem_node - register a node as boot memory
 * @pgdat: node to register
 * @freepfn: pfn where the bitmap for this node is to be placed
 * @startpfn: first pfn on the node
 * @endpfn: first pfn after the node
 *
 * Returns the number of bytes needed to hold the bitmap for this node.
 */
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
                                unsigned long startpfn, unsigned long endpfn)
{
        return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}

/**
 * init_bootmem - register boot memory
 * @start: pfn where the bitmap is to be placed
 * @pages: number of available physical pages
 *
 * Returns the number of bytes needed to hold the bitmap.
 */
unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
        max_low_pfn = pages;
        min_low_pfn = start;
        return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}
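
/*
 * Example (editor's sketch, identifiers hypothetical): a UMA architecture
 * typically wires the allocator up from its setup_arch() along these lines:
 *
 *      bootmap_size = init_bootmem(map_pfn, max_pfn);
 *      free_bootmem(PFN_PHYS(first_free_pfn),
 *                   PFN_PHYS(max_pfn - first_free_pfn));
 *      reserve_bootmem(PFN_PHYS(map_pfn), bootmap_size, BOOTMEM_DEFAULT);
 *
 * i.e. register all memory as reserved, open up the usable RAM, then
 * re-reserve the bitmap itself. map_pfn and first_free_pfn stand in for
 * values the architecture derives from its memory map.
 */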

static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
        struct page *page;
        unsigned long pfn;
        unsigned long i, count;
        unsigned long idx;
        unsigned long *map;
        int gofast = 0;

        BUG_ON(!bdata->node_bootmem_map);

        count = 0;
        /* first extant page of the node */
        pfn = PFN_DOWN(bdata->node_boot_start);
        idx = bdata->node_low_pfn - pfn;
        map = bdata->node_bootmem_map;
        /*
         * Check if we are aligned to BITS_PER_LONG pages. If so, we might
         * be able to free page orders of that size at once.
         */
        if (!(pfn & (BITS_PER_LONG-1)))
                gofast = 1;
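
        /*
         * Editor's note: with BITS_PER_LONG == 64, an all-free bitmap word
         * covers 64 page frames and ffs(64) - 1 == 6, so the fast path
         * below hands the buddy allocator one order-6 (64-page) block per
         * word instead of 64 single pages.
         */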
        for (i = 0; i < idx; ) {
                unsigned long v = ~map[i / BITS_PER_LONG];

                if (gofast && v == ~0UL) {
                        int order;

                        page = pfn_to_page(pfn);
                        count += BITS_PER_LONG;
                        order = ffs(BITS_PER_LONG) - 1;
                        __free_pages_bootmem(page, order);
                        i += BITS_PER_LONG;
                        page += BITS_PER_LONG;
                } else if (v) {
                        unsigned long m;

                        page = pfn_to_page(pfn);
                        for (m = 1; m && i < idx; m <<= 1, page++, i++) {
                                if (v & m) {
                                        count++;
                                        __free_pages_bootmem(page, 0);
                                }
                        }
                } else {
                        i += BITS_PER_LONG;
                }
                pfn += BITS_PER_LONG;
        }

        /*
         * Now free the allocator bitmap itself, it's not
         * needed anymore:
         */
        page = virt_to_page(bdata->node_bootmem_map);
        idx = (get_mapsize(bdata) + PAGE_SIZE-1) >> PAGE_SHIFT;
        for (i = 0; i < idx; i++, page++)
                __free_pages_bootmem(page, 0);
        count += i;
        bdata->node_bootmem_map = NULL;

        bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);

        return count;
}

/**
 * free_all_bootmem_node - release a node's free pages to the buddy allocator
 * @pgdat: node to be released
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
        register_page_bootmem_info_node(pgdat);
        return free_all_bootmem_core(pgdat->bdata);
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
        return free_all_bootmem_core(NODE_DATA(0)->bdata);
}

static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
                                     unsigned long size)
{
        unsigned long sidx, eidx;
        unsigned long i;

        BUG_ON(!size);

        /* out of range */
        if (addr + size < bdata->node_boot_start ||
            PFN_DOWN(addr) > bdata->node_low_pfn)
                return;

        if (addr >= bdata->node_boot_start && addr < bdata->last_success)
                bdata->last_success = addr;

        /*
         * Round up the start index: partially free pages are
         * considered reserved.
         */
        if (PFN_UP(addr) > PFN_DOWN(bdata->node_boot_start))
                sidx = PFN_UP(addr) - PFN_DOWN(bdata->node_boot_start);
        else
                sidx = 0;

        /* Round down the end of usable memory for the same reason. */
        eidx = PFN_DOWN(addr + size - bdata->node_boot_start);
        if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
                eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);

        bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
                sidx + PFN_DOWN(bdata->node_boot_start),
                eidx + PFN_DOWN(bdata->node_boot_start));

        for (i = sidx; i < eidx; i++) {
                if (unlikely(!test_and_clear_bit(i, bdata->node_bootmem_map)))
                        BUG();
        }
}
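
/*
 * Worked example (editor's note): with 4 KiB pages and node_boot_start 0,
 * free_bootmem_core(bdata, 0x1080, 0x2000) yields sidx = PFN_UP(0x1080) = 2
 * and eidx = PFN_DOWN(0x3080) = 3, so only the one fully covered page frame
 * (pfn 2) is freed; the partially covered pages at either end stay reserved.
 */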

/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * Only physical pages that actually reside on @pgdat are marked.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
                              unsigned long size)
{
        free_bootmem_core(pgdat->bdata, physaddr, size);
}

/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * All physical pages within the range are marked, no matter what
 * node they reside on.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
        bootmem_data_t *bdata;

        list_for_each_entry(bdata, &bdata_list, list)
                free_bootmem_core(bdata, addr, size);
}

/*
 * Marks a particular physical memory range as unallocatable. Usable RAM
 * might be used for boot-time allocations - or it might get added
 * to the free page pool later on.
 */
static int __init can_reserve_bootmem_core(bootmem_data_t *bdata,
                unsigned long addr, unsigned long size, int flags)
{
        unsigned long sidx, eidx;
        unsigned long i;

        BUG_ON(!size);

        /* out of range for this node */
        if (addr + size < bdata->node_boot_start ||
            PFN_DOWN(addr) > bdata->node_low_pfn)
                return 0;

        /*
         * Round down to the start index of the range: partial pages are
         * reserved as a whole.
         */
        if (addr > bdata->node_boot_start)
                sidx = PFN_DOWN(addr - bdata->node_boot_start);
        else
                sidx = 0;

        eidx = PFN_UP(addr + size - bdata->node_boot_start);
        if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
                eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);

        for (i = sidx; i < eidx; i++) {
                if (test_bit(i, bdata->node_bootmem_map)) {
                        if (flags & BOOTMEM_EXCLUSIVE)
                                return -EBUSY;
                }
        }

        return 0;
}

static void __init reserve_bootmem_core(bootmem_data_t *bdata,
                unsigned long addr, unsigned long size, int flags)
{
        unsigned long sidx, eidx;
        unsigned long i;

        BUG_ON(!size);

        /* out of range for this node */
        if (addr + size < bdata->node_boot_start ||
            PFN_DOWN(addr) > bdata->node_low_pfn)
                return;

        /*
         * Round down to the start index of the range: partial pages are
         * reserved as a whole.
         */
        if (addr > bdata->node_boot_start)
                sidx = PFN_DOWN(addr - bdata->node_boot_start);
        else
                sidx = 0;

        eidx = PFN_UP(addr + size - bdata->node_boot_start);
        if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
                eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);

        bdebug("nid=%td start=%lx end=%lx flags=%x\n",
                bdata - bootmem_node_data,
                sidx + PFN_DOWN(bdata->node_boot_start),
                eidx + PFN_DOWN(bdata->node_boot_start),
                flags);

        for (i = sidx; i < eidx; i++)
                if (test_and_set_bit(i, bdata->node_bootmem_map))
                        bdebug("hm, page %lx reserved twice.\n",
                                PFN_DOWN(bdata->node_boot_start) + i);
}

/**
 * reserve_bootmem_node - mark a page range as reserved
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * Only physical pages that actually reside on @pgdat are marked.
 */
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
                                unsigned long size, int flags)
{
        int ret;

        ret = can_reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
        if (ret < 0)
                return -ENOMEM;
        reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
        return 0;
}

#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
/**
 * reserve_bootmem - mark a page range as reserved
 * @addr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * All physical pages within the range are marked, no matter what
 * node they reside on.
 */
int __init reserve_bootmem(unsigned long addr, unsigned long size,
                           int flags)
{
        bootmem_data_t *bdata;
        int ret;

        list_for_each_entry(bdata, &bdata_list, list) {
                ret = can_reserve_bootmem_core(bdata, addr, size, flags);
                if (ret < 0)
                        return ret;
        }
        list_for_each_entry(bdata, &bdata_list, list)
                reserve_bootmem_core(bdata, addr, size, flags);

        return 0;
}
#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
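
/*
 * Example (editor's sketch, identifiers hypothetical): keeping an initrd
 * image safe from boot-time allocations would look like
 *
 *      reserve_bootmem(initrd_start_phys, initrd_size, BOOTMEM_DEFAULT);
 *
 * With BOOTMEM_EXCLUSIVE instead, the call fails with -EBUSY if any page
 * in the range is already reserved - the crash-kernel reservation code
 * relies on this to detect overlaps.
 */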

/*
 * We 'merge' subsequent allocations to save space. We might 'lose'
 * some fraction of a page if allocations cannot be satisfied due to
 * size constraints on boxes where there is physical RAM space
 * fragmentation - in these cases (mostly large memory boxes) this
 * is not a problem.
 *
 * On low memory boxes we get it right in 100% of the cases.
 *
 * alignment has to be a power of 2 value.
 *
 * NOTE: This function is _not_ reentrant.
 */
static void * __init
alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
                unsigned long align, unsigned long goal, unsigned long limit)
{
        unsigned long areasize, preferred;
        unsigned long i, start = 0, incr, eidx, end_pfn;
        void *ret;
        unsigned long node_boot_start;
        void *node_bootmem_map;

        if (!size) {
                printk(KERN_ERR "alloc_bootmem_core(): zero-sized request\n");
                BUG();
        }
        BUG_ON(align & (align - 1));

        /* on nodes without memory - bootmem_map is NULL */
        if (!bdata->node_bootmem_map)
                return NULL;

        bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
                bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
                align, goal, limit);

        /*
         * bdata->node_boot_start is supposed to be (12+6) bits aligned
         * on x86_64?
         */
        node_boot_start = bdata->node_boot_start;
        node_bootmem_map = bdata->node_bootmem_map;
        if (align) {
                node_boot_start = ALIGN(bdata->node_boot_start, align);
                if (node_boot_start > bdata->node_boot_start)
                        node_bootmem_map = (unsigned long *)bdata->node_bootmem_map +
                            PFN_DOWN(node_boot_start - bdata->node_boot_start)/BITS_PER_LONG;
        }

        if (limit && node_boot_start >= limit)
                return NULL;

        end_pfn = bdata->node_low_pfn;
        limit = PFN_DOWN(limit);
        if (limit && end_pfn > limit)
                end_pfn = limit;

        eidx = end_pfn - PFN_DOWN(node_boot_start);

        /*
         * We try to allocate bootmem pages above 'goal'
         * first, then we try to allocate lower pages.
         */
        preferred = 0;
        if (goal && PFN_DOWN(goal) < end_pfn) {
                if (goal > node_boot_start)
                        preferred = goal - node_boot_start;

                if (bdata->last_success > node_boot_start &&
                        bdata->last_success - node_boot_start >= preferred)
                        if (!limit || limit > bdata->last_success)
                                preferred = bdata->last_success - node_boot_start;
        }

        preferred = PFN_DOWN(ALIGN(preferred, align));
        areasize = (size + PAGE_SIZE - 1) / PAGE_SIZE;
        incr = align >> PAGE_SHIFT ? : 1;

restart_scan:
        for (i = preferred; i < eidx;) {
                unsigned long j;

                i = find_next_zero_bit(node_bootmem_map, eidx, i);
                i = ALIGN(i, incr);
                if (i >= eidx)
                        break;
                if (test_bit(i, node_bootmem_map)) {
                        i += incr;
                        continue;
                }
                for (j = i + 1; j < i + areasize; ++j) {
                        if (j >= eidx)
                                goto fail_block;
                        if (test_bit(j, node_bootmem_map))
                                goto fail_block;
                }
                start = i;
                goto found;
        fail_block:
                i = ALIGN(j, incr);
                if (i == j)
                        i += incr;
        }

        if (preferred > 0) {
                preferred = 0;
                goto restart_scan;
        }
        return NULL;

found:
        bdata->last_success = PFN_PHYS(start) + node_boot_start;
        BUG_ON(start >= eidx);

        /*
         * Is the next page of the previous allocation-end the start
         * of this allocation's buffer? If yes then we can 'merge'
         * the previous partial page with this allocation.
         */
        if (align < PAGE_SIZE &&
            bdata->last_offset && bdata->last_pos + 1 == start) {
                unsigned long offset, remaining_size;
                offset = ALIGN(bdata->last_offset, align);
                BUG_ON(offset > PAGE_SIZE);
                remaining_size = PAGE_SIZE - offset;
                if (size < remaining_size) {
                        areasize = 0;
                        /* last_pos unchanged */
                        bdata->last_offset = offset + size;
                        ret = phys_to_virt(bdata->last_pos * PAGE_SIZE +
                                           offset + node_boot_start);
                } else {
                        remaining_size = size - remaining_size;
                        areasize = (remaining_size + PAGE_SIZE - 1) / PAGE_SIZE;
                        ret = phys_to_virt(bdata->last_pos * PAGE_SIZE +
                                           offset + node_boot_start);
                        bdata->last_pos = start + areasize - 1;
                        bdata->last_offset = remaining_size;
                }
                bdata->last_offset &= ~PAGE_MASK;
        } else {
                bdata->last_pos = start + areasize - 1;
                bdata->last_offset = size & ~PAGE_MASK;
                ret = phys_to_virt(start * PAGE_SIZE + node_boot_start);
        }

        bdebug("nid=%td start=%lx end=%lx\n",
                bdata - bootmem_node_data,
                start + PFN_DOWN(bdata->node_boot_start),
                start + areasize + PFN_DOWN(bdata->node_boot_start));

        /*
         * Reserve the area now:
         */
        for (i = start; i < start + areasize; i++)
                if (unlikely(test_and_set_bit(i, node_bootmem_map)))
                        BUG();
        memset(ret, 0, size);
        return ret;
}
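
/*
 * Worked example of the merge path above (editor's note): two consecutive
 * alloc_bootmem_core() calls for 100 bytes with align < PAGE_SIZE end up
 * in the same page. The first call sets last_offset = 100; the second
 * finds start == last_pos + 1, sees PAGE_SIZE - ALIGN(100, align) bytes
 * still free in the previous page and returns that page at the aligned
 * offset, with areasize = 0 so no new page bit gets reserved.
 */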

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
                                      unsigned long goal)
{
        bootmem_data_t *bdata;
        void *ptr;

        list_for_each_entry(bdata, &bdata_list, list) {
                ptr = alloc_bootmem_core(bdata, size, align, goal, 0);
                if (ptr)
                        return ptr;
        }
        return NULL;
}

/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
                              unsigned long goal)
{
        void *mem = __alloc_bootmem_nopanic(size, align, goal);

        if (mem)
                return mem;
        /*
         * Whoops, we cannot satisfy the allocation request.
         */
        printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
        panic("Out of memory");
        return NULL;
}
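
/*
 * Example (editor's note): the common alloc_bootmem() wrapper in
 * <linux/bootmem.h> boils down, in trees of this vintage, to
 *
 *      #define alloc_bootmem(x) \
 *              __alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 *
 * i.e. cache-line aligned with the goal set just above the DMA zone;
 * check the header in your tree for the exact definition.
 */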

/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
        void *ptr;

        ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
        if (ptr)
                return ptr;

        return __alloc_bootmem(size, align, goal);
}

#ifdef CONFIG_SPARSEMEM
/**
 * alloc_bootmem_section - allocate boot memory from a specific section
 * @size: size of the request in bytes
 * @section_nr: sparse map section to allocate from
 *
 * Returns NULL on failure.
 */
void * __init alloc_bootmem_section(unsigned long size,
                                    unsigned long section_nr)
{
        void *ptr;
        unsigned long limit, goal, start_nr, end_nr, pfn;
        struct pglist_data *pgdat;

        pfn = section_nr_to_pfn(section_nr);
        goal = PFN_PHYS(pfn);
        limit = PFN_PHYS(section_nr_to_pfn(section_nr + 1)) - 1;
        pgdat = NODE_DATA(early_pfn_to_nid(pfn));
        ptr = alloc_bootmem_core(pgdat->bdata, size, SMP_CACHE_BYTES, goal,
                                 limit);

        if (!ptr)
                return NULL;

        start_nr = pfn_to_section_nr(PFN_DOWN(__pa(ptr)));
        end_nr = pfn_to_section_nr(PFN_DOWN(__pa(ptr) + size));
        if (start_nr != section_nr || end_nr != section_nr) {
                printk(KERN_WARNING "alloc_bootmem failed on section %ld.\n",
                       section_nr);
                free_bootmem_core(pgdat->bdata, __pa(ptr), size);
                ptr = NULL;
        }

        return ptr;
}
#endif

void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
        void *ptr;

        ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
        if (ptr)
                return ptr;

        return __alloc_bootmem_nopanic(size, align, goal);
}

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif
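
/*
 * Editor's note: the default limit of 0xffffffff confines the "low"
 * allocators below to the first 4 GiB of physical memory; architectures
 * with stricter addressing constraints (e.g. 31-bit s390) override
 * ARCH_LOW_ADDRESS_LIMIT.
 */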

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
                                  unsigned long goal)
{
        bootmem_data_t *bdata;
        void *ptr;

        list_for_each_entry(bdata, &bdata_list, list) {
                ptr = alloc_bootmem_core(bdata, size, align, goal,
                                         ARCH_LOW_ADDRESS_LIMIT);
                if (ptr)
                        return ptr;
        }

        /*
         * Whoops, we cannot satisfy the allocation request.
         */
        printk(KERN_ALERT "low bootmem alloc of %lu bytes failed!\n", size);
        panic("Out of low memory");
        return NULL;
}

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Unlike __alloc_bootmem_low(), only @pgdat is tried; the function does
 * not fall back to other nodes and returns NULL instead of panicking if
 * the request can not be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
                                       unsigned long align, unsigned long goal)
{
        return alloc_bootmem_core(pgdat->bdata, size, align, goal,
                                  ARCH_LOW_ADDRESS_LIMIT);
}