/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

struct memblock memblock __initdata_memblock;

int memblock_debug __initdata_memblock;
int memblock_can_resize __initdata_memblock;
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;

/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/*
 * Address comparison utilities
 */

static phys_addr_t __init_memblock memblock_align_down(phys_addr_t addr, phys_addr_t size)
{
	return addr & ~(size - 1);
}

static phys_addr_t __init_memblock memblock_align_up(phys_addr_t addr, phys_addr_t size)
{
	return (addr + (size - 1)) & ~(size - 1);
}

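/*
 * Illustration (editor's sketch): the mask arithmetic above assumes
 * 'size' is a power of two, which every caller in this file provides;
 * (size - 1) then yields a mask of the low-order bits. For example,
 * with a 0x1000 (4K) alignment:
 *
 *	memblock_align_down(0x12345, 0x1000) == 0x12000
 *	memblock_align_up(0x12345, 0x1000)   == 0x13000
 */
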
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
					phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

static long __init_memblock memblock_addrs_adjacent(phys_addr_t base1, phys_addr_t size1,
					phys_addr_t base2, phys_addr_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

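/*
 * Example (editor's illustration): for base1/size1 = 0x1000/0x1000 and
 * base2/size2 = 0x2000/0x1000, memblock_addrs_overlap() returns false
 * (the half-open ranges merely touch), while memblock_addrs_adjacent()
 * returns 1 because region 2 starts exactly where region 1 ends;
 * swapping the arguments returns -1, and disjoint regions return 0.
 */
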
static long __init_memblock memblock_regions_adjacent(struct memblock_type *type,
					unsigned long r1, unsigned long r2)
{
	phys_addr_t base1 = type->regions[r1].base;
	phys_addr_t size1 = type->regions[r1].size;
	phys_addr_t base2 = type->regions[r2].base;
	phys_addr_t size2 = type->regions[r2].size;

	return memblock_addrs_adjacent(base1, size1, base2, size2);
}

long __init_memblock memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}

/*
 * Find, allocate, deallocate or reserve unreserved regions. All allocations
 * are top-down.
 */

static phys_addr_t __init memblock_find_region(phys_addr_t start, phys_addr_t end,
					phys_addr_t size, phys_addr_t align)
{
	phys_addr_t base, res_base;
	long j;

	/* Prevent allocations returning 0 as it's also used to
	 * indicate an allocation failure
	 */
	if (start == 0)
		start = PAGE_SIZE;

	base = memblock_align_down((end - size), align);
	while (start <= base) {
		j = memblock_overlaps_region(&memblock.reserved, base, size);
		if (j < 0)
			return base;
		res_base = memblock.reserved.regions[j].base;
		if (res_base < size)
			break;
		base = memblock_align_down(res_base - size, align);
	}

	return MEMBLOCK_ERROR;
}

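/*
 * Worked example (editor's sketch, not from the original source): with
 * end = 0x10000, size = 0x2000, align = 0x1000 and a reserved region at
 * [0xe000-0xffff], the first candidate base is 0xe000, which overlaps
 * the reservation; the scan then retries below it at 0xc000 and returns
 * that base if nothing else is reserved there.
 */
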
static phys_addr_t __init memblock_find_base(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end)
{
	long i;

	BUG_ON(0 == size);

	size = memblock_align_up(size, align);

	/* Pump up max_addr */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* We do a top-down search, this tends to limit memory
	 * fragmentation by keeping early boot allocs near the
	 * top of memory
	 */
	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
		phys_addr_t memblockbase = memblock.memory.regions[i].base;
		phys_addr_t memblocksize = memblock.memory.regions[i].size;
		phys_addr_t bottom, top, found;

		if (memblocksize < size)
			continue;
		if ((memblockbase + memblocksize) <= start)
			break;
		bottom = max(memblockbase, start);
		top = min(memblockbase + memblocksize, end);
		if (bottom >= top)
			continue;
		found = memblock_find_region(bottom, top, size, align);
		if (found != MEMBLOCK_ERROR)
			return found;
	}
	return MEMBLOCK_ERROR;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	unsigned long i;

	for (i = r; i < type->cnt - 1; i++) {
		type->regions[i].base = type->regions[i + 1].base;
		type->regions[i].size = type->regions[i + 1].size;
	}
	type->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void __init_memblock memblock_coalesce_regions(struct memblock_type *type,
		unsigned long r1, unsigned long r2)
{
	type->regions[r1].size += type->regions[r2].size;
	memblock_remove_region(type, r2);
}

/* Defined below but needed now */
static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size);

static int __init_memblock memblock_double_array(struct memblock_type *type)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab is available and we use it, or
	 * we fall back to MEMBLOCK for allocations. That means that this is
	 * unsafe to use when bootmem is currently active (unless bootmem
	 * itself is implemented on top of MEMBLOCK, which isn't the case yet).
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations.
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array);
	} else
		addr = memblock_find_base(new_size, sizeof(phys_addr_t), 0, MEMBLOCK_ALLOC_ACCESSIBLE);
	if (addr == MEMBLOCK_ERROR) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries!\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}
	new_array = __va(addr);

	memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]",
		 memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);

	/* Found space, we now need to move the array over before
	 * we add the reserved region since it may be our reserved
	 * array itself that is full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* If we use SLAB that's it, we are done */
	if (use_slab)
		return 0;

	/* Add the new reserved region now. Should not fail! */
	BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size) < 0);

	/* If the array wasn't our static init one, then free it. We only do
	 * that before SLAB is available as later on, we don't know whether
	 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
	 * anyways.
	 */
	if (old_array != memblock_memory_init_regions &&
	    old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_size);

	return 0;
}

extern int __init_memblock __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
					phys_addr_t addr2, phys_addr_t size2)
{
	return 1;
}

static long __init_memblock memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if ((type->cnt == 1) && (type->regions[0].size == 0)) {
		type->regions[0].base = base;
		type->regions[0].size = size;
		return 0;
	}

	/* First try and coalesce this MEMBLOCK with another. */
	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = memblock_addrs_adjacent(base, size, rgnbase, rgnsize);
		/* Check if arch allows coalescing */
		if (adjacent != 0 && type == &memblock.memory &&
		    !memblock_memory_can_coalesce(base, size, rgnbase, rgnsize))
			break;
		if (adjacent > 0) {
			type->regions[i].base -= size;
			type->regions[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			type->regions[i].size += size;
			coalesced++;
			break;
		}
	}

	/* If we plugged a hole, we may want to also coalesce with the
	 * next region
	 */
	if ((i < type->cnt - 1) && memblock_regions_adjacent(type, i, i+1) &&
	    ((type != &memblock.memory || memblock_memory_can_coalesce(type->regions[i].base,
							type->regions[i].size,
							type->regions[i+1].base,
							type->regions[i+1].size)))) {
		memblock_coalesce_regions(type, i, i+1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;

	/* If we are out of space, we fail. It's too late to resize the array
	 * but then this shouldn't have happened in the first place.
	 */
	if (WARN_ON(type->cnt >= type->max))
		return -1;

	/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
	for (i = type->cnt - 1; i >= 0; i--) {
		if (base < type->regions[i].base) {
			type->regions[i+1].base = type->regions[i].base;
			type->regions[i+1].size = type->regions[i].size;
		} else {
			type->regions[i+1].base = base;
			type->regions[i+1].size = size;
			break;
		}
	}

	if (base < type->regions[0].base) {
		type->regions[0].base = base;
		type->regions[0].size = size;
	}
	type->cnt++;

	/* Is the array full? Try to resize it. If that fails, we undo
	 * our allocation and return an error.
	 */
	if (type->cnt == type->max && memblock_double_array(type)) {
		type->cnt--;
		return -1;
	}

	return 0;
}

long __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(&memblock.memory, base, size);
}

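/*
 * Example (editor's illustration): adding [0x1000-0x1fff] and then
 * [0x2000-0x2fff] leaves a single region [0x1000-0x2fff] in
 * memblock.memory, because memblock_add_region() coalesces adjacent
 * entries instead of consuming another array slot.
 */
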
static long __init_memblock __memblock_remove(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs to */
	for (i = 0; i < type->cnt; i++) {
		rgnbegin = type->regions[i].base;
		rgnend = rgnbegin + type->regions[i].size;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == type->cnt)
		return -1;

	/* Check to see if we are removing the entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		memblock_remove_region(type, i);
		return 0;
	}

	/* Check to see if the region is matching at the front */
	if (rgnbegin == base) {
		type->regions[i].base = end;
		type->regions[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		type->regions[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	type->regions[i].size = base - type->regions[i].base;
	return memblock_add_region(type, end, rgnend - end);
}

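/*
 * Example (editor's illustration): removing [0x2000-0x2fff] from a
 * region [0x1000-0x3fff] takes the split path above: the existing
 * entry is trimmed to [0x1000-0x1fff] and a new entry [0x3000-0x3fff]
 * is added for the remainder.
 */
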
long __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.memory, base, size);
}

long __init memblock_free(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.reserved, base, size);
}

long __init memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *_rgn = &memblock.reserved;

	BUG_ON(0 == size);

	return memblock_add_region(_rgn, base, size);
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t found;

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = memblock_align_up(size, align);

	found = memblock_find_base(size, align, 0, max_addr);
	if (found != MEMBLOCK_ERROR &&
	    memblock_add_region(&memblock.reserved, found, size) >= 0)
		return found;

	return 0;
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

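/*
 * Typical usage (editor's sketch; the PAGE_SIZE alignment is just an
 * example): memblock hands back a physical address, so early callers
 * convert it before touching the memory. No error check is needed
 * because memblock_alloc_base() panics on failure:
 *
 *	phys_addr_t pa = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 *	void *p = __va(pa);
 *	memset(p, 0, PAGE_SIZE);
 */
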
/*
 * Additional node-local allocators. Search for node memory is bottom up
 * and walks memblock regions within that node bottom-up as well, but allocation
 * within a memblock region is top-down. XXX I plan to fix that at some stage
 *
 * WARNING: Only available after early_node_map[] has been populated,
 * on some architectures, that is after all the calls to add_active_range()
 * have been done to populate it.
 */

phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
{
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
	/*
	 * This code originates from sparc which really wants us to walk by
	 * address and return the nid. This is not very convenient for
	 * early_pfn_map[] users as the map isn't sorted yet, and it really
	 * wants to be walked by nid.
	 *
	 * For now, I implement the inefficient method below which walks the
	 * early map multiple times. Eventually we may want to use an ARCH
	 * config option to implement a completely different method for both
	 * cases.
	 */
	unsigned long start_pfn, end_pfn;
	int i;

	for (i = 0; i < MAX_NUMNODES; i++) {
		get_pfn_range_for_nid(i, &start_pfn, &end_pfn);
		if (start < PFN_PHYS(start_pfn) || start >= PFN_PHYS(end_pfn))
			continue;
		*nid = i;
		return min(end, PFN_PHYS(end_pfn));
	}
#endif
	*nid = 0;

	return end;
}

static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
					phys_addr_t size,
					phys_addr_t align, int nid)
{
	phys_addr_t start, end;

	start = mp->base;
	end = start + mp->size;

	start = memblock_align_up(start, align);
	while (start < end) {
		phys_addr_t this_end;
		int this_nid;

		this_end = memblock_nid_range(start, end, &this_nid);
		if (this_nid == nid) {
			phys_addr_t ret = memblock_find_region(start, this_end, size, align);
			if (ret != MEMBLOCK_ERROR &&
			    memblock_add_region(&memblock.reserved, ret, size) >= 0)
				return ret;
		}
		start = this_end;
	}

	return MEMBLOCK_ERROR;
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	struct memblock_type *mem = &memblock.memory;
	int i;

	BUG_ON(0 == size);

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = memblock_align_up(size, align);

	/* We do a bottom-up search for a region with the right
	 * nid since that's easier considering how memblock_nid_range()
	 * works
	 */
	for (i = 0; i < mem->cnt; i++) {
		phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
					size, align, nid);
		if (ret != MEMBLOCK_ERROR)
			return ret;
	}

	return 0;
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
}

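/*
 * Example (editor's sketch): NUMA-aware early code typically asks for
 * node-local memory but must tolerate a fallback, which is what the
 * wrapper above provides:
 *
 *	phys_addr_t pa = memblock_alloc_try_nid(size, align, nid);
 *
 * If nothing on 'nid' fits, the request is retried with
 * MEMBLOCK_ALLOC_ANYWHERE.
 */
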
/*
 * Remaining API functions
 */

/* You must call memblock_analyze() before this. */
phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory_size;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

/* You must call memblock_analyze() after this. */
void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
{
	unsigned long i;
	phys_addr_t limit;
	struct memblock_region *p;

	if (!memory_limit)
		return;

	/* Truncate the memblock regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < memblock.memory.cnt; i++) {
		if (limit > memblock.memory.regions[i].size) {
			limit -= memblock.memory.regions[i].size;
			continue;
		}

		memblock.memory.regions[i].size = limit;
		memblock.memory.cnt = i + 1;
		break;
	}

	memory_limit = memblock_end_of_DRAM();

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		p = &memblock.reserved.regions[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			memblock_remove_region(&memblock.reserved, i);
			i--;
		}
	}
}

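/*
 * Worked example (editor's illustration): with memory regions of 1GB at
 * 0 and 1GB at 0x80000000, a memory_limit of 0x60000000 (1.5GB) leaves
 * the first region intact, truncates the second to 512MB, and then
 * clips or drops any reserved regions above the new end of DRAM.
 */
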
static int memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

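/*
 * The binary search relies on memblock_add_region() keeping each array
 * sorted by base with no overlaps. For example (editor's illustration),
 * searching for 0x2800 among regions based at 0x1000, 0x2000 and 0x4000
 * probes the middle entry first and returns its index if 0x2800 falls
 * within it; otherwise the search narrows to one half of the array.
 */
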
int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

int memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= (base + size);
}

int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}

void __init memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

static void __init_memblock memblock_dump(struct memblock_type *region, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);

	for (i = 0; i < region->cnt; i++) {
		base = region->regions[i].base;
		size = region->regions[i].size;

		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes\n",
			name, i, base, base + size - 1, size);
	}
}

void __init_memblock memblock_dump_all(void)
{
	if (!memblock_debug)
		return;

	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_analyze(void)
{
	int i;

	/* Check the marker in the unused last array entry */
	WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);
	WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);

	memblock.memory_size = 0;

	for (i = 0; i < memblock.memory.cnt; i++)
		memblock.memory_size += memblock.memory.regions[i].size;

	/* We allow resizing from here on */
	memblock_can_resize = 1;
}

void __init memblock_init(void)
{
	/* Hook up the initial arrays */
	memblock.memory.regions		= memblock_memory_init_regions;
	memblock.memory.max		= INIT_MEMBLOCK_REGIONS;
	memblock.reserved.regions	= memblock_reserved_init_regions;
	memblock.reserved.max		= INIT_MEMBLOCK_REGIONS;

	/* Write a marker in the unused last array entry */
	memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;
	memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;

	/* Create a dummy zero-size MEMBLOCK which will get coalesced away later.
	 * This simplifies the memblock_add() code below...
	 */
	memblock.memory.regions[0].base = 0;
	memblock.memory.regions[0].size = 0;
	memblock.memory.cnt = 1;

	/* Ditto. */
	memblock.reserved.regions[0].base = 0;
	memblock.reserved.regions[0].size = 0;
	memblock.reserved.cnt = 1;

	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
}

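/*
 * Expected bring-up sequence (editor's sketch; the addresses below are
 * placeholders, not real platform values):
 *
 *	memblock_init();			   hook up the static arrays
 *	memblock_add(0, 0x40000000);		   register 1GB of RAM
 *	memblock_reserve(0x01000000, 0x00400000);  protect the kernel image
 *	memblock_analyze();			   sum memory_size, allow resizing
 */
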
static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

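/*
 * Booting with "memblock=debug" on the kernel command line sets
 * memblock_debug, which enables the memblock_dbg() traces and the
 * memblock_dump_all() output above.
 */
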
#if defined(CONFIG_DEBUG_FS) && !defined(ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));
	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);

	return 0;
}
__initcall(memblock_init_debugfs);

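/*
 * With debugfs mounted, the current region lists are readable at
 * runtime (editor's example; the ranges depend on the platform):
 *
 *	# cat /sys/kernel/debug/memblock/memory
 *	   0: 0x0000000000000000..0x000000003fffffff
 */
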
#endif /* CONFIG_DEBUG_FS */