/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>
struct memblock memblock;

int memblock_debug;
int memblock_can_resize;
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1];
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1];
#define MEMBLOCK_ERROR	(~(phys_addr_t)0)
/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}
/*
 * Address comparison utilities
 */
static phys_addr_t memblock_align_down(phys_addr_t addr, phys_addr_t size)
{
	return addr & ~(size - 1);
}

static phys_addr_t memblock_align_up(phys_addr_t addr, phys_addr_t size)
{
	return (addr + (size - 1)) & ~(size - 1);
}
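
/*
 * Worked example (illustrative, not from the original file): both helpers
 * assume a power-of-two alignment so the mask trick is valid. With
 * addr = 0x12345 and size = 0x1000:
 *
 *	memblock_align_down(0x12345, 0x1000) == 0x12000
 *	memblock_align_up(0x12345, 0x1000)   == 0x13000
 */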
static unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
					    phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}
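
/*
 * Worked instance (illustrative): regions are treated as half-open
 * intervals [base, base + size), so [0x1000, 0x2000) and [0x2000, 0x3000)
 * do not overlap; memblock_addrs_overlap() returns 0 for them while
 * memblock_addrs_adjacent() below reports them as adjacent.
 */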
static long memblock_addrs_adjacent(phys_addr_t base1, phys_addr_t size1,
				    phys_addr_t base2, phys_addr_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}
static long memblock_regions_adjacent(struct memblock_type *type,
				      unsigned long r1, unsigned long r2)
{
	phys_addr_t base1 = type->regions[r1].base;
	phys_addr_t size1 = type->regions[r1].size;
	phys_addr_t base2 = type->regions[r2].base;
	phys_addr_t size2 = type->regions[r2].size;

	return memblock_addrs_adjacent(base1, size1, base2, size2);
}
long memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}
/*
 * Find, allocate, deallocate or reserve unreserved regions. All allocations
 * are top-down.
 */
static phys_addr_t __init memblock_find_region(phys_addr_t start, phys_addr_t end,
					       phys_addr_t size, phys_addr_t align)
{
	phys_addr_t base, res_base;
	long j;

	base = memblock_align_down((end - size), align);
	while (start <= base) {
		j = memblock_overlaps_region(&memblock.reserved, base, size);
		if (j < 0)
			return base;
		res_base = memblock.reserved.regions[j].base;
		if (res_base < size)
			break;
		base = memblock_align_down(res_base - size, align);
	}

	return MEMBLOCK_ERROR;
}
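
/*
 * Worked example (illustrative, not from the original file): searching
 * [0x1000, 0x4000) for 0x1000 bytes at 0x1000 alignment while
 * [0x2800, 0x3800) is reserved. The first candidate is
 * memblock_align_down(0x4000 - 0x1000) = 0x3000, which overlaps the
 * reservation, so the search retries below its base:
 * memblock_align_down(0x2800 - 0x1000) = 0x1000, which is free and is
 * returned.
 */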
static phys_addr_t __init memblock_find_base(phys_addr_t size, phys_addr_t align,
					     phys_addr_t start, phys_addr_t end)
{
	long i;

	BUG_ON(0 == size);

	size = memblock_align_up(size, align);

	/* Pump up max_addr */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* We do a top-down search, this tends to limit memory
	 * fragmentation by keeping early boot allocs near the
	 * top of memory
	 */
	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
		phys_addr_t memblockbase = memblock.memory.regions[i].base;
		phys_addr_t memblocksize = memblock.memory.regions[i].size;
		phys_addr_t bottom, top, found;

		if (memblocksize < size)
			continue;
		if ((memblockbase + memblocksize) <= start)
			break;
		bottom = max(memblockbase, start);
		top = min(memblockbase + memblocksize, end);
		if (bottom >= top)
			continue;
		found = memblock_find_region(bottom, top, size, align);
		if (found != MEMBLOCK_ERROR)
			return found;
	}

	return MEMBLOCK_ERROR;
}
static void memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	unsigned long i;

	for (i = r; i < type->cnt - 1; i++) {
		type->regions[i].base = type->regions[i + 1].base;
		type->regions[i].size = type->regions[i + 1].size;
	}
	type->cnt--;
}
/* Assumption: base addr of region 1 < base addr of region 2 */
static void memblock_coalesce_regions(struct memblock_type *type,
				      unsigned long r1, unsigned long r2)
{
	type->regions[r1].size += type->regions[r2].size;
	memblock_remove_region(type, r2);
}
/* Defined below but needed now */
static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size);
static int memblock_double_array(struct memblock_type *type)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	pr_debug("memblock: %s array full, doubling...", memblock_type_name(type));

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() and we use it or
	 * we use MEMBLOCK for allocations. That means that this is unsafe to use
	 * when bootmem is currently active (unless bootmem itself is implemented
	 * on top of MEMBLOCK which isn't the case yet)
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab is
	 * active for memory hotplug operations
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array);
	} else
		addr = memblock_find_base(new_size, sizeof(phys_addr_t), 0, MEMBLOCK_ALLOC_ACCESSIBLE);
	if (addr == MEMBLOCK_ERROR) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries!\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}
	new_array = __va(addr);

	/* Found space, we now need to move the array over before
	 * we add the reserved region since it may be our reserved
	 * array itself that is full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* If we use SLAB that's it, we are done */
	if (use_slab)
		return 0;

	/* Add the new reserved region now. Should not fail! */
	BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size) < 0);

	/* If the array wasn't our static init one, then free it. We only do
	 * that before SLAB is available as later on, we don't know whether
	 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
	 * anyway.
	 */
	if (old_array != memblock_memory_init_regions &&
	    old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_size);

	return 0;
}
extern int __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
					       phys_addr_t addr2, phys_addr_t size2)
{
	return 1;
}
static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if ((type->cnt == 1) && (type->regions[0].size == 0)) {
		type->regions[0].base = base;
		type->regions[0].size = size;
		return 0;
	}

	/* First try and coalesce this MEMBLOCK with another. */
	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = memblock_addrs_adjacent(base, size, rgnbase, rgnsize);
		/* Check if arch allows coalescing */
		if (adjacent != 0 && type == &memblock.memory &&
		    !memblock_memory_can_coalesce(base, size, rgnbase, rgnsize))
			break;
		if (adjacent > 0) {
			type->regions[i].base -= size;
			type->regions[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			type->regions[i].size += size;
			coalesced++;
			break;
		}
	}

	/* If we plugged a hole, we may want to also coalesce with the
	 * next region
	 */
	if ((i < type->cnt - 1) && memblock_regions_adjacent(type, i, i+1) &&
	    ((type != &memblock.memory || memblock_memory_can_coalesce(type->regions[i].base,
								       type->regions[i].size,
								       type->regions[i+1].base,
								       type->regions[i+1].size)))) {
		memblock_coalesce_regions(type, i, i+1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;

	/* If we are out of space, we fail. It's too late to resize the array
	 * but then this shouldn't have happened in the first place.
	 */
	if (WARN_ON(type->cnt >= type->max))
		return -1;

	/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
	for (i = type->cnt - 1; i >= 0; i--) {
		if (base < type->regions[i].base) {
			type->regions[i+1].base = type->regions[i].base;
			type->regions[i+1].size = type->regions[i].size;
		} else {
			type->regions[i+1].base = base;
			type->regions[i+1].size = size;
			break;
		}
	}

	if (base < type->regions[0].base) {
		type->regions[0].base = base;
		type->regions[0].size = size;
	}
	type->cnt++;

	/* The array is full ? Try to resize it. If that fails, we undo
	 * our allocation and return an error
	 */
	if (type->cnt == type->max && memblock_double_array(type)) {
		type->cnt--;
		return -1;
	}

	return 0;
}
long memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(&memblock.memory, base, size);
}
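
/*
 * Illustrative use (not from the original file): registering two adjacent
 * banks of RAM yields a single coalesced region, assuming the architecture
 * does not veto the merge via memblock_memory_can_coalesce():
 *
 *	memblock_add(0x00000000, 0x20000000);	-- [0, 512MB)
 *	memblock_add(0x20000000, 0x10000000);	-- [512MB, 768MB)
 *
 * memblock.memory then holds one region with base 0 and size 0x30000000.
 */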
static long __memblock_remove(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs to */
	for (i = 0; i < type->cnt; i++) {
		rgnbegin = type->regions[i].base;
		rgnend = rgnbegin + type->regions[i].size;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == type->cnt)
		return -1;

	/* Check to see if we are removing the entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		memblock_remove_region(type, i);
		return 0;
	}

	/* Check to see if the region is matching at the front */
	if (rgnbegin == base) {
		type->regions[i].base = end;
		type->regions[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		type->regions[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	type->regions[i].size = base - type->regions[i].base;
	return memblock_add_region(type, end, rgnend - end);
}
long memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.memory, base, size);
}
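
/*
 * Illustrative example (not from the original file): removing the middle of
 * a region splits it, which consumes an extra array slot:
 *
 *	memblock_add(0x0, 0x10000);		-- one region [0, 0x10000)
 *	memblock_remove(0x4000, 0x2000);	-- punch a hole
 *
 * memblock.memory then holds two regions, [0, 0x4000) and [0x6000, 0x10000),
 * via the split path at the bottom of __memblock_remove().
 */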
long __init memblock_free(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.reserved, base, size);
}
long __init memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *_rgn = &memblock.reserved;

	BUG_ON(0 == size);

	return memblock_add_region(_rgn, base, size);
}
phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t found;

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = memblock_align_up(size, align);

	found = memblock_find_base(size, align, 0, max_addr);
	if (found != MEMBLOCK_ERROR &&
	    memblock_add_region(&memblock.reserved, found, size) >= 0)
		return found;

	return 0;
}
phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}
phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
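
/*
 * Typical early-boot use (illustrative; the buffer is made up for the
 * example): allocate a page-aligned scratch area below the current
 * accessible limit:
 *
 *	phys_addr_t scratch = memblock_alloc(16 * PAGE_SIZE, PAGE_SIZE);
 *
 * memblock_alloc() panics on failure (via memblock_alloc_base()), so the
 * returned address needs no error check; callers that want to handle
 * failure themselves use __memblock_alloc_base() and test for 0.
 */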
/*
 * Additional node-local allocators. Search for node memory is bottom up
 * and walks memblock regions within that node bottom-up as well, but allocation
 * within a memblock region is top-down. XXX I plan to fix that at some stage
 *
 * WARNING: Only available after early_node_map[] has been populated,
 * on some architectures, that is after all the calls to add_active_range()
 * have been done to populate it.
 */
phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
{
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
	/*
	 * This code originates from sparc which really wants us to walk by addresses
	 * and returns the nid. This is not very convenient for early_pfn_map[] users
	 * as the map isn't sorted yet, and it really wants to be walked by nid.
	 *
	 * For now, I implement the inefficient method below which walks the early
	 * map multiple times. Eventually we may want to use an ARCH config option
	 * to implement a completely different method for both cases.
	 */
	unsigned long start_pfn, end_pfn;
	int i;

	for (i = 0; i < MAX_NUMNODES; i++) {
		get_pfn_range_for_nid(i, &start_pfn, &end_pfn);
		if (start < PFN_PHYS(start_pfn) || start >= PFN_PHYS(end_pfn))
			continue;
		*nid = i;
		return min(end, PFN_PHYS(end_pfn));
	}
#endif
	*nid = 0;

	return end;
}
static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
						    phys_addr_t size,
						    phys_addr_t align, int nid)
{
	phys_addr_t start, end;

	start = mp->base;
	end = start + mp->size;

	start = memblock_align_up(start, align);
	while (start < end) {
		phys_addr_t this_end;
		int this_nid;

		this_end = memblock_nid_range(start, end, &this_nid);
		if (this_nid == nid) {
			phys_addr_t ret = memblock_find_region(start, this_end, size, align);
			if (ret != MEMBLOCK_ERROR &&
			    memblock_add_region(&memblock.reserved, ret, size) >= 0)
				return ret;
		}
		start = this_end;
	}

	return MEMBLOCK_ERROR;
}
phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	struct memblock_type *mem = &memblock.memory;
	int i;

	BUG_ON(0 == size);

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = memblock_align_up(size, align);

	/* We do a bottom-up search for a region with the right
	 * nid since that's easier considering how memblock_nid_range()
	 * works
	 */
	for (i = 0; i < mem->cnt; i++) {
		phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
							    size, align, nid);
		if (ret != MEMBLOCK_ERROR)
			return ret;
	}

	return 0;
}
phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
}
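
/*
 * Illustrative use (not from the original file; SZ_64K and nid are
 * stand-ins): ask for memory on a particular node, falling back to any
 * node when that node has no suitable range:
 *
 *	phys_addr_t pa = memblock_alloc_try_nid(SZ_64K, SZ_64K, nid);
 *
 * Unlike memblock_alloc(), the fallback goes through memblock_alloc_base()
 * with MEMBLOCK_ALLOC_ANYWHERE, so it still panics if no memory at all can
 * be found.
 */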
/*
 * Remaining API functions
 */
/* You must call memblock_analyze() before this. */
phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory_size;
}
phys_addr_t memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}
/* You must call memblock_analyze() after this. */
void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
{
	unsigned long i;
	phys_addr_t limit;
	struct memblock_region *p;

	if (!memory_limit)
		return;

	/* Truncate the memblock regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < memblock.memory.cnt; i++) {
		if (limit > memblock.memory.regions[i].size) {
			limit -= memblock.memory.regions[i].size;
			continue;
		}

		memblock.memory.regions[i].size = limit;
		memblock.memory.cnt = i + 1;
		break;
	}

	memory_limit = memblock_end_of_DRAM();

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		p = &memblock.reserved.regions[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			memblock_remove_region(&memblock.reserved, i);
			i--;
		}
	}
}
static int memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}
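
/*
 * Worked example (illustrative, not from the original file): with three
 * sorted regions [0x0, 0x1000), [0x2000, 0x3000), [0x5000, 0x6000) and
 * addr = 0x2800: left = 0, right = 3, mid = 1; 0x2800 falls inside
 * regions[1], so 1 is returned. For addr = 0x1800 the loop narrows until
 * left == right and -1 is returned. This relies on memblock_add_region()
 * keeping the array sorted and non-overlapping.
 */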
int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}
int memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= (base + size);
}
int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}
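
/*
 * Note the asymmetry (worked instance, not from the original file): with
 * memory [0x0, 0x10000) and a reservation [0x4000, 0x5000),
 *
 *	memblock_is_region_memory(0x3000, 0x3000)   == 1 (fully contained)
 *	memblock_is_region_reserved(0x3000, 0x3000) == 1 (mere overlap)
 *
 * memblock_is_region_memory() requires the whole [base, base + size) range
 * to sit inside a single memory region, while memblock_is_region_reserved()
 * fires on any intersection with a reserved region.
 */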
void __init memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}
static void memblock_dump(struct memblock_type *region, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);

	for (i = 0; i < region->cnt; i++) {
		base = region->regions[i].base;
		size = region->regions[i].size;

		pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
			name, i, base, base + size - 1, size);
	}
}
void memblock_dump_all(void)
{
	if (!memblock_debug)
		return;

	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}
void __init memblock_analyze(void)
{
	int i;

	/* Check marker in the unused last array entry */
	WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);
	WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);

	memblock.memory_size = 0;

	for (i = 0; i < memblock.memory.cnt; i++)
		memblock.memory_size += memblock.memory.regions[i].size;

	/* We allow resizing from there */
	memblock_can_resize = 1;
}
void __init memblock_init(void)
{
	/* Hook up the initial arrays */
	memblock.memory.regions = memblock_memory_init_regions;
	memblock.memory.max = INIT_MEMBLOCK_REGIONS;
	memblock.reserved.regions = memblock_reserved_init_regions;
	memblock.reserved.max = INIT_MEMBLOCK_REGIONS;

	/* Write a marker in the unused last array entry */
	memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;
	memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;

	/* Create a dummy zero size MEMBLOCK which will get coalesced away later.
	 * This simplifies the memblock_add() code below...
	 */
	memblock.memory.regions[0].base = 0;
	memblock.memory.regions[0].size = 0;
	memblock.memory.cnt = 1;

	/* Ditto. */
	memblock.reserved.regions[0].base = 0;
	memblock.reserved.regions[0].size = 0;
	memblock.reserved.cnt = 1;

	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
}
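
/*
 * Typical boot sequence (illustrative sketch; the exact calls are made by
 * each architecture's setup code, not in this file):
 *
 *	memblock_init();			-- hook up the static arrays
 *	memblock_add(base, size);		-- one call per RAM bank
 *	memblock_reserve(kbase, ksize);		-- kernel image, initrd, ...
 *	memblock_analyze();			-- compute memory_size and
 *						   allow array resizing
 *	memblock_alloc(...);			-- early allocations
 */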
static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);
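
/*
 * Example: booting with "memblock=debug" on the kernel command line sets
 * memblock_debug, which turns memblock_dump_all() into a real dump of the
 * memory and reserved arrays instead of a no-op.
 */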
#ifdef CONFIG_DEBUG_FS

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));
	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);
	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */