/*
 * Handle the memory map.
 * The functions here do the job until bootmem takes over.
 *
 * Getting sanitize_e820_map() in sync with i386 version by applying change:
 * - Provisions for empty E820 memory regions (reported by certain BIOSes).
 *
 *     Alex Achenbach <xela@slit.de>, December 2002.
 *     Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>
#include <linux/string.h>
#include <linux/kexec.h>
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/firmware-map.h>

#include <asm/pgtable.h>
#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/trampoline.h>
/*
 * The e820 map is the map that gets modified e.g. with command line parameters
 * and that is also registered with modifications in the kernel resource tree
 * with the iomem_resource as parent.
 *
 * The e820_saved is directly saved after the BIOS-provided memory map is
 * copied. It doesn't get modified afterwards. It's registered for the
 * /sys/firmware/memmap interface.
 *
 * That memory map is not modified and is used as the base for kexec. The
 * kexec'd kernel should get the same memory map as the firmware provides.
 * Then the user can e.g. boot the original kernel with mem=1G while still
 * booting the next kernel with full memory.
 */
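/*
 * For reference, both maps are built from the same entry layout, roughly as
 * declared in <asm/e820.h>. This is a sketch only (the exact field widths
 * below are an assumption based on that header, not a redefinition here):
 *
 *	struct e820entry {
 *		__u64 addr;	// start of memory segment
 *		__u64 size;	// size of memory segment
 *		__u32 type;	// type of memory segment
 *	} __attribute__((packed));
 *
 *	struct e820map {
 *		__u32 nr_map;
 *		struct e820entry map[E820_X_MAX];
 *	};
 */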
struct e820map e820;
struct e820map e820_saved;

/* For PCI or other memory-mapped resources */
unsigned long pci_mem_start = 0xaeedbabe;
EXPORT_SYMBOL(pci_mem_start);
/*
 * This function checks if any part of the range <start,end> is mapped
 * with type.
 */
int
e820_any_mapped(u64 start, u64 end, unsigned type)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];

		if (type && ei->type != type)
			continue;
		if (ei->addr >= end || ei->addr + ei->size <= start)
			continue;
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(e820_any_mapped);
/*
 * This function checks if the entire range <start,end> is mapped with type.
 *
 * Note: this function only works correctly if the e820 table is sorted and
 * non-overlapping, which is the case after sanitize_e820_map() has run.
 */
int __init e820_all_mapped(u64 start, u64 end, unsigned type)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];

		if (type && ei->type != type)
			continue;

		/* is the region (part) in overlap with the current region ? */
		if (ei->addr >= end || ei->addr + ei->size <= start)
			continue;

		/*
		 * if the region is at the beginning of <start,end> we move
		 * start to the end of the region since it's ok until there
		 */
		if (ei->addr <= start)
			start = ei->addr + ei->size;

		/*
		 * if start is now at or beyond end, we're done, full
		 * coverage of the requested range exists:
		 */
		if (start >= end)
			return 1;
	}
	return 0;
}
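/*
 * Illustrative (not compiled) example of how callers typically use these
 * helpers; the address range below is only an assumption for the sketch:
 *
 *	// Is the legacy hole 0xA0000-0xFFFFF completely usable RAM?
 *	if (e820_all_mapped(0xA0000, 0x100000, E820_RAM))
 *		pr_info("low hole is RAM\n");
 *
 *	// Does anything in the first 1MB overlap an ACPI data region?
 *	if (e820_any_mapped(0, 0x100000, E820_ACPI))
 *		pr_info("ACPI tables live below 1MB\n");
 */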
/*
 * Add a memory region to the kernel e820 map.
 */
static void __init __e820_add_region(struct e820map *e820x, u64 start, u64 size,
				     int type)
{
	int x = e820x->nr_map;

	if (x >= ARRAY_SIZE(e820x->map)) {
		printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
		return;
	}

	e820x->map[x].addr = start;
	e820x->map[x].size = size;
	e820x->map[x].type = type;
	e820x->nr_map++;
}

void __init e820_add_region(u64 start, u64 size, int type)
{
	__e820_add_region(&e820, start, size, type);
}
static void __init e820_print_type(u32 type)
{
	switch (type) {
	case E820_RAM:
	case E820_RESERVED_KERN:
		printk(KERN_CONT "(usable)");
		break;
	case E820_RESERVED:
		printk(KERN_CONT "(reserved)");
		break;
	case E820_ACPI:
		printk(KERN_CONT "(ACPI data)");
		break;
	case E820_NVS:
		printk(KERN_CONT "(ACPI NVS)");
		break;
	case E820_UNUSABLE:
		printk(KERN_CONT "(unusable)");
		break;
	default:
		printk(KERN_CONT "type %u", type);
		break;
	}
}
void __init e820_print_map(char *who)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		printk(KERN_INFO " %s: %016Lx - %016Lx ", who,
		       (unsigned long long) e820.map[i].addr,
		       (unsigned long long)
		       (e820.map[i].addr + e820.map[i].size));
		e820_print_type(e820.map[i].type);
		printk(KERN_CONT "\n");
	}
}
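/*
 * For example, on a typical machine the loop above produces boot output of
 * the following shape (addresses are made up for illustration; the actual
 * entries depend entirely on the firmware):
 *
 *	 BIOS-e820: 0000000000000000 - 000000000009fc00 (usable)
 *	 BIOS-e820: 000000000009fc00 - 00000000000a0000 (reserved)
 *	 BIOS-e820: 0000000000100000 - 000000007fff0000 (usable)
 *	 BIOS-e820: 000000007fff0000 - 0000000080000000 (ACPI data)
 */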
/*
 * Sanitize the BIOS e820 map.
 *
 * Some e820 responses include overlapping entries. The following
 * replaces the original e820 map with a new one, removing overlaps,
 * and resolving conflicting memory types in favor of the highest
 * numbered type.
 *
 * The input parameter biosmap points to an array of 'struct
 * e820entry' which on entry has elements in the range [0, *pnr_map)
 * valid, and which has space for up to max_nr_map entries.
 * On return, the resulting sanitized e820 map entries will be
 * written back in the same location, starting at biosmap.
 *
 * The integer pointed to by pnr_map must be valid on entry (the
 * current number of valid entries located at biosmap) and will
 * be updated on return, with the new number of valid entries
 * (no more than max_nr_map).
 *
 * The return value from sanitize_e820_map() is zero if it
 * successfully 'sanitized' the map entries passed in, and is -1
 * if it did nothing, which can happen if either of (1) it was
 * only passed one map entry, or (2) any of the input map entries
 * were invalid (start + size < start, meaning that the size was
 * so big the described memory range wrapped around through zero.)
 *
 * Visually we're performing the following
 * (1,2,3,4 = memory types)...
 *
 * Sample memory map (w/overlaps):
 *	____22__________________
 *	______________________4_
 *	____1111________________
 *	_44_____________________
 *	11111111________________
 *	____________________33__
 *	___________44___________
 *	__________33333_________
 *	______________22________
 *	___________________2222_
 *	_________111111111______
 *	_____________________11_
 *	_________________4______
 *
 * Sanitized equivalent (no overlap):
 *	1_______________________
 *	_44_____________________
 *	___1____________________
 *	____22__________________
 *	______11________________
 *	_________1______________
 *	__________3_____________
 *	___________44___________
 *	_____________33_________
 *	_______________2________
 *	________________1_______
 *	_________________4______
 *	___________________2____
 *	____________________33__
 *	______________________4_
 */
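/*
 * A minimal worked example of the transformation (values invented purely for
 * illustration): given three raw entries
 *
 *	{ addr = 0x00000, size = 0xa0000, type = E820_RAM      }
 *	{ addr = 0x90000, size = 0x10000, type = E820_RESERVED }
 *	{ addr = 0xa0000, size = 0x60000, type = E820_RESERVED }
 *
 * a call such as
 *
 *	u32 nr = 3;
 *	sanitize_e820_map(biosmap, ARRAY_SIZE(biosmap), &nr);
 *
 * would leave nr == 2, with the RAM entry truncated to 0x00000-0x90000 and a
 * single reserved entry covering 0x90000-0x100000, since the higher-numbered
 * type wins wherever entries overlap.
 */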
int __init sanitize_e820_map(struct e820entry *biosmap, int max_nr_map,
			     u32 *pnr_map)
{
	struct change_member {
		struct e820entry *pbios; /* pointer to original bios entry */
		unsigned long long addr; /* address for this change point */
	};
	static struct change_member change_point_list[2*E820_X_MAX] __initdata;
	static struct change_member *change_point[2*E820_X_MAX] __initdata;
	static struct e820entry *overlap_list[E820_X_MAX] __initdata;
	static struct e820entry new_bios[E820_X_MAX] __initdata;
	struct change_member *change_tmp;
	unsigned long current_type, last_type;
	unsigned long long last_addr;
	int chgidx, still_changing;
	int overlap_entries;
	int new_bios_entry;
	int old_nr, new_nr, chg_nr;
	int i;
	/* if there's only one memory region, don't bother */
	if (*pnr_map < 2)
		return -1;

	old_nr = *pnr_map;
	BUG_ON(old_nr > max_nr_map);

	/* bail out if we find any unreasonable addresses in bios map */
	for (i = 0; i < old_nr; i++)
		if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
			return -1;

	/* create pointers for initial change-point information (for sorting) */
	for (i = 0; i < 2 * old_nr; i++)
		change_point[i] = &change_point_list[i];

	/* record all known change-points (starting and ending addresses),
	   omitting those that are for empty memory regions */
	chgidx = 0;
	for (i = 0; i < old_nr; i++) {
		if (biosmap[i].size != 0) {
			change_point[chgidx]->addr = biosmap[i].addr;
			change_point[chgidx++]->pbios = &biosmap[i];
			change_point[chgidx]->addr = biosmap[i].addr +
				biosmap[i].size;
			change_point[chgidx++]->pbios = &biosmap[i];
		}
	}
	chg_nr = chgidx;
	/* sort change-point list by memory addresses (low -> high) */
	still_changing = 1;
	while (still_changing) {
		still_changing = 0;
		for (i = 1; i < chg_nr; i++) {
			unsigned long long curaddr, lastaddr;
			unsigned long long curpbaddr, lastpbaddr;

			curaddr = change_point[i]->addr;
			lastaddr = change_point[i - 1]->addr;
			curpbaddr = change_point[i]->pbios->addr;
			lastpbaddr = change_point[i - 1]->pbios->addr;

			/*
			 * swap entries, when:
			 *
			 * curaddr > lastaddr or
			 * curaddr == lastaddr and curaddr == curpbaddr and
			 * lastaddr != lastpbaddr
			 */
			if (curaddr < lastaddr ||
			    (curaddr == lastaddr && curaddr == curpbaddr &&
			     lastaddr != lastpbaddr)) {
				change_tmp = change_point[i];
				change_point[i] = change_point[i-1];
				change_point[i-1] = change_tmp;
				still_changing = 1;
			}
		}
	}
	/* create a new bios memory map, removing overlaps */
	overlap_entries = 0;	/* number of entries in the overlap table */
	new_bios_entry = 0;	/* index for creating new bios map entries */
	last_type = 0;		/* start with undefined memory type */
	last_addr = 0;		/* start with 0 as last starting address */

	/* loop through change-points, determining effect on the new bios map */
	for (chgidx = 0; chgidx < chg_nr; chgidx++) {
		/* keep track of all overlapping bios entries */
		if (change_point[chgidx]->addr ==
		    change_point[chgidx]->pbios->addr) {
			/*
			 * add map entry to overlap list (> 1 entry
			 * implies an overlap)
			 */
			overlap_list[overlap_entries++] =
				change_point[chgidx]->pbios;
		} else {
			/*
			 * remove entry from list (order independent,
			 * so swap with last)
			 */
			for (i = 0; i < overlap_entries; i++) {
				if (overlap_list[i] ==
				    change_point[chgidx]->pbios)
					overlap_list[i] =
						overlap_list[overlap_entries-1];
			}
			overlap_entries--;
		}
		/*
		 * if there are overlapping entries, decide which
		 * "type" to use (larger value takes precedence --
		 * 1=usable, 2,3,4,4+=unusable)
		 */
		current_type = 0;
		for (i = 0; i < overlap_entries; i++)
			if (overlap_list[i]->type > current_type)
				current_type = overlap_list[i]->type;
		/*
		 * continue building up new bios map based on this
		 * information
		 */
		if (current_type != last_type) {
			if (last_type != 0) {
				new_bios[new_bios_entry].size =
					change_point[chgidx]->addr - last_addr;
				/*
				 * move forward only if the new size
				 * was non-zero
				 */
				if (new_bios[new_bios_entry].size != 0)
					/*
					 * no more space left for new
					 * bios entries ?
					 */
					if (++new_bios_entry >= max_nr_map)
						break;
			}
			if (current_type != 0) {
				new_bios[new_bios_entry].addr =
					change_point[chgidx]->addr;
				new_bios[new_bios_entry].type = current_type;
				last_addr = change_point[chgidx]->addr;
			}
			last_type = current_type;
		}
	}
	/* retain count for new bios entries */
	new_nr = new_bios_entry;

	/* copy new bios mapping into original location */
	memcpy(biosmap, new_bios, new_nr * sizeof(struct e820entry));
	*pnr_map = new_nr;

	return 0;
}
static int __init __append_e820_map(struct e820entry *biosmap, int nr_map)
{
	while (nr_map) {
		u64 start = biosmap->addr;
		u64 size = biosmap->size;
		u64 end = start + size;
		u32 type = biosmap->type;

		/* Overflow in 64 bits? Ignore the memory map. */
		if (start > end)
			return -1;

		e820_add_region(start, size, type);

		biosmap++;
		nr_map--;
	}
	return 0;
}
/*
 * Copy the BIOS e820 map into a safe place.
 *
 * Sanity-check it while we're at it..
 *
 * If we're lucky and live on a modern system, the setup code
 * will have given us a memory map that we can use to properly
 * set up memory. If we aren't, we'll fake a memory map.
 */
static int __init append_e820_map(struct e820entry *biosmap, int nr_map)
{
	/* Only one memory region (or negative)? Ignore it */
	if (nr_map < 2)
		return -1;

	return __append_e820_map(biosmap, nr_map);
}
static u64 __init __e820_update_range(struct e820map *e820x, u64 start,
				      u64 size, unsigned old_type,
				      unsigned new_type)
{
	u64 end;
	int i;
	u64 real_updated_size = 0;

	BUG_ON(old_type == new_type);

	if (size > (ULLONG_MAX - start))
		size = ULLONG_MAX - start;

	end = start + size;
	printk(KERN_DEBUG "e820 update range: %016Lx - %016Lx ",
	       (unsigned long long) start,
	       (unsigned long long) end);
	e820_print_type(old_type);
	printk(KERN_CONT " ==> ");
	e820_print_type(new_type);
	printk(KERN_CONT "\n");

	for (i = 0; i < e820x->nr_map; i++) {
		struct e820entry *ei = &e820x->map[i];
		u64 final_start, final_end;
		u64 ei_end;

		if (ei->type != old_type)
			continue;

		ei_end = ei->addr + ei->size;
		/* totally covered by new range? */
		if (ei->addr >= start && ei_end <= end) {
			ei->type = new_type;
			real_updated_size += ei->size;
			continue;
		}

		/* new range is totally covered? */
		if (ei->addr < start && ei_end > end) {
			__e820_add_region(e820x, start, size, new_type);
			__e820_add_region(e820x, end, ei_end - end, ei->type);
			ei->size = start - ei->addr;
			real_updated_size += size;
			continue;
		}

		/* partially covered */
		final_start = max(start, ei->addr);
		final_end = min(end, ei_end);
		if (final_start >= final_end)
			continue;

		__e820_add_region(e820x, final_start, final_end - final_start,
				  new_type);

		real_updated_size += final_end - final_start;

		/*
		 * left range could be head or tail, so need to update
		 * size at first.
		 */
		ei->size -= final_end - final_start;
		if (ei->addr < final_start)
			continue;
		ei->addr = final_end;
	}
	return real_updated_size;
}
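/*
 * A concrete (illustrative, made-up) example of the splitting above: if the
 * map contains a single usable entry 0x100000-0x40000000 and a caller runs
 *
 *	e820_update_range(0x20000000, 0x1000000, E820_RAM, E820_RESERVED);
 *
 * the entry is split three ways: RAM 0x100000-0x20000000, a new reserved
 * entry 0x20000000-0x21000000, and RAM 0x21000000-0x40000000. The return
 * value is the number of bytes whose type actually changed (0x1000000 here).
 */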
u64 __init e820_update_range(u64 start, u64 size, unsigned old_type,
			     unsigned new_type)
{
	return __e820_update_range(&e820, start, size, old_type, new_type);
}

static u64 __init e820_update_range_saved(u64 start, u64 size,
					  unsigned old_type, unsigned new_type)
{
	return __e820_update_range(&e820_saved, start, size, old_type,
				   new_type);
}
/* make e820 not cover the range */
u64 __init e820_remove_range(u64 start, u64 size, unsigned old_type,
			     int checktype)
{
	int i;
	u64 end;
	u64 real_removed_size = 0;

	if (size > (ULLONG_MAX - start))
		size = ULLONG_MAX - start;

	end = start + size;
	printk(KERN_DEBUG "e820 remove range: %016Lx - %016Lx ",
	       (unsigned long long) start,
	       (unsigned long long) end);
	e820_print_type(old_type);
	printk(KERN_CONT "\n");

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		u64 final_start, final_end;

		if (checktype && ei->type != old_type)
			continue;

		/* totally covered? */
		if (ei->addr >= start &&
		    (ei->addr + ei->size) <= (start + size)) {
			real_removed_size += ei->size;
			memset(ei, 0, sizeof(struct e820entry));
			continue;
		}

		/* partially covered */
		final_start = max(start, ei->addr);
		final_end = min(start + size, ei->addr + ei->size);
		if (final_start >= final_end)
			continue;
		real_removed_size += final_end - final_start;

		ei->size -= final_end - final_start;
		if (ei->addr < final_start)
			continue;
		ei->addr = final_end;
	}
	return real_removed_size;
}
void __init update_e820(void)
{
	u32 nr_map;

	nr_map = e820.nr_map;
	if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr_map))
		return;
	e820.nr_map = nr_map;
	printk(KERN_INFO "modified physical RAM map:\n");
	e820_print_map("modified");
}

static void __init update_e820_saved(void)
{
	u32 nr_map;

	nr_map = e820_saved.nr_map;
	if (sanitize_e820_map(e820_saved.map, ARRAY_SIZE(e820_saved.map), &nr_map))
		return;
	e820_saved.nr_map = nr_map;
}
#define MAX_GAP_END 0x100000000ull
/*
 * Search for a gap in the e820 memory space from start_addr to end_addr.
 */
__init int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
			   unsigned long start_addr, unsigned long long end_addr)
{
	unsigned long long last;
	int i = e820.nr_map;
	int found = 0;

	last = (end_addr && end_addr < MAX_GAP_END) ? end_addr : MAX_GAP_END;

	while (--i >= 0) {
		unsigned long long start = e820.map[i].addr;
		unsigned long long end = start + e820.map[i].size;

		if (end < start_addr)
			continue;

		/*
		 * Since "last" is at most 4GB, we know we'll
		 * fit in 32 bits if this condition is true
		 */
		if (last > end) {
			unsigned long gap = last - end;

			if (gap >= *gapsize) {
				*gapsize = gap;
				*gapstart = end;
				found = 1;
			}
		}
		if (start < last)
			last = start;
	}
	return found;
}
/*
 * Search for the biggest gap in the low 32 bits of the e820
 * memory space. We pass this space to PCI to assign MMIO resources
 * for hotplug or unconfigured devices in.
 * Hopefully the BIOS left enough space for that.
 */
__init void e820_setup_gap(void)
{
	unsigned long gapstart, gapsize;
	int found;

	gapstart = 0x10000000;
	gapsize = 0x400000;
	found = e820_search_gap(&gapstart, &gapsize, 0, MAX_GAP_END);

#ifdef CONFIG_X86_64
	if (!found) {
		gapstart = (max_pfn << PAGE_SHIFT) + 1024*1024;
		printk(KERN_ERR
	"PCI: Warning: Cannot find a gap in the 32bit address range\n"
	"PCI: Unassigned devices with 32bit resource registers may break!\n");
	}
#endif

	/*
	 * e820_reserve_resources_late() protects stolen RAM already
	 */
	pci_mem_start = gapstart;

	printk(KERN_INFO
	       "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
	       pci_mem_start, gapstart, gapsize);
}
/*
 * Because of the limited size of struct boot_params, only the first
 * 128 E820 memory entries are passed to the kernel via
 * boot_params.e820_map; the rest are passed via the SETUP_E820_EXT
 * node of the linked list of struct setup_data, which is parsed here.
 */
void __init parse_e820_ext(struct setup_data *sdata, unsigned long pa_data)
{
	u32 map_len;
	int entries;
	struct e820entry *extmap;

	entries = sdata->len / sizeof(struct e820entry);
	map_len = sdata->len + sizeof(struct setup_data);
	if (map_len > PAGE_SIZE)
		sdata = early_ioremap(pa_data, map_len);
	extmap = (struct e820entry *)(sdata->data);
	__append_e820_map(extmap, entries);
	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
	if (map_len > PAGE_SIZE)
		early_iounmap(sdata, map_len);
	printk(KERN_INFO "extended physical RAM map:\n");
	e820_print_map("extended");
}
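/*
 * For context, a rough sketch of how a caller finds those SETUP_E820_EXT
 * nodes. This mirrors the boot-time setup_data walk done in setup.c, but the
 * code below is only an illustrative assumption, not the actual caller:
 *
 *	u64 pa_data = boot_params.hdr.setup_data;
 *
 *	while (pa_data) {
 *		struct setup_data *data;
 *
 *		data = early_ioremap(pa_data, PAGE_SIZE);
 *		if (data->type == SETUP_E820_EXT)
 *			parse_e820_ext(data, pa_data);
 *		pa_data = data->next;
 *		early_iounmap(data, PAGE_SIZE);
 *	}
 */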
679 #if defined(CONFIG_X86_64) || \
680 (defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION))
682 * Find the ranges of physical addresses that do not correspond to
683 * e820 RAM areas and mark the corresponding pages as nosave for
684 * hibernation (32 bit) or software suspend and suspend to RAM (64 bit).
686 * This function requires the e820 map to be sorted and without any
687 * overlapping entries and assumes the first e820 area to be RAM.
689 void __init e820_mark_nosave_regions(unsigned long limit_pfn)
694 pfn = PFN_DOWN(e820.map[0].addr + e820.map[0].size);
695 for (i = 1; i < e820.nr_map; i++) {
696 struct e820entry *ei = &e820.map[i];
698 if (pfn < PFN_UP(ei->addr))
699 register_nosave_region(pfn, PFN_UP(ei->addr));
701 pfn = PFN_DOWN(ei->addr + ei->size);
702 if (ei->type != E820_RAM && ei->type != E820_RESERVED_KERN)
703 register_nosave_region(PFN_UP(ei->addr), pfn);
705 if (pfn >= limit_pfn)
711 #ifdef CONFIG_HIBERNATION
713 * Mark ACPI NVS memory region, so that we can save/restore it during
714 * hibernation and the subsequent resume.
716 static int __init e820_mark_nvs_memory(void)
720 for (i = 0; i < e820.nr_map; i++) {
721 struct e820entry *ei = &e820.map[i];
723 if (ei->type == E820_NVS)
724 hibernate_nvs_register(ei->addr, ei->size);
729 core_initcall(e820_mark_nvs_memory);
/*
 * Early reserved memory areas.
 */
/*
 * need to make sure this one is big enough before
 * find_e820_area() can be used
 */
#define MAX_EARLY_RES_X 32
746 static struct early_res early_res_x[MAX_EARLY_RES_X] __initdata = {
747 { 0, PAGE_SIZE, "BIOS data page", 1 }, /* BIOS data page */
748 #if defined(CONFIG_X86_32) && defined(CONFIG_X86_TRAMPOLINE)
750 * But first pinch a few for the stack/trampoline stuff
751 * FIXME: Don't need the extra page at 4K, but need to fix
752 * trampoline before removing it. (see the GDT stuff)
754 { PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE", 1 },
760 static int max_early_res __initdata = MAX_EARLY_RES_X;
761 static struct early_res *early_res __initdata = &early_res_x[0];
762 static int early_res_count __initdata =
770 static int __init find_overlapped_early(u64 start, u64 end)
775 for (i = 0; i < max_early_res && early_res[i].end; i++) {
777 if (end > r->start && start < r->end)
785 * Drop the i-th range from the early reservation map,
786 * by copying any higher ranges down one over it, and
787 * clearing what had been the last slot.
789 static void __init drop_range(int i)
793 for (j = i + 1; j < max_early_res && early_res[j].end; j++)
796 memmove(&early_res[i], &early_res[i + 1],
797 (j - 1 - i) * sizeof(struct early_res));
799 early_res[j - 1].end = 0;
/*
 * Split any existing ranges that:
 *  1) are marked 'overlap_ok', and
 *  2) overlap with the stated range [start, end)
 * into whatever portion (if any) of the existing range is entirely
 * below or entirely above the stated range.  Drop the portion
 * of the existing range that overlaps with the stated range,
 * which will allow the caller of this routine to then add that
 * stated range without conflicting with any existing range.
 */
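/*
 * Worked example (addresses invented purely for illustration): if an
 * overlap_ok reservation covers [0x1000, 0x9000) and a caller is about to
 * reserve [0x3000, 0x5000), this routine drops the old range and re-adds the
 * two non-overlapping leftovers [0x1000, 0x3000) and [0x5000, 0x9000), both
 * still marked overlap_ok, so the new reservation no longer collides.
 */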
813 static void __init drop_overlaps_that_are_ok(u64 start, u64 end)
817 u64 lower_start, lower_end;
818 u64 upper_start, upper_end;
821 for (i = 0; i < max_early_res && early_res[i].end; i++) {
824 /* Continue past non-overlapping ranges */
825 if (end <= r->start || start >= r->end)
829 * Leave non-ok overlaps as is; let caller
830 * panic "Overlapping early reservations"
831 * when it hits this overlap.
837 * We have an ok overlap. We will drop it from the early
838 * reservation map, and add back in any non-overlapping
839 * portions (lower or upper) as separate, overlap_ok,
840 * non-overlapping ranges.
843 /* 1. Note any non-overlapping (lower or upper) ranges. */
844 strncpy(name, r->name, sizeof(name) - 1);
846 lower_start = lower_end = 0;
847 upper_start = upper_end = 0;
848 if (r->start < start) {
849 lower_start = r->start;
857 /* 2. Drop the original ok overlapping range */
860 i--; /* resume for-loop on copied down entry */
862 /* 3. Add back in any non-overlapping ranges. */
864 reserve_early_overlap_ok(lower_start, lower_end, name);
866 reserve_early_overlap_ok(upper_start, upper_end, name);
870 static void __init __reserve_early(u64 start, u64 end, char *name,
876 i = find_overlapped_early(start, end);
877 if (i >= max_early_res)
878 panic("Too many early reservations");
881 panic("Overlapping early reservations "
882 "%llx-%llx %s to %llx-%llx %s\n",
883 start, end - 1, name?name:"", r->start,
884 r->end - 1, r->name);
887 r->overlap_ok = overlap_ok;
889 strncpy(r->name, name, sizeof(r->name) - 1);
/*
 * A few early reservations come here.
 *
 * The 'overlap_ok' in the name of this routine does -not- mean it
 * is ok for these reservations to overlap an earlier reservation.
 * Rather it means that it is ok for subsequent reservations to
 * overlap this one.
 *
 * Use this entry point to reserve early ranges when you are doing
 * so out of "Paranoia", reserving perhaps more memory than you need,
 * just in case, and don't mind a subsequent overlapping reservation
 * that is known to be needed.
 *
 * The drop_overlaps_that_are_ok() call here isn't really needed.
 * It would be needed if we had two colliding 'overlap_ok'
 * reservations, so that the second such would not panic on the
 * overlap with the first.  We don't have any such as of this
 * writing, but might as well tolerate such if it happens in
 * the future.
 */
void __init reserve_early_overlap_ok(u64 start, u64 end, char *name)
{
	drop_overlaps_that_are_ok(start, end);
	__reserve_early(start, end, name, 1);
}
919 static void __init __check_and_double_early_res(u64 start)
922 struct early_res *new;
924 /* do we have enough slots left ? */
925 if ((max_early_res - early_res_count) > max(max_early_res/8, 2))
929 end = max_pfn_mapped << PAGE_SHIFT;
930 size = sizeof(struct early_res) * max_early_res * 2;
931 mem = find_e820_area(start, end, size, sizeof(struct early_res));
934 panic("can not find more space for early_res array");
937 /* save the first one for own */
939 new[0].end = mem + size;
940 new[0].overlap_ok = 0;
941 /* copy old to new */
942 if (early_res == early_res_x) {
943 memcpy(&new[1], &early_res[0],
944 sizeof(struct early_res) * max_early_res);
945 memset(&new[max_early_res+1], 0,
946 sizeof(struct early_res) * (max_early_res - 1));
949 memcpy(&new[1], &early_res[1],
950 sizeof(struct early_res) * (max_early_res - 1));
951 memset(&new[max_early_res], 0,
952 sizeof(struct early_res) * max_early_res);
954 memset(&early_res[0], 0, sizeof(struct early_res) * max_early_res);
957 printk(KERN_DEBUG "early_res array is doubled to %d at [%llx - %llx]\n",
958 max_early_res, mem, mem + size - 1);
/*
 * Most early reservations come here.
 *
 * We first have drop_overlaps_that_are_ok() drop any pre-existing
 * 'overlap_ok' ranges, so that we can then reserve this memory
 * range without risk of panic'ing on an overlapping overlap_ok
 * early reservation.
 */
void __init reserve_early(u64 start, u64 end, char *name)
{
	if (start >= end)
		return;

	__check_and_double_early_res(end);

	drop_overlaps_that_are_ok(start, end);
	__reserve_early(start, end, name, 0);
}
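/*
 * Typical usage, as seen in early boot code (the exact symbols below are an
 * assumption for illustration; real callers live in setup.c and friends):
 *
 *	reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop),
 *		      "TEXT DATA BSS");
 *	reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
 *
 * Each call panics on a collision with a non-overlap_ok reservation, which
 * is the point: genuinely conflicting early allocations are a fatal setup bug.
 */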
980 void __init reserve_early_without_check(u64 start, u64 end, char *name)
987 __check_and_double_early_res(end);
989 r = &early_res[early_res_count];
995 strncpy(r->name, name, sizeof(r->name) - 1);
999 void __init free_early(u64 start, u64 end)
1001 struct early_res *r;
1004 i = find_overlapped_early(start, end);
1006 if (i >= max_early_res || r->end != end || r->start != start)
1007 panic("free_early on not reserved area: %llx-%llx!",
1013 #ifdef CONFIG_NO_BOOTMEM
1014 static void __init subtract_early_res(struct range *range, int az)
1017 u64 final_start, final_end;
1021 for (i = 0; i < max_early_res && early_res[i].end; i++)
1024 /* need to skip first one ?*/
1025 if (early_res != early_res_x)
1029 printk(KERN_INFO "Subtract (%d early reservations)\n", count);
1031 for (i = idx; i < count; i++) {
1032 struct early_res *r = &early_res[i];
1034 printk(KERN_INFO " #%d [%010llx - %010llx] %15s", i,
1035 r->start, r->end, r->name);
1037 final_start = PFN_DOWN(r->start);
1038 final_end = PFN_UP(r->end);
1039 if (final_start >= final_end) {
1041 printk(KERN_CONT "\n");
1046 printk(KERN_CONT " subtract pfn [%010llx - %010llx]\n",
1047 final_start, final_end);
1049 subtract_range(range, az, final_start, final_end);
1054 int __init get_free_all_memory_range(struct range **rangep, int nodeid)
1060 struct range *range;
1064 for (i = 0; i < max_early_res && early_res[i].end; i++)
1069 size = sizeof(struct range) * count;
1070 #ifdef MAX_DMA32_PFN
1071 if (max_pfn_mapped > MAX_DMA32_PFN)
1072 start = MAX_DMA32_PFN << PAGE_SHIFT;
1074 end = max_pfn_mapped << PAGE_SHIFT;
1075 mem = find_e820_area(start, end, size, sizeof(struct range));
1077 panic("can not find more space for range free");
1080 /* use early_node_map[] and early_res to get range array at first */
1081 memset(range, 0, size);
1084 /* need to go over early_node_map to find out good range for node */
1085 nr_range = add_from_early_node_map(range, count, nr_range, nodeid);
1086 subtract_early_res(range, count);
1087 nr_range = clean_sort_range(range, count);
1089 /* need to clear it ? */
1090 if (nodeid == MAX_NUMNODES) {
1091 memset(&early_res[0], 0,
1092 sizeof(struct early_res) * max_early_res);
1101 void __init early_res_to_bootmem(u64 start, u64 end)
1104 u64 final_start, final_end;
1108 for (i = 0; i < max_early_res && early_res[i].end; i++)
1111 /* need to skip first one ?*/
1112 if (early_res != early_res_x)
1115 printk(KERN_INFO "(%d/%d early reservations) ==> bootmem [%010llx - %010llx]\n",
1116 count - idx, max_early_res, start, end);
1117 for (i = idx; i < count; i++) {
1118 struct early_res *r = &early_res[i];
1119 printk(KERN_INFO " #%d [%010llx - %010llx] %16s", i,
1120 r->start, r->end, r->name);
1121 final_start = max(start, r->start);
1122 final_end = min(end, r->end);
1123 if (final_start >= final_end) {
1124 printk(KERN_CONT "\n");
1127 printk(KERN_CONT " ==> [%010llx - %010llx]\n",
1128 final_start, final_end);
1129 reserve_bootmem_generic(final_start, final_end - final_start,
1133 memset(&early_res[0], 0, sizeof(struct early_res) * max_early_res);
1136 early_res_count = 0;
1140 /* Check for already reserved areas */
1141 static inline int __init bad_addr(u64 *addrp, u64 size, u64 align)
1146 struct early_res *r;
1148 i = find_overlapped_early(addr, addr + size);
1150 if (i < max_early_res && r->end) {
1151 *addrp = addr = round_up(r->end, align);
1158 /* Check for already reserved areas */
1159 static inline int __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align)
1162 u64 addr = *addrp, last;
1167 for (i = 0; i < max_early_res && early_res[i].end; i++) {
1168 struct early_res *r = &early_res[i];
1169 if (last > r->start && addr < r->start) {
1170 size = r->start - addr;
1174 if (last > r->end && addr < r->end) {
1175 addr = round_up(r->end, align);
1180 if (last <= r->end && addr >= r->start) {
/*
 * Find a free area with specified alignment in a specific range.
 * Only the portion between start and end that lies in an active range from
 * early_node_map is considered, so it is known to be usable RAM.
 */
1197 u64 __init find_early_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
1198 u64 size, u64 align)
1202 addr = round_up(ei_start, align);
1204 addr = round_up(start, align);
1205 if (addr >= ei_last)
1207 while (bad_addr(&addr, size, align) && addr+size <= ei_last)
1222 * Find a free area with specified alignment in a specific range.
1224 u64 __init find_e820_area(u64 start, u64 end, u64 size, u64 align)
1228 for (i = 0; i < e820.nr_map; i++) {
1229 struct e820entry *ei = &e820.map[i];
1231 u64 ei_start, ei_last;
1233 if (ei->type != E820_RAM)
1236 ei_last = ei->addr + ei->size;
1237 ei_start = ei->addr;
1238 addr = find_early_area(ei_start, ei_last, start, end,
1250 * Find next free range after *start
1252 u64 __init find_e820_area_size(u64 start, u64 *sizep, u64 align)
1256 for (i = 0; i < e820.nr_map; i++) {
1257 struct e820entry *ei = &e820.map[i];
1261 if (ei->type != E820_RAM)
1263 addr = round_up(ei->addr, align);
1264 ei_last = ei->addr + ei->size;
1266 addr = round_up(start, align);
1267 if (addr >= ei_last)
1269 *sizep = ei_last - addr;
1270 while (bad_addr_size(&addr, sizep, align) &&
1271 addr + *sizep <= ei_last)
1273 last = addr + *sizep;
 * pre-allocate 4k and reserve it in the e820 map
1285 u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
1291 for (start = startt; ; start += size) {
1292 start = find_e820_area_size(start, &size, align);
1299 #ifdef CONFIG_X86_32
1300 if (start >= MAXMEM)
1302 if (start + size > MAXMEM)
1303 size = MAXMEM - start;
1306 addr = round_down(start + size - sizet, align);
1309 e820_update_range(addr, sizet, E820_RAM, E820_RESERVED);
1310 e820_update_range_saved(addr, sizet, E820_RAM, E820_RESERVED);
1311 printk(KERN_INFO "update e820 for early_reserve_e820\n");
1313 update_e820_saved();
#ifdef CONFIG_X86_32
# ifdef CONFIG_X86_PAE
#  define MAX_ARCH_PFN		(1ULL<<(36-PAGE_SHIFT))
# else
#  define MAX_ARCH_PFN		(1ULL<<(32-PAGE_SHIFT))
# endif
#else /* CONFIG_X86_32 */
# define MAX_ARCH_PFN MAXMEM>>PAGE_SHIFT
#endif
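/*
 * Worked out, these limits mean: with 32-bit non-PAE the highest pfn is
 * 1ULL << (32 - 12) = 0x100000 (4GB of physical address space), with PAE it
 * is 1ULL << (36 - 12) = 0x1000000 (64GB), and on 64-bit the cap comes from
 * MAXMEM >> PAGE_SHIFT instead.
 */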
1329 * Find the highest page frame number we have available
1331 static unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
1334 unsigned long last_pfn = 0;
1335 unsigned long max_arch_pfn = MAX_ARCH_PFN;
1337 for (i = 0; i < e820.nr_map; i++) {
1338 struct e820entry *ei = &e820.map[i];
1339 unsigned long start_pfn;
1340 unsigned long end_pfn;
1342 if (ei->type != type)
1345 start_pfn = ei->addr >> PAGE_SHIFT;
1346 end_pfn = (ei->addr + ei->size) >> PAGE_SHIFT;
1348 if (start_pfn >= limit_pfn)
1350 if (end_pfn > limit_pfn) {
1351 last_pfn = limit_pfn;
1354 if (end_pfn > last_pfn)
1358 if (last_pfn > max_arch_pfn)
1359 last_pfn = max_arch_pfn;
1361 printk(KERN_INFO "last_pfn = %#lx max_arch_pfn = %#lx\n",
1362 last_pfn, max_arch_pfn);
1365 unsigned long __init e820_end_of_ram_pfn(void)
1367 return e820_end_pfn(MAX_ARCH_PFN, E820_RAM);
1370 unsigned long __init e820_end_of_low_ram_pfn(void)
1372 return e820_end_pfn(1UL<<(32 - PAGE_SHIFT), E820_RAM);
1375 * Finds an active region in the address range from start_pfn to last_pfn and
1376 * returns its range in ei_startpfn and ei_endpfn for the e820 entry.
1378 int __init e820_find_active_region(const struct e820entry *ei,
1379 unsigned long start_pfn,
1380 unsigned long last_pfn,
1381 unsigned long *ei_startpfn,
1382 unsigned long *ei_endpfn)
1384 u64 align = PAGE_SIZE;
1386 *ei_startpfn = round_up(ei->addr, align) >> PAGE_SHIFT;
1387 *ei_endpfn = round_down(ei->addr + ei->size, align) >> PAGE_SHIFT;
1389 /* Skip map entries smaller than a page */
1390 if (*ei_startpfn >= *ei_endpfn)
1393 /* Skip if map is outside the node */
1394 if (ei->type != E820_RAM || *ei_endpfn <= start_pfn ||
1395 *ei_startpfn >= last_pfn)
1398 /* Check for overlaps */
1399 if (*ei_startpfn < start_pfn)
1400 *ei_startpfn = start_pfn;
1401 if (*ei_endpfn > last_pfn)
1402 *ei_endpfn = last_pfn;
1407 /* Walk the e820 map and register active regions within a node */
1408 void __init e820_register_active_regions(int nid, unsigned long start_pfn,
1409 unsigned long last_pfn)
1411 unsigned long ei_startpfn;
1412 unsigned long ei_endpfn;
1415 for (i = 0; i < e820.nr_map; i++)
1416 if (e820_find_active_region(&e820.map[i],
1417 start_pfn, last_pfn,
1418 &ei_startpfn, &ei_endpfn))
1419 add_active_range(nid, ei_startpfn, ei_endpfn);
1423 * Find the hole size (in bytes) in the memory range.
1424 * @start: starting address of the memory range to scan
1425 * @end: ending address of the memory range to scan
1427 u64 __init e820_hole_size(u64 start, u64 end)
1429 unsigned long start_pfn = start >> PAGE_SHIFT;
1430 unsigned long last_pfn = end >> PAGE_SHIFT;
1431 unsigned long ei_startpfn, ei_endpfn, ram = 0;
1434 for (i = 0; i < e820.nr_map; i++) {
1435 if (e820_find_active_region(&e820.map[i],
1436 start_pfn, last_pfn,
1437 &ei_startpfn, &ei_endpfn))
1438 ram += ei_endpfn - ei_startpfn;
1440 return end - start - ((u64)ram << PAGE_SHIFT);
1443 static void early_panic(char *msg)
1449 static int userdef __initdata;
1451 /* "mem=nopentium" disables the 4MB page tables. */
1452 static int __init parse_memopt(char *p)
1459 #ifdef CONFIG_X86_32
1460 if (!strcmp(p, "nopentium")) {
1461 setup_clear_cpu_cap(X86_FEATURE_PSE);
1467 mem_size = memparse(p, &p);
1468 e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1);
1472 early_param("mem", parse_memopt);
1474 static int __init parse_memmap_opt(char *p)
1477 u64 start_at, mem_size;
1482 if (!strncmp(p, "exactmap", 8)) {
1483 #ifdef CONFIG_CRASH_DUMP
		/*
		 * If we are doing a crash dump, we still need to know
		 * the real mem size before the original memory map is
		 * reset.
		 */
1489 saved_max_pfn = e820_end_of_ram_pfn();
1497 mem_size = memparse(p, &p);
1503 start_at = memparse(p+1, &p);
1504 e820_add_region(start_at, mem_size, E820_RAM);
1505 } else if (*p == '#') {
1506 start_at = memparse(p+1, &p);
1507 e820_add_region(start_at, mem_size, E820_ACPI);
1508 } else if (*p == '$') {
1509 start_at = memparse(p+1, &p);
1510 e820_add_region(start_at, mem_size, E820_RESERVED);
1512 e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1);
1514 return *p == '\0' ? 0 : -EINVAL;
1516 early_param("memmap", parse_memmap_opt);
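/*
 * For reference, the command-line forms handled above look like this (sizes
 * and addresses are examples only):
 *
 *	mem=512M		limit usable RAM to the first 512MB
 *	memmap=exactmap		start from an empty map, then add regions
 *	memmap=64M@16M		mark 64MB starting at 16MB as usable RAM ('@')
 *	memmap=16M#64M		mark 16MB at 64MB as ACPI data ('#')
 *	memmap=64M$512M		mark 64MB at 512MB as reserved ('$')
 *	memmap=512M		with no suffix, trim RAM above 512MB
 */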
1518 void __init finish_e820_parsing(void)
1521 u32 nr = e820.nr_map;
1523 if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr) < 0)
1524 early_panic("Invalid user supplied memory map");
1527 printk(KERN_INFO "user-defined physical RAM map:\n");
1528 e820_print_map("user");
static inline const char *e820_type_to_string(int e820_type)
{
	switch (e820_type) {
	case E820_RESERVED_KERN:
	case E820_RAM:		return "System RAM";
	case E820_ACPI:		return "ACPI Tables";
	case E820_NVS:		return "ACPI Non-volatile Storage";
	case E820_UNUSABLE:	return "Unusable memory";
	default:		return "reserved";
	}
}
1545 * Mark e820 reserved areas as busy for the resource manager.
1547 static struct resource __initdata *e820_res;
1548 void __init e820_reserve_resources(void)
1551 struct resource *res;
1554 res = alloc_bootmem(sizeof(struct resource) * e820.nr_map);
1556 for (i = 0; i < e820.nr_map; i++) {
1557 end = e820.map[i].addr + e820.map[i].size - 1;
1558 if (end != (resource_size_t)end) {
1562 res->name = e820_type_to_string(e820.map[i].type);
1563 res->start = e820.map[i].addr;
1566 res->flags = IORESOURCE_MEM;
1569 * don't register the region that could be conflicted with
1570 * pci device BAR resource and insert them later in
1571 * pcibios_resource_survey()
1573 if (e820.map[i].type != E820_RESERVED || res->start < (1ULL<<20)) {
1574 res->flags |= IORESOURCE_BUSY;
1575 insert_resource(&iomem_resource, res);
1580 for (i = 0; i < e820_saved.nr_map; i++) {
1581 struct e820entry *entry = &e820_saved.map[i];
1582 firmware_map_add_early(entry->addr,
1583 entry->addr + entry->size - 1,
1584 e820_type_to_string(entry->type));
/* How much should we pad RAM ending depending on where it is? */
static unsigned long ram_alignment(resource_size_t pos)
{
	unsigned long mb = pos >> 20;

	/* To 64kB in the first megabyte */
	if (!mb)
		return 64*1024;

	/* To 1MB in the first 16MB */
	if (mb < 16)
		return 1024*1024;

	/* To 64MB for anything above that */
	return 64*1024*1024;
}
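/*
 * For instance (values picked only to illustrate the thresholds above): a RAM
 * region ending at 0x9fc00 (below 1MB) is padded to a 64kB boundary, one
 * ending at 0x00ffe000 (below 16MB) to a 1MB boundary, and one ending at
 * 0x7fff0000 to a 64MB boundary.
 */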
1605 #define MAX_RESOURCE_SIZE ((resource_size_t)-1)
1607 void __init e820_reserve_resources_late(void)
1610 struct resource *res;
1613 for (i = 0; i < e820.nr_map; i++) {
1614 if (!res->parent && res->end)
1615 insert_resource_expand_to_fit(&iomem_resource, res);
	/*
	 * Try to bump up RAM regions to reasonable boundaries to
	 * avoid stolen RAM:
	 */
1623 for (i = 0; i < e820.nr_map; i++) {
1624 struct e820entry *entry = &e820.map[i];
1627 if (entry->type != E820_RAM)
1629 start = entry->addr + entry->size;
1630 end = round_up(start, ram_alignment(start)) - 1;
1631 if (end > MAX_RESOURCE_SIZE)
1632 end = MAX_RESOURCE_SIZE;
1635 printk(KERN_DEBUG "reserve RAM buffer: %016llx - %016llx ",
1637 reserve_region_with_split(&iomem_resource, start, end,
1642 char *__init default_machine_specific_memory_setup(void)
1644 char *who = "BIOS-e820";
1647 * Try to copy the BIOS-supplied E820-map.
1649 * Otherwise fake a memory map; one section from 0k->640k,
1650 * the next section from 1mb->appropriate_mem_k
1652 new_nr = boot_params.e820_entries;
1653 sanitize_e820_map(boot_params.e820_map,
1654 ARRAY_SIZE(boot_params.e820_map),
1656 boot_params.e820_entries = new_nr;
1657 if (append_e820_map(boot_params.e820_map, boot_params.e820_entries)
1661 /* compare results from other methods and take the greater */
1662 if (boot_params.alt_mem_k
1663 < boot_params.screen_info.ext_mem_k) {
1664 mem_size = boot_params.screen_info.ext_mem_k;
1667 mem_size = boot_params.alt_mem_k;
1672 e820_add_region(0, LOWMEMSIZE(), E820_RAM);
1673 e820_add_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
1676 /* In case someone cares... */
void __init setup_memory_map(void)
{
	char *who;

	who = x86_init.resources.memory_setup();
	memcpy(&e820_saved, &e820, sizeof(struct e820map));
	printk(KERN_INFO "BIOS-provided physical RAM map:\n");
	e820_print_map(who);
}