2 * Generic VM initialization for x86-64 NUMA setups.
3 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
5 #include <linux/kernel.h>
7 #include <linux/string.h>
8 #include <linux/init.h>
9 #include <linux/bootmem.h>
10 #include <linux/mmzone.h>
11 #include <linux/ctype.h>
12 #include <linux/module.h>
13 #include <linux/nodemask.h>
14 #include <linux/sched.h>
17 #include <asm/proto.h>
23 struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
24 EXPORT_SYMBOL(node_data);
26 struct memnode memnode;
28 s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
29 [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
32 int numa_off __initdata;
33 static unsigned long __initdata nodemap_addr;
34 static unsigned long __initdata nodemap_size;
36 DEFINE_PER_CPU(int, node_number) = 0;
37 EXPORT_PER_CPU_SYMBOL(node_number);
40 * Map cpu index to node index
42 DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
43 EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
46 * Given a shift value, try to populate memnodemap[]
49 * 0 if memnodemap[] too small (or shift too small)
50 * -1 if nodes overlap or RAM is lost (shift too big)
52 static int __init populate_memnodemap(const struct bootnode *nodes,
53 int numnodes, int shift, int *nodeids)
55 unsigned long addr, end;
58 memset(memnodemap, 0xff, sizeof(s16)*memnodemapsize);
59 for (i = 0; i < numnodes; i++) {
60 addr = nodes[i].start;
64 if ((end >> shift) >= memnodemapsize)
67 if (memnodemap[addr >> shift] != NUMA_NO_NODE)
71 memnodemap[addr >> shift] = i;
73 memnodemap[addr >> shift] = nodeids[i];
75 addr += (1UL << shift);
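/*
 * Editor's sketch (not in the original file): the consumer of the table
 * built above is phys_to_nid() in <asm/mmzone_64.h>, which reduces to a
 * single shift plus array index. The illustrative copy below only shows
 * the idea; the real helper lives in the header.
 */
static inline int example_phys_to_nid(unsigned long addr)
{
	/* each 1UL << memnode_shift chunk of physical memory has one owner */
	return memnodemap[addr >> memnode_shift];
}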
82 static int __init allocate_cachealigned_memnodemap(void)
86 memnodemap = memnode.embedded_map;
87 if (memnodemapsize <= ARRAY_SIZE(memnode.embedded_map))
91 nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
92 nodemap_addr = find_e820_area(addr, max_pfn<<PAGE_SHIFT,
93 nodemap_size, L1_CACHE_BYTES);
94 if (nodemap_addr == -1UL) {
96 "NUMA: Unable to allocate Memory to Node hash map\n");
97 nodemap_addr = nodemap_size = 0;
100 memnodemap = phys_to_virt(nodemap_addr);
101 reserve_early(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP");
103 printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
104 nodemap_addr, nodemap_addr + nodemap_size);
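/*
 * Editor's worked example (not in the original file): with shift = 27
 * (128MB per entry) and 64GB of physical address space, the map needs
 * 64GB >> 27 = 512 s16 entries, i.e. 1KB. That exceeds the small
 * embedded_map inside struct memnode, so the code above carves a
 * dedicated L1-cache-aligned area out of e820 and reserves it as
 * "MEMNODEMAP".
 */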
109 * The position of the lowest set bit across all start and end addresses
110 * in the node map is the maximum possible shift.
112 static int __init extract_lsb_from_nodes(const struct bootnode *nodes,
115 int i, nodes_used = 0;
116 unsigned long start, end;
117 unsigned long bitfield = 0, memtop = 0;
119 for (i = 0; i < numnodes; i++) {
120 start = nodes[i].start;
132 i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
133 memnodemapsize = (memtop >> i)+1;
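/*
 * Editor's worked example (not in the original file), assuming the usual
 * accumulation of node start addresses into 'bitfield' elided above:
 * with nodes starting at 0x0 and 0x28000000 and memtop = 0x50000000,
 * the lowest set bit is bit 27, so the shift is 27 (128MB granularity)
 * and memnodemapsize becomes (0x50000000 >> 27) + 1 = 11 entries.
 */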
137 int __init compute_hash_shift(struct bootnode *nodes, int numnodes,
142 shift = extract_lsb_from_nodes(nodes, numnodes);
143 if (allocate_cachealigned_memnodemap())
145 printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n",
148 if (populate_memnodemap(nodes, numnodes, shift, nodeids) != 1) {
149 printk(KERN_INFO "Your memory is not aligned; you need to "
150 "rebuild your kernel with a bigger NODEMAPSIZE, "
151 "shift=%d\n", shift);
157 int __meminit __early_pfn_to_nid(unsigned long pfn)
159 return phys_to_nid(pfn << PAGE_SHIFT);
162 static void * __init early_node_mem(int nodeid, unsigned long start,
163 unsigned long end, unsigned long size,
169 * Place the allocation as high as possible: skip the DMA zones,
170 * since NODE_DATA and other early allocations will live here.
172 if (start < (MAX_DMA_PFN<<PAGE_SHIFT))
173 start = MAX_DMA_PFN<<PAGE_SHIFT;
174 if (start < (MAX_DMA32_PFN<<PAGE_SHIFT) &&
175 end > (MAX_DMA32_PFN<<PAGE_SHIFT))
176 start = MAX_DMA32_PFN<<PAGE_SHIFT;
177 mem = find_e820_area(start, end, size, align);
181 /* extend the search scope */
182 end = max_pfn_mapped << PAGE_SHIFT;
183 if (end > (MAX_DMA32_PFN<<PAGE_SHIFT))
184 start = MAX_DMA32_PFN<<PAGE_SHIFT;
186 start = MAX_DMA_PFN<<PAGE_SHIFT;
187 mem = find_e820_area(start, end, size, align);
191 printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
197 /* Initialize bootmem allocator for a node */
199 setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
201 unsigned long start_pfn, last_pfn, nodedata_phys;
202 const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
204 #ifndef CONFIG_NO_BOOTMEM
205 unsigned long bootmap_start, bootmap_pages, bootmap_size;
213 * Don't confuse the VM with a node that doesn't have the
214 * minimum amount of memory:
216 if (end && (end - start) < NODE_MIN_SIZE)
219 start = roundup(start, ZONE_ALIGN);
221 printk(KERN_INFO "Initmem setup node %d %016lx-%016lx\n", nodeid,
224 start_pfn = start >> PAGE_SHIFT;
225 last_pfn = end >> PAGE_SHIFT;
227 node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size,
229 if (node_data[nodeid] == NULL)
231 nodedata_phys = __pa(node_data[nodeid]);
232 reserve_early(nodedata_phys, nodedata_phys + pgdat_size, "NODE_DATA");
233 printk(KERN_INFO " NODE_DATA [%016lx - %016lx]\n", nodedata_phys,
234 nodedata_phys + pgdat_size - 1);
235 nid = phys_to_nid(nodedata_phys);
237 printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nodeid, nid);
239 memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
240 NODE_DATA(nodeid)->node_id = nodeid;
241 NODE_DATA(nodeid)->node_start_pfn = start_pfn;
242 NODE_DATA(nodeid)->node_spanned_pages = last_pfn - start_pfn;
244 #ifndef CONFIG_NO_BOOTMEM
245 NODE_DATA(nodeid)->bdata = &bootmem_node_data[nodeid];
248 * Find a place for the bootmem map
249 * nodedata_phys could have been placed on another node by alloc_bootmem,
250 * so make sure bootmap_start is not too low; early_node_mem() allocates
251 * with find_e820_area() rather than alloc_bootmem(), and a low start
252 * could clash with an already reserved range.
254 bootmap_pages = bootmem_bootmap_pages(last_pfn - start_pfn);
255 bootmap_start = roundup(nodedata_phys + pgdat_size, PAGE_SIZE);
257 * SMP_CACHE_BYTES could be enough, but init_bootmem_node() likes
258 * it aligned to PAGE_SIZE
260 bootmap = early_node_mem(nodeid, bootmap_start, end,
261 bootmap_pages<<PAGE_SHIFT, PAGE_SIZE);
262 if (bootmap == NULL) {
263 free_early(nodedata_phys, nodedata_phys + pgdat_size);
264 node_data[nodeid] = NULL;
267 bootmap_start = __pa(bootmap);
268 reserve_early(bootmap_start, bootmap_start+(bootmap_pages<<PAGE_SHIFT),
271 bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
272 bootmap_start >> PAGE_SHIFT,
273 start_pfn, last_pfn);
275 printk(KERN_INFO " bootmap [%016lx - %016lx] pages %lx\n",
276 bootmap_start, bootmap_start + bootmap_size - 1,
278 nid = phys_to_nid(bootmap_start);
280 printk(KERN_INFO " bootmap(%d) on node %d\n", nodeid, nid);
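/*
 * Editor's note (not in the original file): the per-node layout built
 * above is roughly
 *
 *   nodedata_phys                        bootmap_start
 *   |<- pgdat_size, page aligned ->| ... |<- bootmap_pages pages ->|
 *
 * Both ranges are reserve_early()'d and ideally sit on the node they
 * describe; the two "on node" printks flag when they do not.
 */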
282 free_bootmem_with_active_regions(nodeid, end);
285 node_set_online(nodeid);
289 * There are unfortunately some poorly designed mainboards around that
290 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
291 * mapping. To avoid this, fill in the mapping for all possible CPUs,
292 * as the number of CPUs is not known yet. We round-robin the
293 * existing nodes across them.
295 void __init numa_init_array(void)
299 rr = first_node(node_online_map);
300 for (i = 0; i < nr_cpu_ids; i++) {
301 if (early_cpu_to_node(i) != NUMA_NO_NODE)
303 numa_set_node(i, rr);
304 rr = next_node(rr, node_online_map);
305 if (rr == MAX_NUMNODES)
306 rr = first_node(node_online_map);
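/*
 * Editor's example (not in the original file): with nodes 0 and 1 online
 * and four CPUs lacking an APIC-derived node, the loop above assigns
 * cpu0->node0, cpu1->node1, cpu2->node0, cpu3->node1, wrapping back via
 * first_node() once next_node() hits MAX_NUMNODES.
 */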
310 #ifdef CONFIG_NUMA_EMU
312 static struct bootnode nodes[MAX_NUMNODES] __initdata;
313 static struct bootnode physnodes[MAX_NUMNODES] __initdata;
314 static char *cmdline __initdata;
316 static int __init setup_physnodes(unsigned long start, unsigned long end,
323 #ifdef CONFIG_ACPI_NUMA
325 nr_nodes = acpi_get_nodes(physnodes);
327 #ifdef CONFIG_K8_NUMA
329 nr_nodes = k8_get_nodes(physnodes);
332 * Basic sanity checking on the physical node map: there may be errors
333 * if SRAT or the K8 northbridge reported the topology incorrectly,
334 * or if the mem= kernel parameter was used.
336 for (i = 0; i < nr_nodes; i++) {
337 if (physnodes[i].start == physnodes[i].end)
339 if (physnodes[i].start > end) {
340 physnodes[i].end = physnodes[i].start;
343 if (physnodes[i].end < start) {
344 physnodes[i].start = physnodes[i].end;
347 if (physnodes[i].start < start)
348 physnodes[i].start = start;
349 if (physnodes[i].end > end)
350 physnodes[i].end = end;
354 * Remove all nodes that have no memory or were truncated because of the
355 * limited address range.
357 for (i = 0; i < nr_nodes; i++) {
358 if (physnodes[i].start == physnodes[i].end)
360 physnodes[ret].start = physnodes[i].start;
361 physnodes[ret].end = physnodes[i].end;
366 * If no physical topology was detected, a single node is faked to cover
367 * the entire address space.
370 physnodes[ret].start = start;
371 physnodes[ret].end = end;
378 * Sets up nid to range from addr to addr + size. If the end
379 * boundary is greater than max_addr, then max_addr is used instead.
380 * The return value is 0 if there is additional memory left for
381 * allocation past addr and -1 otherwise. addr is adjusted to be at
382 * the end of the node.
384 static int __init setup_node_range(int nid, u64 *addr, u64 size, u64 max_addr)
387 nodes[nid].start = *addr;
389 if (*addr >= max_addr) {
393 nodes[nid].end = *addr;
394 node_set(nid, node_possible_map);
395 printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
396 nodes[nid].start, nodes[nid].end,
397 (nodes[nid].end - nodes[nid].start) >> 20);
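/*
 * Editor's usage example (not in the original file): carving a 512MB
 * fake node 0 out of [addr, max_addr) looks like
 *
 *	setup_node_range(0, &addr, 512ULL << 20, max_addr);
 *
 * On return addr has advanced by 512MB, or has been clamped to max_addr,
 * in which case the -1 return signals that no memory remains.
 */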
402 * Sets up nr_nodes fake nodes interleaved over the physical nodes covering
403 * addr to max_addr. The return value is the number of nodes allocated.
405 static int __init split_nodes_interleave(u64 addr, u64 max_addr,
406 int nr_phys_nodes, int nr_nodes)
408 nodemask_t physnode_mask = NODE_MASK_NONE;
416 if (nr_nodes > MAX_NUMNODES) {
417 pr_info("numa=fake=%d too large, reducing to %d\n",
418 nr_nodes, MAX_NUMNODES);
419 nr_nodes = MAX_NUMNODES;
422 size = (max_addr - addr - e820_hole_size(addr, max_addr)) / nr_nodes;
424 * Calculate the number of big nodes that can be allocated as a result
425 * of consolidating the remainder.
427 big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * nr_nodes) /
430 size &= FAKE_NODE_MIN_HASH_MASK;
432 pr_err("Not enough memory for each node. "
433 "NUMA emulation disabled.\n");
437 for (i = 0; i < nr_phys_nodes; i++)
438 if (physnodes[i].start != physnodes[i].end)
439 node_set(i, physnode_mask);
442 * Continue to fill physical nodes with fake nodes until there is no
443 * memory left on any of them.
445 while (nodes_weight(physnode_mask)) {
446 for_each_node_mask(i, physnode_mask) {
447 u64 end = physnodes[i].start + size;
448 u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);
451 end += FAKE_NODE_MIN_SIZE;
454 * Continue to add memory to this fake node if its
455 * non-reserved memory is less than the per-node size.
457 while (end - physnodes[i].start -
458 e820_hole_size(physnodes[i].start, end) < size) {
459 end += FAKE_NODE_MIN_SIZE;
460 if (end > physnodes[i].end) {
461 end = physnodes[i].end;
467 * If there won't be at least FAKE_NODE_MIN_SIZE of
468 * non-reserved memory in ZONE_DMA32 for the next node,
469 * this one must extend to the boundary.
471 if (end < dma32_end && dma32_end - end -
472 e820_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
476 * If there won't be enough non-reserved memory for the
477 * next node, this one must extend to the end of the physical node.
480 if (physnodes[i].end - end -
481 e820_hole_size(end, physnodes[i].end) < size)
482 end = physnodes[i].end;
485 * Avoid allocating more nodes than requested, which can
486 * happen as a result of rounding down each node's size
487 * to FAKE_NODE_MIN_SIZE.
489 if (nodes_weight(physnode_mask) + ret >= nr_nodes)
490 end = physnodes[i].end;
492 if (setup_node_range(ret++, &physnodes[i].start,
493 end - physnodes[i].start,
494 physnodes[i].end) < 0)
495 node_clear(i, physnode_mask);
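/*
 * Editor's worked example (not in the original file): with two equally
 * sized physical nodes and nr_nodes = 4, each pass of the loop above
 * carves one fake node out of every physical node: pass one creates fake
 * nodes 0 and 1 on physical nodes 0 and 1, pass two creates fake nodes
 * 2 and 3, so the fake nodes interleave across the real topology.
 */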
502 * Splits num_nodes nodes up equally starting at node_start. The return value
503 * is the number of nodes split up and addr is adjusted to be at the end of the
504 * last node allocated.
506 static int __init split_nodes_equally(u64 *addr, u64 max_addr, int node_start,
515 if (num_nodes > MAX_NUMNODES)
516 num_nodes = MAX_NUMNODES;
517 size = (max_addr - *addr - e820_hole_size(*addr, max_addr)) /
520 * Calculate the number of big nodes that can be allocated as a result
521 * of consolidating the leftovers.
523 big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * num_nodes) /
526 /* Round down to nearest FAKE_NODE_MIN_SIZE. */
527 size &= FAKE_NODE_MIN_HASH_MASK;
529 printk(KERN_ERR "Not enough memory for each node. "
530 "NUMA emulation disabled.\n");
534 for (i = node_start; i < num_nodes + node_start; i++) {
535 u64 end = *addr + size;
538 end += FAKE_NODE_MIN_SIZE;
540 * The final node can have the remaining system RAM. Other
541 * nodes receive roughly the same amount of available pages.
543 if (i == num_nodes + node_start - 1)
546 while (end - *addr - e820_hole_size(*addr, end) <
548 end += FAKE_NODE_MIN_SIZE;
549 if (end > max_addr) {
554 if (setup_node_range(i, addr, end - *addr, max_addr) < 0)
557 return i - node_start + 1;
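/*
 * Editor's worked example (not in the original file), assuming
 * FAKE_NODE_MIN_SIZE is 64MB: splitting 1000MB of usable memory into
 * num_nodes = 4 gives size = 250MB, rounded down to 192MB with a 58MB
 * per-node remainder; big = (58MB * 4) / 64MB = 3, so three nodes get
 * 256MB each and the final node takes whatever remains up to max_addr.
 */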
561 * Splits the remaining system RAM into chunks of the given size. Whatever
562 * memory remains is always assigned to a final node and can be asymmetric.
563 * Returns the number of nodes created.
565 static int __init split_nodes_by_size(u64 *addr, u64 max_addr, int node_start,
569 size = (size << 20) & FAKE_NODE_MIN_HASH_MASK;
570 while (!setup_node_range(i++, addr, size, max_addr))
572 return i - node_start;
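/*
 * Editor's example (not in the original file): split_nodes_by_size(&addr,
 * max_addr, 2, 128) carves 128MB fake nodes starting at node id 2 until
 * setup_node_range() reports that max_addr has been reached; the final
 * node receives whatever remains below max_addr, so it may be smaller
 * than the others.
 */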
576 * Sets up the system RAM area from start_pfn to last_pfn according to the
577 * numa=fake command-line option.
579 static int __init numa_emulation(unsigned long start_pfn,
580 unsigned long last_pfn, int acpi, int k8)
582 u64 size, addr = start_pfn << PAGE_SHIFT;
583 u64 max_addr = last_pfn << PAGE_SHIFT;
584 int num_nodes = 0, num = 0, coeff_flag, coeff = -1, i;
587 num_phys_nodes = setup_physnodes(addr, max_addr, acpi, k8);
589 * If the numa=fake command-line is just a single number N, split the
590 * system RAM into N fake nodes.
592 if (!strchr(cmdline, '*') && !strchr(cmdline, ',')) {
593 long n = simple_strtol(cmdline, NULL, 0);
595 num_nodes = split_nodes_interleave(addr, max_addr,
602 /* Parse the command line. */
603 for (coeff_flag = 0; ; cmdline++) {
604 if (*cmdline && isdigit(*cmdline)) {
605 num = num * 10 + *cmdline - '0';
608 if (*cmdline == '*') {
613 if (!*cmdline || *cmdline == ',') {
617 * Round down to the nearest FAKE_NODE_MIN_SIZE.
618 * Command-line coefficients are in megabytes.
620 size = ((u64)num << 20) & FAKE_NODE_MIN_HASH_MASK;
622 for (i = 0; i < coeff; i++, num_nodes++)
623 if (setup_node_range(num_nodes, &addr,
636 /* Fill remainder of system RAM, if appropriate. */
637 if (addr < max_addr) {
638 if (coeff_flag && coeff < 0) {
639 /* Split remaining nodes into num-sized chunks */
640 num_nodes += split_nodes_by_size(&addr, max_addr,
644 switch (*(cmdline - 1)) {
646 /* Split remaining nodes into coeff chunks */
649 num_nodes += split_nodes_equally(&addr, max_addr,
653 /* Do not allocate remaining system RAM */
656 /* Give one final node */
657 setup_node_range(num_nodes, &addr, max_addr - addr,
663 memnode_shift = compute_hash_shift(nodes, num_nodes, NULL);
664 if (memnode_shift < 0) {
666 printk(KERN_ERR "No NUMA hash function found. NUMA emulation "
672 * We need to vacate all active ranges that may have been registered for
673 * the e820 memory map.
675 remove_all_active_ranges();
676 for_each_node_mask(i, node_possible_map) {
677 e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
678 nodes[i].end >> PAGE_SHIFT);
679 setup_node_bootmem(i, nodes[i].start, nodes[i].end);
681 acpi_fake_nodes(nodes, num_nodes);
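/*
 * Editor's summary (not in the original file) of the numa=fake grammar
 * accepted by the parser above, with illustrative values:
 *
 *   numa=fake=8              8 fake nodes interleaved over the phys nodes
 *   numa=fake=2*512,4*1024   two 512MB nodes, then four 1GB nodes
 *   numa=fake=2*512,2*       two 512MB nodes, rest split equally in two
 *   numa=fake=2*512,*128     two 512MB nodes, rest in 128MB chunks
 *   numa=fake=2*512,         two 512MB nodes, rest left unallocated
 */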
685 #endif /* CONFIG_NUMA_EMU */
687 void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn,
692 nodes_clear(node_possible_map);
693 nodes_clear(node_online_map);
695 #ifdef CONFIG_NUMA_EMU
696 if (cmdline && !numa_emulation(start_pfn, last_pfn, acpi, k8))
698 nodes_clear(node_possible_map);
699 nodes_clear(node_online_map);
702 #ifdef CONFIG_ACPI_NUMA
703 if (!numa_off && acpi && !acpi_scan_nodes(start_pfn << PAGE_SHIFT,
704 last_pfn << PAGE_SHIFT))
706 nodes_clear(node_possible_map);
707 nodes_clear(node_online_map);
710 #ifdef CONFIG_K8_NUMA
711 if (!numa_off && k8 && !k8_scan_nodes())
713 nodes_clear(node_possible_map);
714 nodes_clear(node_online_map);
716 printk(KERN_INFO "%s\n",
717 numa_off ? "NUMA turned off" : "No NUMA configuration found");
719 printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
720 start_pfn << PAGE_SHIFT,
721 last_pfn << PAGE_SHIFT);
722 /* set up a dummy node covering all memory */
724 memnodemap = memnode.embedded_map;
727 node_set(0, node_possible_map);
728 for (i = 0; i < nr_cpu_ids; i++)
730 e820_register_active_regions(0, start_pfn, last_pfn);
731 setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT);
734 unsigned long __init numa_free_all_bootmem(void)
736 unsigned long pages = 0;
739 for_each_online_node(i)
740 pages += free_all_bootmem_node(NODE_DATA(i));
742 #ifdef CONFIG_NO_BOOTMEM
743 pages += free_all_memory_core_early(MAX_NUMNODES);
749 static __init int numa_setup(char *opt)
753 if (!strncmp(opt, "off", 3))
755 #ifdef CONFIG_NUMA_EMU
756 if (!strncmp(opt, "fake=", 5))
759 #ifdef CONFIG_ACPI_NUMA
760 if (!strncmp(opt, "noacpi", 6))
765 early_param("numa", numa_setup);
769 static __init int find_near_online_node(int node)
772 int min_val = INT_MAX;
775 for_each_online_node(n) {
776 val = node_distance(node, n);
788 * Set up early cpu_to_node.
790 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[]
791 * and apicid_to_node[] tables have valid entries for a CPU.
792 * This means we skip cpu_to_node[] initialisation for NUMA
793 * emulation and the fake-node case (when running a kernel compiled
794 * for NUMA on a non-NUMA box), which is OK because cpu_to_node[]
795 * was already initialized in a round-robin manner by numa_init_array()
796 * prior to this call, and that initialization is good enough
797 * for the fake NUMA cases.
799 * Called before the per_cpu areas are set up.
801 void __init init_cpu_to_node(void)
804 u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
806 BUG_ON(cpu_to_apicid == NULL);
808 for_each_possible_cpu(cpu) {
810 u16 apicid = cpu_to_apicid[cpu];
812 if (apicid == BAD_APICID)
814 node = apicid_to_node[apicid];
815 if (node == NUMA_NO_NODE)
817 if (!node_online(node))
818 node = find_near_online_node(node);
819 numa_set_node(cpu, node);
825 void __cpuinit numa_set_node(int cpu, int node)
827 int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
829 /* early setting, no percpu area yet */
830 if (cpu_to_node_map) {
831 cpu_to_node_map[cpu] = node;
835 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
836 if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
837 printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
842 per_cpu(x86_cpu_to_node_map, cpu) = node;
844 if (node != NUMA_NO_NODE)
845 per_cpu(node_number, cpu) = node;
848 void __cpuinit numa_clear_node(int cpu)
850 numa_set_node(cpu, NUMA_NO_NODE);
853 #ifndef CONFIG_DEBUG_PER_CPU_MAPS
855 void __cpuinit numa_add_cpu(int cpu)
857 cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
860 void __cpuinit numa_remove_cpu(int cpu)
862 cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
865 #else /* CONFIG_DEBUG_PER_CPU_MAPS */
868 * --------- debug versions of the numa functions ---------
870 static void __cpuinit numa_set_cpumask(int cpu, int enable)
872 int node = early_cpu_to_node(cpu);
873 struct cpumask *mask;
876 mask = node_to_cpumask_map[node];
878 printk(KERN_ERR "node_to_cpumask_map[%i] NULL\n", node);
884 cpumask_set_cpu(cpu, mask);
886 cpumask_clear_cpu(cpu, mask);
888 cpulist_scnprintf(buf, sizeof(buf), mask);
889 printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
890 enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
893 void __cpuinit numa_add_cpu(int cpu)
895 numa_set_cpumask(cpu, 1);
898 void __cpuinit numa_remove_cpu(int cpu)
900 numa_set_cpumask(cpu, 0);
903 int cpu_to_node(int cpu)
905 if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
907 "cpu_to_node(%d): usage too early!\n", cpu);
909 return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
911 return per_cpu(x86_cpu_to_node_map, cpu);
913 EXPORT_SYMBOL(cpu_to_node);
916 * Same function as cpu_to_node() but used if called before the
917 * per_cpu areas are set up.
919 int early_cpu_to_node(int cpu)
921 if (early_per_cpu_ptr(x86_cpu_to_node_map))
922 return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
924 if (!cpu_possible(cpu)) {
926 "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
930 return per_cpu(x86_cpu_to_node_map, cpu);
934 * --------- end of debug versions of the numa functions ---------
937 #endif /* CONFIG_DEBUG_PER_CPU_MAPS */