/*
 * ACPI 3.0 based NUMA setup
 * Copyright 2004 Andi Kleen, SuSE Labs.
 *
 * Reads the ACPI SRAT table to figure out what memory belongs to which CPUs.
 *
 * Called from acpi_numa_init while reading the SRAT and SLIT tables.
 * Assumes all memory regions belonging to a single proximity domain
 * are in one chunk. Holes between them will be included in the node.
 */

#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/mmzone.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <asm/proto.h>
#include <asm/numa.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>

int acpi_numa __initdata;

static struct acpi_table_slit *acpi_slit;

static nodemask_t nodes_parsed __initdata;
static nodemask_t cpu_nodes_parsed __initdata;
static struct bootnode nodes[MAX_NUMNODES] __initdata;
static struct bootnode nodes_add[MAX_NUMNODES];

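/*
 * Raw SRAT memory affinity entries, recorded in the order they were parsed.
 * acpi_scan_nodes() later merges adjacent entries on the same node and feeds
 * them to compute_hash_shift() to build the physical-address -> node lookup.
 */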
static int num_node_memblks __initdata;
static struct bootnode node_memblk_range[NR_NODE_MEMBLKS] __initdata;
static int memblk_nodeid[NR_NODE_MEMBLKS] __initdata;

static __init int setup_node(int pxm)
{
        return acpi_map_pxm_to_node(pxm);
}

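/*
 * Return the node ID of a previously recorded memory block that overlaps
 * [start, end) or exactly matches it; -1 if there is no conflict.
 * Zero-sized entries are ignored.
 */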
static __init int conflicting_memblks(unsigned long start, unsigned long end)
{
        int i;
        for (i = 0; i < num_node_memblks; i++) {
                struct bootnode *nd = &node_memblk_range[i];
                if (nd->start == nd->end)
                        continue;
                if (nd->end > start && nd->start < end)
                        return memblk_nodeid[i];
                if (nd->end == end && nd->start == start)
                        return memblk_nodeid[i];
        }
        return -1;
}

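/*
 * Clamp node i's range to [start, end); a node that lies entirely outside
 * the limits collapses to an empty range.
 */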
static __init void cutoff_node(int i, unsigned long start, unsigned long end)
{
        struct bootnode *nd = &nodes[i];

        if (nd->start < start) {
                nd->start = start;
                if (nd->end < nd->start)
                        nd->start = nd->end;
        }
        if (nd->end > end) {
                nd->end = end;
                if (nd->start > nd->end)
                        nd->start = nd->end;
        }
}

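/*
 * Give up on the SRAT: clear everything parsed so far and mark acpi_numa
 * as failed so callers can fall back to other NUMA setup methods.
 */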
static __init void bad_srat(void)
{
        int i;
        printk(KERN_ERR "SRAT: SRAT not used.\n");
        acpi_numa = -1;
        for (i = 0; i < MAX_LOCAL_APIC; i++)
                apicid_to_node[i] = NUMA_NO_NODE;
        for (i = 0; i < MAX_NUMNODES; i++) {
                nodes[i].start = nodes[i].end = 0;
                nodes_add[i].start = nodes_add[i].end = 0;
        }
        remove_all_active_ranges();
}

static __init inline int srat_disabled(void)
{
        return numa_off || acpi_numa < 0;
}

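/*
 * The SLIT handed to us lives in the firmware's ACPI table mapping, so copy
 * it into a freshly reserved chunk of low memory; __node_distance() keeps
 * reading that copy long after table parsing has finished.
 */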
/* Callback for SLIT parsing */
void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
{
        unsigned length;
        unsigned long phys;

        length = slit->header.length;
        phys = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, length,
                              PAGE_SIZE);

        if (phys == -1L)
                panic(" Can not save slit!\n");

        acpi_slit = __va(phys);
        memcpy(acpi_slit, slit, length);
        reserve_early(phys, phys + length, "ACPI SLIT");
}

/* Callback for Proximity Domain -> x2APIC mapping */
void __init
acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
{
        int pxm, node;
        int apic_id;

        if (srat_disabled())
                return;
        if (pa->header.length < sizeof(struct acpi_srat_x2apic_cpu_affinity)) {
                bad_srat();
                return;
        }
        if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
                return;
        pxm = pa->proximity_domain;
        node = setup_node(pxm);
        if (node < 0) {
                printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
                bad_srat();
                return;
        }

        apic_id = pa->apic_id;
        apicid_to_node[apic_id] = node;
        node_set(node, cpu_nodes_parsed);
        acpi_numa = 1;
        printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u\n",
               pxm, apic_id, node);
}

/* Callback for Proximity Domain -> LAPIC mapping */
void __init
acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
{
        int pxm, node;
        int apic_id;

        if (srat_disabled())
                return;
        if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
                bad_srat();
                return;
        }
        if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
                return;
        pxm = pa->proximity_domain_lo;
        node = setup_node(pxm);
        if (node < 0) {
                printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
                bad_srat();
                return;
        }

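        /*
         * On SGI UV systems running in x2APIC mode the full APIC ID is
         * split across the apic_id and local_sapic_eid fields of the SRAT
         * entry, so recombine them before indexing apicid_to_node[].
         */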
        if (get_uv_system_type() >= UV_X2APIC)
                apic_id = (pa->apic_id << 8) | pa->local_sapic_eid;
        else
                apic_id = pa->apic_id;
        apicid_to_node[apic_id] = node;
        node_set(node, cpu_nodes_parsed);
        acpi_numa = 1;
        printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u\n",
               pxm, apic_id, node);
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
static inline int save_add_info(void) {return 1;}
#else
static inline int save_add_info(void) {return 0;}
#endif
/*
 * Update nodes_add[]
 * This code supports one contiguous hot add area per node
 */
static void __init
update_nodes_add(int node, unsigned long start, unsigned long end)
{
        unsigned long s_pfn = start >> PAGE_SHIFT;
        unsigned long e_pfn = end >> PAGE_SHIFT;
        int changed = 0;
        struct bootnode *nd = &nodes_add[node];

        /* I had some trouble with strange memory hotadd regions breaking
           the boot. Be very strict here and reject anything unexpected.
           If you want working memory hotadd write correct SRATs.

           The node size check is a basic sanity check to guard against
           mistakes */
        if ((signed long)(end - start) < NODE_MIN_SIZE) {
                printk(KERN_ERR "SRAT: Hotplug area too small\n");
                return;
        }

        /* This check might be a bit too strict, but I'm keeping it for now. */
        if (absent_pages_in_range(s_pfn, e_pfn) != e_pfn - s_pfn) {
                printk(KERN_ERR
                        "SRAT: Hotplug area %lu -> %lu has existing memory\n",
                        s_pfn, e_pfn);
                return;
        }

        /* Looks good */

        if (nd->start == nd->end) {
                nd->start = start;
                nd->end = end;
                changed = 1;
        } else {
                if (nd->start == end) {
                        nd->start = start;
                        changed = 1;
                }
                if (nd->end == start) {
                        nd->end = end;
                        changed = 1;
                }
                if (!changed)
                        printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n");
        }

        if (changed) {
                node_set(node, cpu_nodes_parsed);
                printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n",
                                 nd->start, nd->end);
        }
}

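/*
 * Each enabled memory affinity entry is recorded twice: folded into the
 * node's overall range in nodes[], and kept as an individual block in
 * node_memblk_range[]/memblk_nodeid[] for acpi_scan_nodes() to merge and
 * hash later.  Hot-pluggable ranges are tracked separately in nodes_add[]
 * and do not widen nodes[].
 */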
/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
void __init
acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
{
        struct bootnode *nd, oldnode;
        unsigned long start, end;
        int node, pxm;
        int i;

        if (srat_disabled())
                return;
        if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
                bad_srat();
                return;
        }
        if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
                return;

        if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
                return;
        start = ma->base_address;
        end = start + ma->length;
        pxm = ma->proximity_domain;
        node = setup_node(pxm);
        if (node < 0) {
                printk(KERN_ERR "SRAT: Too many proximity domains.\n");
                bad_srat();
                return;
        }
        i = conflicting_memblks(start, end);
        if (i == node) {
                printk(KERN_WARNING
                       "SRAT: Warning: PXM %d (%lx-%lx) overlaps with itself (%Lx-%Lx)\n",
                       pxm, start, end, nodes[i].start, nodes[i].end);
        } else if (i >= 0) {
                printk(KERN_ERR
                       "SRAT: PXM %d (%lx-%lx) overlaps with PXM %d (%Lx-%Lx)\n",
                       pxm, start, end, node_to_pxm(i),
                       nodes[i].start, nodes[i].end);
                bad_srat();
                return;
        }
        nd = &nodes[node];
        oldnode = *nd;
        if (!node_test_and_set(node, nodes_parsed)) {
                nd->start = start;
                nd->end = end;
        } else {
                if (start < nd->start)
                        nd->start = start;
                if (nd->end < end)
                        nd->end = end;
        }

        printk(KERN_INFO "SRAT: Node %u PXM %u %lx-%lx\n", node, pxm,
               start, end);

        if (ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) {
                update_nodes_add(node, start, end);
                /* restore nodes[node] */
                *nd = oldnode;
                if ((nd->start | nd->end) == 0)
                        node_clear(node, nodes_parsed);
        }

        node_memblk_range[num_node_memblks].start = start;
        node_memblk_range[num_node_memblks].end = end;
        memblk_nodeid[num_node_memblks] = node;
        num_node_memblks++;
}

/* Sanity check to catch more bad SRATs (they are amazingly common).
   Make sure the PXMs cover all memory. */
static int __init nodes_cover_memory(const struct bootnode *nodes)
{
        int i;
        unsigned long pxmram, e820ram;

        pxmram = 0;
        for_each_node_mask(i, nodes_parsed) {
                unsigned long s = nodes[i].start >> PAGE_SHIFT;
                unsigned long e = nodes[i].end >> PAGE_SHIFT;
                pxmram += e - s;
                pxmram -= __absent_pages_in_range(i, s, e);
                if ((long)pxmram < 0)
                        pxmram = 0;
        }

        e820ram = max_pfn - (e820_hole_size(0, max_pfn<<PAGE_SHIFT)>>PAGE_SHIFT);
        /* We seem to lose 3 pages somewhere. Allow 1M of slack. */
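        /* 1 << (20 - PAGE_SHIFT) is 1MB expressed in pages: 256 pages with 4K pages. */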
        if ((long)(e820ram - pxmram) >= (1<<(20 - PAGE_SHIFT))) {
                printk(KERN_ERR
                       "SRAT: PXMs only cover %luMB of your %luMB e820 RAM. Not used.\n",
                       (pxmram << PAGE_SHIFT) >> 20,
                       (e820ram << PAGE_SHIFT) >> 20);
                return 0;
        }
        return 1;
}

void __init acpi_numa_arch_fixup(void) {}

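/*
 * Copy the parsed physical node ranges into physnodes[] and return the
 * number of entries filled in.
 */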
int __init acpi_get_nodes(struct bootnode *physnodes)
{
        int i;
        int ret = 0;

        for_each_node_mask(i, nodes_parsed) {
                physnodes[ret].start = nodes[i].start;
                physnodes[ret].end = nodes[i].end;
                ret++;
        }
        return ret;
}

/* Use the information discovered above to actually set up the nodes. */
int __init acpi_scan_nodes(unsigned long start, unsigned long end)
{
        int i;

        if (acpi_numa <= 0)
                return -1;

        /* First clean up the node list */
        for (i = 0; i < MAX_NUMNODES; i++)
                cutoff_node(i, start, end);

        /*
         * Join together blocks on the same node, holes between
         * which don't overlap with memory on other nodes.
         */
        for (i = 0; i < num_node_memblks; ++i) {
                int j, k;

                for (j = i + 1; j < num_node_memblks; ++j) {
                        unsigned long start, end;

                        if (memblk_nodeid[i] != memblk_nodeid[j])
                                continue;
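                        /*
                         * The hole between blocks i and j runs from the
                         * smaller of the two block ends to the larger of the
                         * two block starts; only merge if no block belonging
                         * to a different node intersects that hole.
                         */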
                        start = min(node_memblk_range[i].end,
                                    node_memblk_range[j].end);
                        end = max(node_memblk_range[i].start,
                                  node_memblk_range[j].start);
                        for (k = 0; k < num_node_memblks; ++k) {
                                if (memblk_nodeid[i] == memblk_nodeid[k])
                                        continue;
                                if (start < node_memblk_range[k].end &&
                                    end > node_memblk_range[k].start)
                                        break;
                        }
                        if (k < num_node_memblks)
                                continue;
                        start = min(node_memblk_range[i].start,
                                    node_memblk_range[j].start);
                        end = max(node_memblk_range[i].end,
                                  node_memblk_range[j].end);
                        printk(KERN_INFO "SRAT: Node %d "
                               "[%Lx,%Lx) + [%Lx,%Lx) -> [%lx,%lx)\n",
                               memblk_nodeid[i],
                               node_memblk_range[i].start,
                               node_memblk_range[i].end,
                               node_memblk_range[j].start,
                               node_memblk_range[j].end,
                               start, end);
                        node_memblk_range[i].start = start;
                        node_memblk_range[i].end = end;
                        k = --num_node_memblks - j;
                        memmove(memblk_nodeid + j, memblk_nodeid + j+1,
                                k * sizeof(*memblk_nodeid));
                        memmove(node_memblk_range + j, node_memblk_range + j+1,
                                k * sizeof(*node_memblk_range));
                        --j;
                }
        }

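        /*
         * compute_hash_shift() picks a shift so that a physical address
         * shifted right by memnode_shift indexes memnodemap[] unambiguously;
         * it fails if the memory blocks are laid out too finely for any
         * single shift to work.
         */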
        memnode_shift = compute_hash_shift(node_memblk_range, num_node_memblks,
                                           memblk_nodeid);
        if (memnode_shift < 0) {
                printk(KERN_ERR
                       "SRAT: No NUMA node hash function found. Contact maintainer\n");
                bad_srat();
                return -1;
        }

        for_each_node_mask(i, nodes_parsed)
                e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
                                             nodes[i].end >> PAGE_SHIFT);
        /* for out of order entries in SRAT */
        sort_node_map();
        if (!nodes_cover_memory(nodes)) {
                bad_srat();
                return -1;
        }

        /* Account for nodes with cpus and no memory */
        nodes_or(node_possible_map, nodes_parsed, cpu_nodes_parsed);

        /* Finally register nodes */
        for_each_node_mask(i, node_possible_map)
                setup_node_bootmem(i, nodes[i].start, nodes[i].end);
        /* Try again in case setup_node_bootmem missed one due
           to missing bootmem */
        for_each_node_mask(i, node_possible_map)
                if (!node_online(i))
                        setup_node_bootmem(i, nodes[i].start, nodes[i].end);

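        /*
         * Drop the node binding of any CPU whose node never came online
         * (for example a memoryless node that could not be set up above).
         */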
        for (i = 0; i < nr_cpu_ids; i++) {
                int node = early_cpu_to_node(i);

                if (node == NUMA_NO_NODE)
                        continue;
                if (!node_online(node))
                        numa_clear_node(i);
        }
        numa_init_array();
        return 0;
}

#ifdef CONFIG_NUMA_EMU
static int fake_node_to_pxm_map[MAX_NUMNODES] __initdata = {
        [0 ... MAX_NUMNODES-1] = PXM_INVAL
};
static s16 fake_apicid_to_node[MAX_LOCAL_APIC] __initdata = {
        [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};
static int __init find_node_by_addr(unsigned long addr)
{
        int ret = NUMA_NO_NODE;
        int i;

        for_each_node_mask(i, nodes_parsed) {
                /*
                 * Find the real node that this emulated node appears on. For
                 * the sake of simplicity, we only use a real node's starting
                 * address to determine which emulated node it appears on.
                 */
                if (addr >= nodes[i].start && addr < nodes[i].end) {
                        ret = i;
                        break;
                }
        }
        return ret;
}

/*
 * In NUMA emulation, we need to setup proximity domain (_PXM) to node ID
 * mappings that respect the real ACPI topology but reflect our emulated
 * environment. For each emulated node, we find which real node it appears on
 * and create PXM to NID mappings for those fake nodes which mirror that
 * locality. SLIT will now represent the correct distances between emulated
 * nodes as a result of the real topology.
 */
void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes)
{
        int i, j;

        printk(KERN_INFO "Faking PXM affinity for fake nodes on real "
                         "topology.\n");
        for (i = 0; i < num_nodes; i++) {
                int nid, pxm;

                nid = find_node_by_addr(fake_nodes[i].start);
                if (nid == NUMA_NO_NODE)
                        continue;
                pxm = node_to_pxm(nid);
                if (pxm == PXM_INVAL)
                        continue;
                fake_node_to_pxm_map[i] = pxm;
                /*
                 * For each apicid_to_node mapping that exists for this real
                 * node, it must now point to the fake node ID.
                 */
                for (j = 0; j < MAX_LOCAL_APIC; j++)
                        if (apicid_to_node[j] == nid)
                                fake_apicid_to_node[j] = i;
        }
        for (i = 0; i < num_nodes; i++)
                __acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i);
        memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node));

        nodes_clear(nodes_parsed);
        for (i = 0; i < num_nodes; i++)
                if (fake_nodes[i].start != fake_nodes[i].end)
                        node_set(i, nodes_parsed);
}

static int null_slit_node_compare(int a, int b)
{
        return node_to_pxm(a) == node_to_pxm(b);
}
#else
static int null_slit_node_compare(int a, int b)
{
        return a == b;
}
#endif /* CONFIG_NUMA_EMU */

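/*
 * SLIT distances are stored as a flat locality_count x locality_count
 * matrix indexed by PXM, so the distance from a to b is
 * entry[node_to_pxm(a) * locality_count + node_to_pxm(b)].  Without a SLIT,
 * fall back to LOCAL_DISTANCE/REMOTE_DISTANCE.
 */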
int __node_distance(int a, int b)
{
        int index;

        if (!acpi_slit)
                return null_slit_node_compare(a, b) ? LOCAL_DISTANCE :
                                                      REMOTE_DISTANCE;
        index = acpi_slit->locality_count * node_to_pxm(a);
        return acpi_slit->entry[index + node_to_pxm(b)];
}

EXPORT_SYMBOL(__node_distance);

#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || defined(CONFIG_ACPI_HOTPLUG_MEMORY)
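/*
 * Map a hot-added physical address to the node whose recorded hot-add
 * range contains it; defaults to node 0 if no range matches.
 */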
int memory_add_physaddr_to_nid(u64 start)
{
        int i, ret = 0;

        for_each_node(i)
                if (nodes_add[i].start <= start && nodes_add[i].end > start)
                        ret = i;

        return ret;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif