]> bbs.cooldavid.org Git - net-next-2.6.git/blame - arch/x86_64/mm/srat.c
[PATCH] i386/x86-64: Move acpi_disabled variables into acpi/boot.c
[net-next-2.6.git] / arch / x86_64 / mm / srat.c
CommitLineData
1da177e4
LT
1/*
2 * ACPI 3.0 based NUMA setup
3 * Copyright 2004 Andi Kleen, SuSE Labs.
4 *
5 * Reads the ACPI SRAT table to figure out what memory belongs to which CPUs.
6 *
7 * Called from acpi_numa_init while reading the SRAT and SLIT tables.
8 * Assumes all memory regions belonging to a single proximity domain
9 * are in one chunk. Holes between them will be included in the node.
10 */
11
12#include <linux/kernel.h>
13#include <linux/acpi.h>
14#include <linux/mmzone.h>
15#include <linux/bitmap.h>
16#include <linux/module.h>
17#include <linux/topology.h>
68a3a7fe
AK
18#include <linux/bootmem.h>
19#include <linux/mm.h>
1da177e4
LT
20#include <asm/proto.h>
21#include <asm/numa.h>
8a6fdd3e 22#include <asm/e820.h>
1da177e4 23
68a3a7fe
AK
24#if (defined(CONFIG_ACPI_HOTPLUG_MEMORY) || \
25 defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE)) \
26 && !defined(CONFIG_MEMORY_HOTPLUG)
27#define RESERVE_HOTADD 1
28#endif
29
1da177e4
LT
30static struct acpi_table_slit *acpi_slit;
31
32static nodemask_t nodes_parsed __initdata;
abe059e7 33static struct bootnode nodes[MAX_NUMNODES] __initdata;
68a3a7fe
AK
34static struct bootnode nodes_add[MAX_NUMNODES] __initdata;
35static int found_add_area __initdata;
fad7906d
AK
36int hotadd_percent __initdata = 0;
37#ifndef RESERVE_HOTADD
38#define hotadd_percent 0 /* Ignore all settings */
39#endif
1da177e4 40
9391a3f9
AK
41/* Too small nodes confuse the VM badly. Usually they result
42 from BIOS bugs. */
43#define NODE_MIN_SIZE (4*1024*1024)
44
1da177e4
LT
/* Translate an ACPI proximity domain id into a Linux node id.
   Callers treat a negative return as "too many proximity domains". */
static __init int setup_node(int pxm)
{
	return acpi_map_pxm_to_node(pxm);
}
49
50static __init int conflicting_nodes(unsigned long start, unsigned long end)
51{
52 int i;
4b6a455c 53 for_each_node_mask(i, nodes_parsed) {
abe059e7 54 struct bootnode *nd = &nodes[i];
1da177e4
LT
55 if (nd->start == nd->end)
56 continue;
57 if (nd->end > start && nd->start < end)
05d1fa4b 58 return i;
1da177e4 59 if (nd->end == end && nd->start == start)
05d1fa4b 60 return i;
1da177e4
LT
61 }
62 return -1;
63}
64
65static __init void cutoff_node(int i, unsigned long start, unsigned long end)
66{
abe059e7 67 struct bootnode *nd = &nodes[i];
68a3a7fe
AK
68
69 if (found_add_area)
70 return;
71
1da177e4
LT
72 if (nd->start < start) {
73 nd->start = start;
74 if (nd->end < nd->start)
75 nd->start = nd->end;
76 }
77 if (nd->end > end) {
1da177e4
LT
78 nd->end = end;
79 if (nd->start > nd->end)
80 nd->start = nd->end;
81 }
82}
83
84static __init void bad_srat(void)
85{
2bce2b54 86 int i;
1da177e4
LT
87 printk(KERN_ERR "SRAT: SRAT not used.\n");
88 acpi_numa = -1;
fad7906d 89 found_add_area = 0;
2bce2b54
AK
90 for (i = 0; i < MAX_LOCAL_APIC; i++)
91 apicid_to_node[i] = NUMA_NO_NODE;
68a3a7fe
AK
92 for (i = 0; i < MAX_NUMNODES; i++)
93 nodes_add[i].start = nodes[i].end = 0;
1da177e4
LT
94}
95
96static __init inline int srat_disabled(void)
97{
98 return numa_off || acpi_numa < 0;
99}
100
1584b89c
AK
101/*
102 * A lot of BIOS fill in 10 (= no distance) everywhere. This messes
103 * up the NUMA heuristics which wants the local node to have a smaller
104 * distance than the others.
105 * Do some quick checks here and only use the SLIT if it passes.
106 */
107static __init int slit_valid(struct acpi_table_slit *slit)
108{
109 int i, j;
110 int d = slit->localities;
111 for (i = 0; i < d; i++) {
112 for (j = 0; j < d; j++) {
113 u8 val = slit->entry[d*i + j];
114 if (i == j) {
115 if (val != 10)
116 return 0;
117 } else if (val <= 10)
118 return 0;
119 }
120 }
121 return 1;
122}
123
1da177e4
LT
124/* Callback for SLIT parsing */
125void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
126{
1584b89c
AK
127 if (!slit_valid(slit)) {
128 printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n");
129 return;
130 }
1da177e4
LT
131 acpi_slit = slit;
132}
133
134/* Callback for Proximity Domain -> LAPIC mapping */
135void __init
136acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
137{
138 int pxm, node;
d22fe808
AK
139 if (srat_disabled())
140 return;
fad7906d
AK
141 if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) {
142 bad_srat();
d22fe808
AK
143 return;
144 }
145 if (pa->flags.enabled == 0)
1da177e4
LT
146 return;
147 pxm = pa->proximity_domain;
148 node = setup_node(pxm);
149 if (node < 0) {
150 printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
151 bad_srat();
152 return;
153 }
0b07e984 154 apicid_to_node[pa->apic_id] = node;
1da177e4 155 acpi_numa = 1;
0b07e984
AK
156 printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n",
157 pxm, pa->apic_id, node);
1da177e4
LT
158}
159
68a3a7fe
AK
160#ifdef RESERVE_HOTADD
/*
 * Protect against too large hotadd areas that would fill up memory.
 * Returns 1 when reserving mem_map for the area in @nd fits within the
 * hotadd_percent budget (possibly after shrinking nd->end), 0 otherwise.
 */
static int hotadd_enough_memory(struct bootnode *nd)
{
	/* Running totals across all hotadd areas processed so far. */
	static unsigned long allocated;
	static unsigned long last_area_end;
	unsigned long pages = (nd->end - nd->start) >> PAGE_SHIFT;
	/* struct page bytes needed; signed so overflow shows up as < 0. */
	long mem = pages * sizeof(struct page);
	unsigned long addr;
	unsigned long allowed;
	unsigned long oldpages = pages;

	if (mem < 0)
		return 0;
	/* Budget = hotadd_percent% of the real (non-hole) e820 RAM. */
	allowed = (end_pfn - e820_hole_size(0, end_pfn)) * PAGE_SIZE;
	allowed = (allowed / 100) * hotadd_percent;
	if (allocated + mem > allowed) {
		unsigned long range;
		/* Give them at least part of their hotadd memory up to
		   hotadd_percent.
		   It would be better to spread the limit out
		   over multiple hotplug areas, but that is too complicated
		   right now */
		if (allocated >= allowed)
			return 0;
		/* Shrink the area to whatever budget remains. */
		range = allowed - allocated;
		pages = (range / PAGE_SIZE);
		mem = pages * sizeof(struct page);
		nd->end = nd->start + range;
	}
	/* Not completely fool proof, but a good sanity check */
	addr = find_e820_area(last_area_end, end_pfn<<PAGE_SHIFT, mem);
	if (addr == -1UL)
		return 0;
	if (pages != oldpages)
		printk(KERN_NOTICE "SRAT: Hotadd area limited to %lu bytes\n",
			pages << PAGE_SHIFT);
	last_area_end = addr + mem;
	allocated += mem;
	return 1;
}
202
/*
 * It is fine to add this area to the nodes data it will be used later
 * This code supports one contiguous hot add area per node.
 * Returns 0 on success, -1 when the region is rejected.
 */
static int reserve_hotadd(int node, unsigned long start, unsigned long end)
{
	unsigned long s_pfn = start >> PAGE_SHIFT;
	unsigned long e_pfn = end >> PAGE_SHIFT;
	int changed = 0;
	struct bootnode *nd = &nodes_add[node];

	/* I had some trouble with strange memory hotadd regions breaking
	   the boot. Be very strict here and reject anything unexpected.
	   If you want working memory hotadd write correct SRATs.

	   The node size check is a basic sanity check to guard against
	   mistakes */
	if ((signed long)(end - start) < NODE_MIN_SIZE) {
		printk(KERN_ERR "SRAT: Hotplug area too small\n");
		return -1;
	}

	/* This check might be a bit too strict, but I'm keeping it for now. */
	/* The whole region must be an e820 hole, i.e. contain no RAM yet. */
	if (e820_hole_size(s_pfn, e_pfn) != e_pfn - s_pfn) {
		printk(KERN_ERR "SRAT: Hotplug area has existing memory\n");
		return -1;
	}

	/* NOTE(review): this passes the node's PREVIOUS hotadd range (empty
	   on the first region for a node), not the new [start, end) region.
	   Looks suspicious - confirm against upstream history. */
	if (!hotadd_enough_memory(&nodes_add[node])) {
		printk(KERN_ERR "SRAT: Hotplug area too large\n");
		return -1;
	}

	/* Looks good */

	found_add_area = 1;
	/* Merge [start, end) into the node's single hotadd range; the new
	   piece must start it or be adjacent to either end. */
	if (nd->start == nd->end) {
		nd->start = start;
		nd->end = end;
		changed = 1;
	} else {
		if (nd->start == end) {
			nd->start = start;
			changed = 1;
		}
		if (nd->end == start) {
			nd->end = end;
			changed = 1;
		}
		if (!changed)
			printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n");
	}

	/* Extend end_pfn so the hot-added range is covered. */
	if ((nd->end >> PAGE_SHIFT) > end_pfn)
		end_pfn = nd->end >> PAGE_SHIFT;

	if (changed)
		printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n", nd->start, nd->end);
	return 0;
}
263#endif
264
1da177e4
LT
/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
void __init
acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
{
	struct bootnode *nd, oldnode;
	unsigned long start, end;
	int node, pxm;
	int i;

	if (srat_disabled())
		return;
	/* A wrong-sized entry means a corrupt or incompatible SRAT. */
	if (ma->header.length != sizeof(struct acpi_table_memory_affinity)) {
		bad_srat();
		return;
	}
	if (ma->flags.enabled == 0)
		return;
	/* Skip hotplug regions unless the user budgeted memory for them. */
	if (ma->flags.hot_pluggable && hotadd_percent == 0)
		return;
	start = ma->base_addr_lo | ((u64)ma->base_addr_hi << 32);
	end = start + (ma->length_lo | ((u64)ma->length_hi << 32));
	pxm = ma->proximity_domain;
	node = setup_node(pxm);
	if (node < 0) {
		printk(KERN_ERR "SRAT: Too many proximity domains.\n");
		bad_srat();
		return;
	}
	/* Self-overlap is only warned about; overlap with another node is
	   fatal for the whole SRAT. */
	i = conflicting_nodes(start, end);
	if (i == node) {
		printk(KERN_WARNING
		"SRAT: Warning: PXM %d (%lx-%lx) overlaps with itself (%Lx-%Lx)\n",
			pxm, start, end, nodes[i].start, nodes[i].end);
	} else if (i >= 0) {
		printk(KERN_ERR
		       "SRAT: PXM %d (%lx-%lx) overlaps with PXM %d (%Lx-%Lx)\n",
		       pxm, start, end, node_to_pxm(i),
			   nodes[i].start, nodes[i].end);
		bad_srat();
		return;
	}
	nd = &nodes[node];
	oldnode = *nd;	/* saved so a rejected hotadd region can be undone */
	if (!node_test_and_set(node, nodes_parsed)) {
		/* First region for this node. */
		nd->start = start;
		nd->end = end;
	} else {
		/* Node seen before: extend it to include this region. */
		if (start < nd->start)
			nd->start = start;
		if (nd->end < end)
			nd->end = end;
	}

	printk(KERN_INFO "SRAT: Node %u PXM %u %Lx-%Lx\n", node, pxm,
	       nd->start, nd->end);

#ifdef RESERVE_HOTADD
	if (ma->flags.hot_pluggable && reserve_hotadd(node, start, end) < 0) {
		/* Ignore hotadd region. Undo damage */
		printk(KERN_NOTICE "SRAT: Hotplug region ignored\n");
		*nd = oldnode;
		if ((nd->start | nd->end) == 0)
			node_clear(node, nodes_parsed);
	}
#endif
}
331
8a6fdd3e
AK
332/* Sanity check to catch more bad SRATs (they are amazingly common).
333 Make sure the PXMs cover all memory. */
334static int nodes_cover_memory(void)
335{
336 int i;
337 unsigned long pxmram, e820ram;
338
339 pxmram = 0;
340 for_each_node_mask(i, nodes_parsed) {
341 unsigned long s = nodes[i].start >> PAGE_SHIFT;
342 unsigned long e = nodes[i].end >> PAGE_SHIFT;
343 pxmram += e - s;
344 pxmram -= e820_hole_size(s, e);
68a3a7fe
AK
345 pxmram -= nodes_add[i].end - nodes_add[i].start;
346 if ((long)pxmram < 0)
347 pxmram = 0;
8a6fdd3e
AK
348 }
349
350 e820ram = end_pfn - e820_hole_size(0, end_pfn);
fdb9df94
AK
351 /* We seem to lose 3 pages somewhere. Allow a bit of slack. */
352 if ((long)(e820ram - pxmram) >= 1*1024*1024) {
8a6fdd3e
AK
353 printk(KERN_ERR
354 "SRAT: PXMs only cover %luMB of your %luMB e820 RAM. Not used.\n",
355 (pxmram << PAGE_SHIFT) >> 20,
356 (e820ram << PAGE_SHIFT) >> 20);
357 return 0;
358 }
359 return 1;
360}
361
9391a3f9
AK
362static void unparse_node(int node)
363{
364 int i;
365 node_clear(node, nodes_parsed);
366 for (i = 0; i < MAX_LOCAL_APIC; i++) {
367 if (apicid_to_node[i] == node)
368 apicid_to_node[i] = NUMA_NO_NODE;
369 }
370}
371
1da177e4
LT
/* Arch hook called after SRAT/SLIT parsing; nothing to fix up here. */
void __init acpi_numa_arch_fixup(void) {}
373
/* Use the information discovered above to actually set up the nodes.
   Returns 0 on success, -1 when the SRAT data is unusable. */
int __init acpi_scan_nodes(unsigned long start, unsigned long end)
{
	int i;

	/* First clean up the node list */
	for (i = 0; i < MAX_NUMNODES; i++) {
		/* Trim each node to [start, end) and drop runts; too small
		   nodes confuse the VM (see NODE_MIN_SIZE). */
		cutoff_node(i, start, end);
		if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE) {
			unparse_node(i);
			node_set_offline(i);
		}
	}

	/* No usable SRAT entry was ever seen. */
	if (acpi_numa <= 0)
		return -1;

	/* Reject SRATs that leave chunks of e820 RAM without a node. */
	if (!nodes_cover_memory()) {
		bad_srat();
		return -1;
	}

	memnode_shift = compute_hash_shift(nodes, MAX_NUMNODES);
	if (memnode_shift < 0) {
		printk(KERN_ERR
		     "SRAT: No NUMA node hash function found. Contact maintainer\n");
		bad_srat();
		return -1;
	}

	/* Finally register nodes */
	for_each_node_mask(i, nodes_parsed)
		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
	/* Try again in case setup_node_bootmem missed one due
	   to missing bootmem */
	for_each_node_mask(i, nodes_parsed)
		if (!node_online(i))
			setup_node_bootmem(i, nodes[i].start, nodes[i].end);

	/* Detach CPUs whose node was discarded above. */
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_to_node[i] == NUMA_NO_NODE)
			continue;
		if (!node_isset(cpu_to_node[i], nodes_parsed))
			numa_set_node(i, NUMA_NO_NODE);
	}
	numa_init_array();
	return 0;
}
422
68a3a7fe
AK
423void __init srat_reserve_add_area(int nodeid)
424{
425 if (found_add_area && nodes_add[nodeid].end) {
426 u64 total_mb;
427
428 printk(KERN_INFO "SRAT: Reserving hot-add memory space "
429 "for node %d at %Lx-%Lx\n",
430 nodeid, nodes_add[nodeid].start, nodes_add[nodeid].end);
431 total_mb = (nodes_add[nodeid].end - nodes_add[nodeid].start)
432 >> PAGE_SHIFT;
433 total_mb *= sizeof(struct page);
434 total_mb >>= 20;
435 printk(KERN_INFO "SRAT: This will cost you %Lu MB of "
436 "pre-allocated memory.\n", (unsigned long long)total_mb);
437 reserve_bootmem_node(NODE_DATA(nodeid), nodes_add[nodeid].start,
438 nodes_add[nodeid].end - nodes_add[nodeid].start);
439 }
440}
441
1da177e4
LT
442int __node_distance(int a, int b)
443{
444 int index;
445
446 if (!acpi_slit)
447 return a == b ? 10 : 20;
448 index = acpi_slit->localities * node_to_pxm(a);
449 return acpi_slit->entry[index + node_to_pxm(b)];
450}
451
452EXPORT_SYMBOL(__node_distance);