/* arch/x86/kernel/setup_percpu.c */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/cpumask.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
EXPORT_SYMBOL(boot_cpu_physical_apicid);
unsigned int max_physical_apicid;

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

/*
 * Map cpu index to physical APIC ID
 */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
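
/*
 * Note: an "early" per-cpu variable pairs the real per-cpu variable with a
 * static, NR_CPUS-sized boot-time array.  early_per_cpu_map() reads that
 * array until setup_per_cpu_maps() below copies its contents into the
 * per-cpu areas and NULLs the early pointers.
 */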

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#define X86_64_NUMA 1 /* (used later) */

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/*
 * Which logical CPUs are on which nodes
 */
cpumask_t *node_to_cpumask_map;
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Setup node_to_cpumask_map
 */
static void __init setup_node_to_cpumask_map(void);

#else
static inline void setup_node_to_cpumask_map(void) { }
#endif

#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas.  These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(x86_cpu_to_apicid, cpu) =
				early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
				early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef X86_64_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
				early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
	}

	/* indicate the early static arrays will soon be gone */
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#ifdef X86_64_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
}

#ifdef CONFIG_X86_32
/*
 * Great future not-so-futuristic plan: make i386 and x86_64 do it
 * the same way
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
static inline void setup_cpu_pda_map(void) { }

#elif !defined(CONFIG_SMP)
static inline void setup_cpu_pda_map(void) { }

#else /* CONFIG_SMP && CONFIG_X86_64 */

/*
 * Allocate the cpu_pda array via alloc_bootmem; the pda pointer table
 * itself is static.
 */
static void __init setup_cpu_pda_map(void)
{
	char *pda;
	unsigned long size;
	int cpu;

	size = roundup(sizeof(struct x8664_pda), cache_line_size());

	/* allocate the cpu_pda array (the boot cpu's pda stays static) */
	{
		unsigned long asize = size * (nr_cpu_ids - 1);

		pda = alloc_bootmem(asize);
	}

	/* point the pda pointer table at the newly allocated pdas */
	for_each_possible_cpu(cpu) {
		if (cpu == 0) {
			/* leave boot cpu pda in place */
			continue;
		}
		cpu_pda(cpu) = (struct x8664_pda *)pda;
		cpu_pda(cpu)->in_bootmem = 1;
		pda += size;
	}
}
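
/*
 * Resulting layout: cpu 0 keeps its statically allocated pda; cpus
 * 1..nr_cpu_ids-1 get consecutive, cache-line-rounded pdas out of the
 * bootmem chunk allocated above.  ->in_bootmem records that the memory
 * came from the boot allocator rather than the regular allocators.
 */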

#endif /* CONFIG_SMP && CONFIG_X86_64 */

#ifdef CONFIG_X86_64

/* correctly size the local cpu masks */
static void setup_cpu_local_masks(void)
{
	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
	alloc_bootmem_cpumask_var(&cpu_callin_mask);
	alloc_bootmem_cpumask_var(&cpu_callout_mask);
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}
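
/*
 * Note: when CONFIG_CPUMASK_OFFSTACK is set, alloc_bootmem_cpumask_var()
 * backs each cpumask_var_t with bootmem storage; otherwise the masks are
 * plain arrays and the call is effectively a no-op.
 */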

#else /* CONFIG_X86_32 */

static inline void setup_cpu_local_masks(void)
{
}

#endif /* CONFIG_X86_32 */

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	ssize_t size, old_size;
	char *ptr;
	int cpu;
	unsigned long align = 1;

	/* Setup cpu_pda map */
	setup_cpu_pda_map();

	/* Copy section for each CPU (we discard the original) */
	old_size = PERCPU_ENOUGH_ROOM;
	align = max_t(unsigned long, PAGE_SIZE, align);
	size = roundup(old_size, align);

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);

	for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = __alloc_bootmem(size, align,
				__pa(MAX_DMA_ADDRESS));
#else
		int node = early_cpu_to_node(cpu);
		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = __alloc_bootmem(size, align,
					__pa(MAX_DMA_ADDRESS));
			pr_info("cpu %d has no node %d or node-local memory\n",
				cpu, node);
			pr_debug("per cpu data for cpu%d at %016lx\n",
				 cpu, __pa(ptr));
		} else {
			ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
						__pa(MAX_DMA_ADDRESS));
			pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
				 cpu, node, __pa(ptr));
		}
#endif
		per_cpu_offset(cpu) = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);
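
		/*
		 * ptr now holds this cpu's private copy of the per-cpu
		 * section; roughly, per_cpu(var, cpu) resolves to the
		 * address of var's reference copy plus per_cpu_offset(cpu).
		 */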

		DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
	}

	/* Setup percpu data maps */
	setup_per_cpu_maps();

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}

#endif

#ifdef X86_64_NUMA

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;
	cpumask_t *map;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
	DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids);

	pr_debug("Node to cpumask map at %p for %d nodes\n",
		 map, nr_node_ids);

	/* node_to_cpumask() will now work */
	node_to_cpumask_map = map;
}

void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !per_cpu_offset(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	if (node != NUMA_NO_NODE)
		cpu_pda(cpu)->nodenumber = node;
}

void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

void __cpuinit numa_add_cpu(int cpu)
{
	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpu_clear(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

#else /* CONFIG_DEBUG_PER_CPU_MAPS */

/*
 * --------- debug versions of the numa functions ---------
 */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	int node = early_cpu_to_node(cpu);
	cpumask_t *mask;
	char buf[64];

	if (node_to_cpumask_map == NULL) {
		printk(KERN_ERR "node_to_cpumask_map NULL\n");
		dump_stack();
		return;
	}

	mask = &node_to_cpumask_map[node];
	if (enable)
		cpu_set(cpu, *mask);
	else
		cpu_clear(cpu, *mask);

	cpulist_scnprintf(buf, sizeof(buf), mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}

int cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!per_cpu_offset(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
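
/*
 * Note: early_cpu_to_node() is the variant meant for use before the per-cpu
 * areas exist; cpu_to_node() warns when called that early but still answers
 * from the early map.
 */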
9f248bde 364
6a2f47ca
MT
365
366/* empty cpumask */
367static const cpumask_t cpu_mask_none;
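
/*
 * Returning &cpu_mask_none gives callers a valid, always-empty mask for an
 * out-of-range node instead of indexing past node_to_cpumask_map.
 */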

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const cpumask_t *cpumask_of_node(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return (const cpumask_t *)&cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): node > nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return &cpu_mask_none;
	}
	return &node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);
9f248bde
MT
391
392/*
393 * Returns a bitmask of CPUs on Node 'node'.
6a2f47ca
MT
394 *
395 * Side note: this function creates the returned cpumask on the stack
396 * so with a high NR_CPUS count, excessive stack space is used. The
397 * node_to_cpumask_ptr function should be used whenever possible.
9f248bde
MT
398 */
399cpumask_t node_to_cpumask(int node)
400{
401 if (node_to_cpumask_map == NULL) {
402 printk(KERN_WARNING
403 "node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
404 dump_stack();
405 return cpu_online_map;
406 }
6a2f47ca
MT
407 if (node >= nr_node_ids) {
408 printk(KERN_WARNING
409 "node_to_cpumask(%d): node > nr_node_ids(%d)\n",
410 node, nr_node_ids);
411 dump_stack();
412 return cpu_mask_none;
413 }
9f248bde
MT
414 return node_to_cpumask_map[node];
415}
416EXPORT_SYMBOL(node_to_cpumask);
417
418/*
419 * --------- end of debug versions of the numa functions ---------
420 */
421
422#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
423
424#endif /* X86_64_NUMA */
1ecd2765 425