#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

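/*
 * cpu_number holds each CPU's logical id.  It is initialized for
 * every possible CPU in setup_per_cpu_areas() below; on x86,
 * raw_smp_processor_id() resolves to a percpu read of this variable.
 */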
DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);

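/*
 * Until setup_per_cpu_areas() installs the real offsets, every entry
 * above points at the initial percpu section (__per_cpu_load on
 * 64-bit, offset 0 on 32-bit), so percpu accesses made during early
 * boot hit the static copy linked into the kernel image.
 */
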
/*
 * On x86_64 symbols referenced from code should be reachable using
 * 32bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk which
 * is located at the percpu segment base.  On x86_32, anything can
 * address anywhere.  No need to reserve space in the first chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE	PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE	0
#endif

/**
 * pcpu_need_numa - determine whether percpu allocation should consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA or not.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	pg_data_t *last = NULL;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		int node = early_cpu_to_node(cpu);

		/* two distinct online nodes seen -> NUMA matters */
		if (node_online(node) && NODE_DATA(node) &&
		    last && last != NODE_DATA(node))
			return true;

		last = NODE_DATA(node);
	}
#endif
	return false;
}

/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size of the allocation in bytes
 * @align: alignment of the allocation in bytes
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
					unsigned long align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = early_cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		/* no node-local memory: fall back to any bootmem */
		ptr = __alloc_bootmem_nopanic(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
						   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
			 "%016lx\n", cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem_nopanic(size, align, goal);
#endif
}

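/*
 * The @goal of __pa(MAX_DMA_ADDRESS) asks bootmem to allocate above
 * the ISA DMA zone, so the percpu areas do not consume memory that
 * DMA-limited devices may later need.
 */
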
/*
 * Helpers for first chunk memory allocation
 */
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size)
{
	return pcpu_alloc_bootmem(cpu, size, size);
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

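/*
 * pcpu_fc_alloc() passes @size as the alignment as well, so every
 * unit comes back naturally aligned; for the large page allocator
 * below this is what makes the backing memory PMD-aligned.
 */
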
/*
 * Large page remapping allocator
 */
#ifdef CONFIG_NEED_MULTIPLE_NODES
static void __init pcpul_map(void *ptr, size_t size, void *addr)
{
	pmd_t *pmd, pmd_v;

	/* map the unit's backing pages at @addr with one large page PMD */
	pmd = populate_extra_pmd((unsigned long)addr);
	pmd_v = pfn_pmd(page_to_pfn(virt_to_page(ptr)), PAGE_KERNEL_LARGE);
	set_pmd(pmd, pmd_v);
}

static int pcpu_lpage_cpu_distance(unsigned int from, unsigned int to)
{
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}

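/*
 * LOCAL_DISTANCE and REMOTE_DISTANCE are the generic SLIT-style node
 * distances from linux/topology.h; the unit map builder uses this
 * callback to co-locate units of CPUs on the same node within one
 * large page.
 */
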
static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
{
	size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
	size_t dyn_size = reserve - PERCPU_FIRST_CHUNK_RESERVE;
	size_t unit_map_size, unit_size;
	int *unit_map;
	int nr_units;
	ssize_t ret;

	/* on non-NUMA, embedding is better */
	if (!chosen && !pcpu_need_numa())
		return -EINVAL;

	/* need PSE */
	if (!cpu_has_pse) {
		pr_warning("PERCPU: lpage allocator requires PSE\n");
		return -EINVAL;
	}

	/* allocate and build unit_map */
	unit_map_size = nr_cpu_ids * sizeof(int);
	unit_map = alloc_bootmem_nopanic(unit_map_size);
	if (!unit_map) {
		pr_warning("PERCPU: failed to allocate unit_map\n");
		return -ENOMEM;
	}

	ret = pcpu_lpage_build_unit_map(static_size,
					PERCPU_FIRST_CHUNK_RESERVE,
					&dyn_size, &unit_size, PMD_SIZE,
					unit_map, pcpu_lpage_cpu_distance);
	if (ret < 0) {
		pr_warning("PERCPU: failed to build unit_map\n");
		goto out_free;
	}
	nr_units = ret;

	/* do the parameters look okay? */
	if (!chosen) {
		size_t vm_size = VMALLOC_END - VMALLOC_START;
		size_t tot_size = nr_units * unit_size;

		/* don't consume more than 20% of vmalloc area */
		if (tot_size > vm_size / 5) {
			pr_info("PERCPU: too large chunk size %zuMB for "
				"large page remap\n", tot_size >> 20);
			ret = -EINVAL;
			goto out_free;
		}
	}

	ret = pcpu_lpage_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
				     dyn_size, unit_size, PMD_SIZE,
				     unit_map, nr_units,
				     pcpu_fc_alloc, pcpu_fc_free, pcpul_map);
out_free:
	if (ret < 0)
		free_bootmem(__pa(unit_map), unit_map_size);
	return ret;
}
#else	/* !CONFIG_NEED_MULTIPLE_NODES */
static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
{
	return -EINVAL;
}
#endif	/* CONFIG_NEED_MULTIPLE_NODES */

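/*
 * pcpu_chosen_fc, checked in setup_per_cpu_areas() below, carries any
 * explicit allocator choice made on the kernel command line (the
 * "percpu_alloc=" early parameter handled in mm/percpu.c).
 */
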
/*
 * Embedding allocator
 *
 * The first chunk is sized to just contain the static area plus
 * module and dynamic reserves and embedded into the linear physical
 * mapping so that it can use PMD mapping without additional TLB
 * pressure.
 */
static ssize_t __init setup_pcpu_embed(size_t static_size, bool chosen)
{
	size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;

	/*
	 * If large page isn't supported, there's no benefit in doing
	 * this.  Also, embedding allocation doesn't play well with
	 * NUMA.
	 */
	if (!chosen && (!cpu_has_pse || pcpu_need_numa()))
		return -EINVAL;

	return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
				      reserve - PERCPU_FIRST_CHUNK_RESERVE);
}

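/*
 * pcpu_embed_first_chunk() (mm/percpu.c) allocates the whole first
 * chunk out of bootmem, i.e. inside the kernel's linear mapping,
 * which is why it inherits large page mappings with no extra work.
 */
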
/*
 * 4k page allocator
 *
 * Boring fallback 4k page allocator.  This allocator puts more
 * pressure on PTE TLBs but other than that behaves nicely on both UMA
 * and NUMA.
 */
static void __init pcpup_populate_pte(unsigned long addr)
{
	populate_extra_pte(addr);
}

static ssize_t __init setup_pcpu_page(size_t static_size)
{
	return pcpu_page_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
				     pcpu_fc_alloc, pcpu_fc_free,
				     pcpup_populate_pte);
}

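/*
 * pcpu_page_first_chunk() maps the first chunk into the vmalloc area
 * one 4k page at a time; pcpup_populate_pte() only has to make sure
 * the page tables covering each address exist before the mapping is
 * installed.
 */
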
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	/* %fs-based percpu base: a GDT entry covering this CPU's area */
	struct desc_struct gdt;

	pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
			0x2 | DESCTYPE_S, 0x8);
	gdt.s = 1;
	write_gdt_entry(get_cpu_gdt_table(cpu),
			GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#endif
}

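/*
 * setup_percpu_segment() is a no-op on x86_64: there the percpu base
 * is carried in MSR_GS_BASE, which switch_to_new_gdt() loads via
 * load_percpu_segment(), so no GDT entry is involved.
 */
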
void __init setup_per_cpu_areas(void)
{
	size_t static_size = __per_cpu_end - __per_cpu_start;
	unsigned int cpu;
	unsigned long delta;
	size_t pcpu_unit_size;
	ssize_t ret;

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	/*
	 * Allocate percpu area.  If PSE is supported, try to make use
	 * of large page mappings.  Please read comments on top of
	 * each allocator for details.
	 */
	ret = -EINVAL;
	if (pcpu_chosen_fc != PCPU_FC_AUTO) {
		if (pcpu_chosen_fc != PCPU_FC_PAGE) {
			/* honor the explicitly requested allocator */
			if (pcpu_chosen_fc == PCPU_FC_LPAGE)
				ret = setup_pcpu_lpage(static_size, true);
			else
				ret = setup_pcpu_embed(static_size, true);

			if (ret < 0)
				pr_warning("PERCPU: %s allocator failed (%zd), "
					   "falling back to page size\n",
					   pcpu_fc_names[pcpu_chosen_fc], ret);
		}
	} else {
		/* auto selection: try lpage first, then embed */
		ret = setup_pcpu_lpage(static_size, false);
		if (ret < 0)
			ret = setup_pcpu_embed(static_size, false);
	}
	/* the 4k page allocator is the last resort before panicking */
	if (ret < 0)
		ret = setup_pcpu_page(static_size);
	if (ret < 0)
		panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
		      static_size, ret);

	pcpu_unit_size = ret;

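	/*
	 * On success each allocator returns the unit size, i.e. the
	 * stride between per-CPU copies of the static percpu section;
	 * it feeds the offset calculation below.
	 */
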
	/* alrighty, percpu areas up and running */
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		per_cpu_offset(cpu) =
			delta + pcpu_unit_map[cpu] * pcpu_unit_size;
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
		setup_percpu_segment(cpu);
		setup_stack_canary_segment(cpu);
		/*
		 * Copy data used in early init routines from the
		 * initial arrays to the per cpu data areas.  These
		 * arrays then become expendable and the *_early_ptr's
		 * are zeroed indicating that the static arrays are
		 * gone.
		 */
#ifdef CONFIG_X86_LOCAL_APIC
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
		/* initial pointer: top of the IRQ stack less 64 bytes of slack */
		per_cpu(irq_stack_ptr, cpu) =
			per_cpu(irq_stack_union.irq_stack, cpu) +
			IRQ_STACK_SIZE - 64;
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
#endif
		/*
		 * Up to this point, the boot CPU has been using .data.init
		 * area.  Reload any changed state for the boot CPU.
		 */
		if (cpu == boot_cpu_id)
			switch_to_new_gdt(cpu);
	}

	/* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
	/*
	 * Make sure the boot cpu's node_number is correct when the boot
	 * cpu is on a node that has no memory installed.
	 */
	per_cpu(node_number, boot_cpu_id) = cpu_to_node(boot_cpu_id);
#endif

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}