#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/preempt.h>
#include <linux/slab.h> /* For kmalloc() */
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/pfn.h>

#include <asm/percpu.h>
/*
 * Linker section names for per-cpu data.  An architecture may override
 * PER_CPU_BASE_SECTION before including this header; otherwise percpu
 * variables live in .data.percpu on SMP and plain .data on UP.
 */
#ifndef PER_CPU_BASE_SECTION
#ifdef CONFIG_SMP
#define PER_CPU_BASE_SECTION ".data.percpu"
#else
#define PER_CPU_BASE_SECTION ".data"
#endif
#endif

#ifdef CONFIG_SMP

#ifdef MODULE
/* module percpu areas have no dedicated shared-aligned sub-section */
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#else
#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
#endif
#define PER_CPU_FIRST_SECTION ".first"

#else

/* UP: no false sharing between CPUs, so no special sub-sections */
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_FIRST_SECTION ""

#endif
/*
 * DEFINE_PER_CPU_SECTION() places a per-cpu variable in the percpu base
 * section with an optional sub-section suffix.  The symbol is prefixed
 * with per_cpu__ so the accessor macros can locate it.
 */
#define DEFINE_PER_CPU_SECTION(type, name, section)			\
	__attribute__((__section__(PER_CPU_BASE_SECTION section)))	\
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name

/* plain per-cpu variable, no special alignment */
#define DEFINE_PER_CPU(type, name)					\
	DEFINE_PER_CPU_SECTION(type, name, "")

/* cacheline-aligned on SMP to avoid false sharing between CPUs */
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

/* page-aligned percpu variables get their own sub-section */
#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, ".page_aligned")

/* must be placed first in the percpu area (arch-specific users) */
#define DEFINE_PER_CPU_FIRST(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
55 | ||
/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE	(8 << 10)
#else
#define PERCPU_MODULE_RESERVE	0
#endif

#ifndef PERCPU_ENOUGH_ROOM
/*
 * Total percpu area size: the static usage (__per_cpu_start to
 * __per_cpu_end) rounded up to a cacheline, plus the module reserve.
 */
#define PERCPU_ENOUGH_ROOM						\
	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) +	\
	 PERCPU_MODULE_RESERVE)
#endif
b00742d3 | 68 | |
/*
 * Must be an lvalue.  Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 *
 * Disables preemption so the caller stays on the same CPU while
 * using the variable; release with put_cpu_var().
 */
#define get_cpu_var(var) (*({				\
	extern int simple_identifier_##var(void);	\
	preempt_disable();				\
	&__get_cpu_var(var); }))

/* re-enable preemption; pairs with get_cpu_var() */
#define put_cpu_var(var) preempt_enable()
79 | #ifdef CONFIG_SMP | |
80 | ||
fbf59bc9 | 81 | #ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA |
1da177e4 | 82 | |
8d408b4b | 83 | /* minimum unit size, also is the maximum supported allocation size */ |
6a242909 | 84 | #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10) |
8d408b4b TH |
85 | |
86 | /* | |
87 | * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy | |
6b19b0c2 TH |
88 | * back on the first chunk for dynamic percpu allocation if arch is |
89 | * manually allocating and mapping it for faster access (as a part of | |
90 | * large page mapping for example). | |
8d408b4b | 91 | * |
6b19b0c2 TH |
92 | * The following values give between one and two pages of free space |
93 | * after typical minimal boot (2-way SMP, single disk and NIC) with | |
94 | * both defconfig and a distro config on x86_64 and 32. More | |
95 | * intelligent way to determine this would be nice. | |
8d408b4b | 96 | */ |
6b19b0c2 TH |
97 | #if BITS_PER_LONG > 32 |
98 | #define PERCPU_DYNAMIC_RESERVE (20 << 10) | |
99 | #else | |
100 | #define PERCPU_DYNAMIC_RESERVE (12 << 10) | |
101 | #endif | |
8d408b4b | 102 | |
fbf59bc9 | 103 | extern void *pcpu_base_addr; |
1da177e4 | 104 | |
8d408b4b | 105 | typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno); |
fbf59bc9 TH |
106 | typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr); |
107 | ||
8d408b4b | 108 | extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn, |
edcb4639 | 109 | size_t static_size, size_t reserved_size, |
6074d5b0 | 110 | ssize_t dyn_size, ssize_t unit_size, |
edcb4639 TH |
111 | void *base_addr, |
112 | pcpu_populate_pte_fn_t populate_pte_fn); | |
8d408b4b | 113 | |
f2a8205c TH |
114 | /* |
115 | * Use this to get to a cpu's version of the per-cpu object | |
116 | * dynamically allocated. Non-atomic access to the current CPU's | |
117 | * version should probably be combined with get_cpu()/put_cpu(). | |
118 | */ | |
fbf59bc9 TH |
119 | #define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))) |
120 | ||
edcb4639 TH |
121 | extern void *__alloc_reserved_percpu(size_t size, size_t align); |
122 | ||
fbf59bc9 TH |
123 | #else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */ |
124 | ||
125 | struct percpu_data { | |
126 | void *ptrs[1]; | |
127 | }; | |
128 | ||
129 | #define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata) | |
130 | ||
f2a8205c TH |
131 | #define per_cpu_ptr(ptr, cpu) \ |
132 | ({ \ | |
133 | struct percpu_data *__p = __percpu_disguise(ptr); \ | |
134 | (__typeof__(ptr))__p->ptrs[(cpu)]; \ | |
135 | }) | |
136 | ||
fbf59bc9 TH |
137 | #endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */ |
138 | ||
f2a8205c TH |
139 | extern void *__alloc_percpu(size_t size, size_t align); |
140 | extern void free_percpu(void *__pdata); | |
1da177e4 LT |
141 | |
142 | #else /* CONFIG_SMP */ | |
143 | ||
b36128c8 | 144 | #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); }) |
7ff6f082 | 145 | |
f2a8205c | 146 | static inline void *__alloc_percpu(size_t size, size_t align) |
7ff6f082 | 147 | { |
f2a8205c TH |
148 | /* |
149 | * Can't easily make larger alignment work with kmalloc. WARN | |
150 | * on it. Larger alignment should only be used for module | |
151 | * percpu sections on SMP for which this path isn't used. | |
152 | */ | |
e3176036 | 153 | WARN_ON_ONCE(align > SMP_CACHE_BYTES); |
d2b02615 | 154 | return kzalloc(size, GFP_KERNEL); |
7ff6f082 MP |
155 | } |
156 | ||
f2a8205c | 157 | static inline void free_percpu(void *p) |
7ff6f082 | 158 | { |
f2a8205c | 159 | kfree(p); |
1da177e4 LT |
160 | } |
161 | ||
162 | #endif /* CONFIG_SMP */ | |
163 | ||
/* type-safe front end: allocate one properly-aligned object of @type per cpu */
#define alloc_percpu(type)	(type *)__alloc_percpu(sizeof(type),	\
						       __alignof__(type))
#endif /* __LINUX_PERCPU_H */