/*
 * SMP Support
 *
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999, 2001, 2003 David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Lots of stuff stolen from arch/alpha/kernel/smp.c
 *
 * 01/05/16 Rohit Seth <rohit.seth@intel.com>  IA64-SMP functions. Reorganized
 *             the existing code (on the lines of x86 port).
 * 00/09/11 David Mosberger <davidm@hpl.hp.com>  Do loops_per_jiffy
 *             calibration on each CPU.
 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com>  fixed logical processor id
 * 00/03/31 Rohit Seth <rohit.seth@intel.com>  Fixes for Bootstrap Processor
 *             & cpu_online_map now gets done here (instead of setup.c)
 * 99/10/05 davidm  Update to bring it in sync with new command-line processing
 *             scheme.
 * 10/13/00 Goutham Rao <goutham.rao@intel.com>  Updated smp_call_function and
 *             smp_call_function_single to resend IPI on timeouts
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/efi.h>
#include <linux/bitops.h>
#include <linux/kexec.h>

#include <asm/atomic.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/machvec.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/mca.h>

/*
 * Note: alignment of 4 entries/cacheline was empirically determined
 * to be a good tradeoff between hot cachelines & spreading the array
 * across too many cachelines.
 */
static struct local_tlb_flush_counts {
	unsigned int count;
} __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS];

static DEFINE_PER_CPU(unsigned short, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned;

#define IPI_CALL_FUNC		0
#define IPI_CPU_STOP		1
#define IPI_CALL_FUNC_SINGLE	2
#define IPI_KDUMP_CPU_STOP	3

/* This needs to be cacheline aligned because it is written to by *other* CPUs. */
static DEFINE_PER_CPU_SHARED_ALIGNED(u64, ipi_operation);

extern void cpu_halt (void);

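/*
 * Take the calling CPU out of service: clear it from cpu_online_map so
 * schedulers and IPI senders ignore it, raise the external task priority
 * (XTP) so the platform stops routing new interrupts here, then halt
 * with interrupts disabled.
 */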
static void
stop_this_cpu(void)
{
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	max_xtp();
	local_irq_disable();
	cpu_halt();
}

void
cpu_die(void)
{
	max_xtp();
	local_irq_disable();
	cpu_halt();
	/* Should never be here */
	BUG();
	for (;;);
}

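/*
 * IPI entry point.  Senders post an operation by setting a bit in the
 * target's per-CPU ipi_operation word and then raising IA64_IPI_VECTOR;
 * here we atomically swap that word to zero and dispatch every bit that
 * was set, looping in case new operations arrive while we work.
 */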
irqreturn_t
handle_IPI (int irq, void *dev_id)
{
	int this_cpu = get_cpu();
	unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);
	unsigned long ops;

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
		mb();	/* Order bit clearing and data access. */
		do {
			unsigned long which;

			which = ffz(~ops);
			ops &= ~(1 << which);

			switch (which) {
			case IPI_CPU_STOP:
				stop_this_cpu();
				break;
			case IPI_CALL_FUNC:
				generic_smp_call_function_interrupt();
				break;
			case IPI_CALL_FUNC_SINGLE:
				generic_smp_call_function_single_interrupt();
				break;
#ifdef CONFIG_KEXEC
			case IPI_KDUMP_CPU_STOP:
				unw_init_running(kdump_cpu_freeze, NULL);
				break;
#endif
			default:
				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
				       this_cpu, which);
				break;
			}
		} while (ops);
		mb();	/* Order data access and bit testing. */
	}
	put_cpu();
	return IRQ_HANDLED;
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_single (int dest_cpu, int op)
{
	set_bit(op, &per_cpu(ipi_operation, dest_cpu));
	platform_send_ipi(dest_cpu, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0);
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_allbutself (int op)
{
	unsigned int i;

	for_each_online_cpu(i) {
		if (i != smp_processor_id())
			send_IPI_single(i, op);
	}
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_mask(cpumask_t mask, int op)
{
	unsigned int cpu;

	for_each_cpu_mask(cpu, mask) {
		send_IPI_single(cpu, op);
	}
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_all (int op)
{
	int i;

	for_each_online_cpu(i) {
		send_IPI_single(i, op);
	}
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_self (int op)
{
	send_IPI_single(smp_processor_id(), op);
}

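/*
 * kdump support: kdump_smp_send_stop() asks every other CPU to save its
 * state and freeze.  Any CPU that fails to acknowledge (kdump_status[cpu]
 * still 0) is then hit with an INIT by kdump_smp_send_init(), since INIT
 * is delivered even when normal interrupts are blocked.
 */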
#ifdef CONFIG_KEXEC
void
kdump_smp_send_stop(void)
{
	send_IPI_allbutself(IPI_KDUMP_CPU_STOP);
}

void
kdump_smp_send_init(void)
{
	unsigned int cpu, self_cpu;

	self_cpu = smp_processor_id();
	for_each_online_cpu(cpu) {
		if (cpu != self_cpu) {
			if (kdump_status[cpu] == 0)
				platform_send_ipi(cpu, 0, IA64_IPI_DM_INIT, 0);
		}
	}
}
#endif

/*
 * Called with preemption disabled.
 */
void
smp_send_reschedule (int cpu)
{
	platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
}

/*
 * Called with preemption disabled.
 */
static void
smp_send_local_flush_tlb (int cpu)
{
	platform_send_ipi(cpu, IA64_IPI_LOCAL_TLB_FLUSH, IA64_IPI_DM_INT, 0);
}

void
smp_local_flush_tlb(void)
{
	/*
	 * Use atomic ops. Otherwise, the load/increment/store sequence from
	 * a "++" operation can have the line stolen between the load & store.
	 * The overhead of the atomic op is negligible in this case & offers
	 * significant benefit for the brief periods where lots of cpus
	 * are simultaneously flushing TLBs.
	 */
	ia64_fetchadd(1, &local_tlb_flush_counts[smp_processor_id()].count, acq);
	local_flush_tlb_all();
}

#define FLUSH_DELAY	5 /* Usec backoff to eliminate excessive cacheline bouncing */

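/*
 * Flush the local TLB of every CPU in xcpumask and wait for completion
 * (callers pass a cpumask such as an mm's cpu_vm_mask).  We snapshot the
 * low 16 bits of each target's flush counter, send each remote target an
 * IA64_IPI_LOCAL_TLB_FLUSH IPI, and then spin (backing off FLUSH_DELAY
 * usec per iteration) until every counter has moved on from its
 * snapshot, which signals that the flush has run.
 */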
void
smp_flush_tlb_cpumask(cpumask_t xcpumask)
{
	unsigned short *counts = __ia64_per_cpu_var(shadow_flush_counts);
	cpumask_t cpumask = xcpumask;
	int mycpu, cpu, flush_mycpu = 0;

	preempt_disable();
	mycpu = smp_processor_id();

	for_each_cpu_mask(cpu, cpumask)
		counts[cpu] = local_tlb_flush_counts[cpu].count & 0xffff;

	mb();
	for_each_cpu_mask(cpu, cpumask) {
		if (cpu == mycpu)
			flush_mycpu = 1;
		else
			smp_send_local_flush_tlb(cpu);
	}

	if (flush_mycpu)
		smp_local_flush_tlb();

	for_each_cpu_mask(cpu, cpumask)
		while (counts[cpu] == (local_tlb_flush_counts[cpu].count & 0xffff))
			udelay(FLUSH_DELAY);

	preempt_enable();
}

void
smp_flush_tlb_all (void)
{
	on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
}

void
smp_flush_tlb_mm (struct mm_struct *mm)
{
	preempt_disable();
	/* this happens for the common case of a single-threaded fork(): */
	if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
	{
		local_finish_flush_tlb_mm(mm);
		preempt_enable();
		return;
	}

	preempt_enable();
	/*
	 * We could optimize this further by using mm->cpu_vm_mask to track which CPUs
	 * have been running in the address space.  It's not clear that this is worth the
	 * trouble though: to avoid races, we have to raise the IPI on the target CPU
	 * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
	 * rather trivial.
	 */
	on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
}

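/* Arch hooks used by the generic smp_call_function code in kernel/smp.c. */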
void arch_send_call_function_single_ipi(int cpu)
{
	send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi(cpumask_t mask)
{
	send_IPI_mask(mask, IPI_CALL_FUNC);
}

/*
 * This function calls the 'stop' function on all other CPUs in the system.
 */
void
smp_send_stop (void)
{
	send_IPI_allbutself(IPI_CPU_STOP);
}

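/* Changing the profiling timer frequency is not supported on ia64. */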
int
setup_profiling_timer (unsigned int multiplier)
{
	return -EINVAL;
}