/*
 * linux/arch/sh/kernel/irq.c
 *
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * SuperH version: Copyright (C) 1999 Niibe Yutaka
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <asm/processor.h>
#include <asm/machvec.h>
#include <asm/uaccess.h>
#include <asm/thread_info.h>
#include <cpu/mmu_context.h>
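
/*
 * Count of interrupts taken on an unexpected vector; reported as the
 * ERR line of /proc/interrupts.
 */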
atomic_t irq_err_count;

/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself; it doesn't deserve
 * a generic callback, I think.
 */
void ack_bad_irq(unsigned int irq)
{
	atomic_inc(&irq_err_count);
	printk("unexpected IRQ trap at vector %02x\n", irq);
}

#if defined(CONFIG_PROC_FS)
/*
 * /proc/interrupts printing:
 */
static int show_other_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stat[j].__nmi_count);
	seq_printf(p, "  Non-maskable interrupts\n");

	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));

	return 0;
}

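/*
 * seq_file handler for the per-IRQ rows of /proc/interrupts: one count
 * column per online CPU, then the irq_chip and descriptor names, then
 * the attached actions. The slot one past the last IRQ is delegated to
 * show_other_interrupts() for the NMI/ERR summary lines.
 */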
int show_interrupts(struct seq_file *p, void *v)
{
	unsigned long flags, any_count = 0;
	int i = *(loff_t *)v, j, prec;
	struct irqaction *action;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;

	if (i > nr_irqs)
		return 0;

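	/* Label width: enough digits for nr_irqs, but no narrower than 3. */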
	for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
		j *= 10;

	if (i == nr_irqs)
		return show_other_interrupts(p, prec);

	if (i == 0) {
		seq_printf(p, "%*s", prec + 8, "");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	desc = irq_to_desc(i);
	if (!desc)
		return 0;

	data = irq_get_irq_data(i);
	chip = irq_data_get_irq_chip(data);

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_online_cpu(j)
		any_count |= kstat_irqs_cpu(i, j);
	action = desc->action;
	if (!action && !any_count)
		goto out;

	seq_printf(p, "%*d: ", prec, i);
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
	seq_printf(p, " %14s", chip->name);
	seq_printf(p, "-%-8s", desc->name);

	if (action) {
		seq_printf(p, "  %s", action->name);
		while ((action = action->next) != NULL)
			seq_printf(p, ", %s", action->name);
	}

	seq_putc(p, '\n');
out:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}
#endif

#ifdef CONFIG_IRQSTACKS
/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
	struct thread_info	tinfo;
	u32			stack[THREAD_SIZE/sizeof(u32)];
};

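/* Active IRQ context pointers for each CPU, set up by irq_ctx_init(). */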
static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;

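/*
 * Page-aligned backing store in .bss: one THREAD_SIZE-sized stack per
 * CPU for each of the two contexts.
 */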
static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;

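/*
 * Dispatch one decoded IRQ, switching to this CPU's hardirq stack
 * around the call to generic_handle_irq() where necessary.
 */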
static inline void handle_one_irq(unsigned int irq)
{
	union irq_ctx *curctx, *irqctx;

	curctx = (union irq_ctx *)current_thread_info();
	irqctx = hardirq_ctx[smp_processor_id()];

	/*
	 * this is where we switch to the IRQ stack. However, if we are
	 * already using the IRQ stack (because we interrupted a hardirq
	 * handler) we can't do that and just have to keep using the
	 * current stack (which is the irq stack already after all)
	 */
	if (curctx != irqctx) {
		u32 *isp;

		isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
		irqctx->tinfo.task = curctx->tinfo.task;
		irqctx->tinfo.previous_sp = current_stack_pointer;

		/*
		 * Copy the softirq bits in preempt_count so that the
		 * softirq checks work in the hardirq context.
		 */
		irqctx->tinfo.preempt_count =
			(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
			(curctx->tinfo.preempt_count & SOFTIRQ_MASK);

		__asm__ __volatile__ (
			"mov	%0, r4	\n"
			"mov	r15, r8	\n"
			"jsr	@%1	\n"
			/* switch to the irq stack */
			" mov	%2, r15	\n"
			/* restore the stack (ring zero) */
			"mov	r8, r15	\n"
			: /* no outputs */
			: "r" (irq), "r" (generic_handle_irq), "r" (isp)
			: "memory", "r0", "r1", "r2", "r3", "r4",
			  "r5", "r6", "r7", "r8", "t", "pr"
		);
	} else
		generic_handle_irq(irq);
}

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void irq_ctx_init(int cpu)
{
	union irq_ctx *irqctx;

	if (hardirq_ctx[cpu])
		return;

	irqctx = (union irq_ctx *)&hardirq_stack[cpu * THREAD_SIZE];
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= HARDIRQ_OFFSET;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	hardirq_ctx[cpu] = irqctx;

	irqctx = (union irq_ctx *)&softirq_stack[cpu * THREAD_SIZE];
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= 0;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	softirq_ctx[cpu] = irqctx;

	printk("CPU %u irqstacks, hard=%p soft=%p\n",
		cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
}

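/* Forget the context pointer so irq_ctx_init() can run again for this CPU. */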
void irq_ctx_exit(int cpu)
{
	hardirq_ctx[cpu] = NULL;
}

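/*
 * Run pending softirqs on the dedicated softirq stack. The stack switch
 * mirrors handle_one_irq(), with r9 used to stash the original stack
 * pointer around the call to __do_softirq().
 */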
asmlinkage void do_softirq(void)
{
	unsigned long flags;
	struct thread_info *curctx;
	union irq_ctx *irqctx;
	u32 *isp;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		curctx = current_thread_info();
		irqctx = softirq_ctx[smp_processor_id()];
		irqctx->tinfo.task = curctx->task;
		irqctx->tinfo.previous_sp = current_stack_pointer;

		/* build the stack frame on the softirq stack */
		isp = (u32 *)((char *)irqctx + sizeof(*irqctx));

		__asm__ __volatile__ (
			"mov	r15, r9	\n"
			"jsr	@%0	\n"
			/* switch to the softirq stack */
			" mov	%1, r15	\n"
			/* restore the thread stack */
			"mov	r9, r15	\n"
			: /* no outputs */
			: "r" (__do_softirq), "r" (isp)
			: "memory", "r0", "r1", "r2", "r3", "r4",
			  "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
		);

		/*
		 * Shouldn't happen, we returned above if in_interrupt():
		 */
		WARN_ON_ONCE(softirq_count());
	}

	local_irq_restore(flags);
}
#else
static inline void handle_one_irq(unsigned int irq)
{
	generic_handle_irq(irq);
}
#endif

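/*
 * Common C entry point for external interrupts: decode the vector via
 * irq_lookup()/irq_demux(), dispatch on the IRQ stack, and give the
 * platform's irq_finish() hook a chance to complete the interrupt.
 */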
asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();

	irq = irq_demux(irq_lookup(irq));

	if (irq != NO_IRQ_IGNORE) {
		handle_one_irq(irq);
		irq_finish(irq);
	}

	irq_exit();

	set_irq_regs(old_regs);

	return IRQ_HANDLED;
}

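/*
 * Boot-time interrupt setup: register the platform controllers, run
 * the optional machine-vector hook, finalize the INTC tables, then set
 * up the boot CPU's IRQ stacks.
 */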
void __init init_IRQ(void)
{
	plat_irq_setup();

	/* Perform the machine specific initialisation */
	if (sh_mv.mv_init_irq)
		sh_mv.mv_init_irq();

	intc_finalize();

	irq_ctx_init(smp_processor_id());
}

#ifdef CONFIG_SPARSE_IRQ
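/*
 * Let the machine vector size nr_irqs; the return value tells the
 * sparse IRQ core how many legacy descriptors to preallocate.
 */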
int __init arch_probe_nr_irqs(void)
{
	nr_irqs = sh_mv.mv_nr_irqs;
	return NR_IRQS_LEGACY;
}
#endif

#ifdef CONFIG_HOTPLUG_CPU
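/*
 * Steer a single IRQ to the given CPU through the chip's
 * irq_set_affinity() method, under the descriptor lock.
 */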
static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

	printk(KERN_INFO "IRQ%u: moving from cpu%u to cpu%u\n",
	       irq, data->node, cpu);

	raw_spin_lock_irq(&desc->lock);
	chip->irq_set_affinity(data, cpumask_of(cpu), false);
	raw_spin_unlock_irq(&desc->lock);
}

/*
 * The CPU has been marked offline. Migrate IRQs off this CPU. If
 * the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 */
void migrate_irqs(void)
{
	unsigned int irq, cpu = smp_processor_id();

	for_each_active_irq(irq) {
		struct irq_data *data = irq_get_irq_data(irq);

		if (data->node == cpu) {
			unsigned int newcpu = cpumask_any_and(data->affinity,
							      cpu_online_mask);
			if (newcpu >= nr_cpu_ids) {
				if (printk_ratelimit())
					printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
					       irq, cpu);

				cpumask_setall(data->affinity);
				newcpu = cpumask_any_and(data->affinity,
							 cpu_online_mask);
			}

			route_irq(data, irq, newcpu);
		}
	}
}
#endif