arch/mips/sibyte/sb1250/irq.c
/*
 * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>

#include <asm/errno.h>
#include <asm/signal.h>
#include <asm/system.h>
#include <asm/time.h>
#include <asm/io.h>

#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_int.h>
#include <asm/sibyte/sb1250_uart.h>
#include <asm/sibyte/sb1250_scd.h>
#include <asm/sibyte/sb1250.h>

/*
 * These are the routines that handle all the low level interrupt stuff.
 * Actions handled here are: initialization of the interrupt map, requesting
 * of interrupt lines by handlers, dispatching of interrupts to handlers and
 * probing for interrupt lines.
 */
static void end_sb1250_irq(unsigned int irq);
static void enable_sb1250_irq(unsigned int irq);
static void disable_sb1250_irq(unsigned int irq);
static void ack_sb1250_irq(unsigned int irq);
#ifdef CONFIG_SMP
static void sb1250_set_affinity(unsigned int irq, cpumask_t mask);
#endif

#ifdef CONFIG_SIBYTE_HAS_LDT
extern unsigned long ldt_eoi_space;
#endif

#ifdef CONFIG_KGDB
static int kgdb_irq;

/* Default to UART1 */
int kgdb_port = 1;
#ifdef CONFIG_SERIAL_SB1250_DUART
extern char sb1250_duart_present[];
#endif
#endif

static struct irq_chip sb1250_irq_type = {
        .name = "SB1250-IMR",
        .ack = ack_sb1250_irq,
        .mask = disable_sb1250_irq,
        .mask_ack = ack_sb1250_irq,
        .unmask = enable_sb1250_irq,
        .end = end_sb1250_irq,
#ifdef CONFIG_SMP
        .set_affinity = sb1250_set_affinity
#endif
};

/* Store the CPU id (not the logical number) */
int sb1250_irq_owner[SB1250_NR_IRQS];

DEFINE_SPINLOCK(sb1250_imr_lock);
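/*
 * Set or clear an interrupt source's bit in a CPU's interrupt mask
 * register (IMR).  A set bit masks (disables) that source; the spinlock
 * serialises the read-modify-write against the other CPU and against
 * affinity changes.
 */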
void sb1250_mask_irq(int cpu, int irq)
{
        unsigned long flags;
        u64 cur_ints;

        spin_lock_irqsave(&sb1250_imr_lock, flags);
        cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) +
                                        R_IMR_INTERRUPT_MASK));
        cur_ints |= (((u64) 1) << irq);
        ____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
                                        R_IMR_INTERRUPT_MASK));
        spin_unlock_irqrestore(&sb1250_imr_lock, flags);
}

void sb1250_unmask_irq(int cpu, int irq)
{
        unsigned long flags;
        u64 cur_ints;

        spin_lock_irqsave(&sb1250_imr_lock, flags);
        cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) +
                                        R_IMR_INTERRUPT_MASK));
        cur_ints &= ~(((u64) 1) << irq);
        ____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
                                        R_IMR_INTERRUPT_MASK));
        spin_unlock_irqrestore(&sb1250_imr_lock, flags);
}
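/*
 * Retarget an interrupt source to a single CPU: mask it on the current
 * owner's IMR, record the new owner and, if the source was enabled,
 * unmask it on the new CPU's IMR.  Only one target CPU is supported.
 */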
#ifdef CONFIG_SMP
static void sb1250_set_affinity(unsigned int irq, cpumask_t mask)
{
        int i = 0, old_cpu, cpu, int_on;
        u64 cur_ints;
        struct irq_desc *desc = irq_desc + irq;
        unsigned long flags;

        i = first_cpu(mask);

        if (cpus_weight(mask) > 1) {
                printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq);
                return;
        }

        /* Convert logical CPU to physical CPU */
        cpu = cpu_logical_map(i);

        /* Protect against other affinity changers and IMR manipulation */
        spin_lock_irqsave(&desc->lock, flags);
        spin_lock(&sb1250_imr_lock);

        /* Swizzle each CPU's IMR (but leave the IP selection alone) */
        old_cpu = sb1250_irq_owner[irq];
        cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(old_cpu) +
                                        R_IMR_INTERRUPT_MASK));
        int_on = !(cur_ints & (((u64) 1) << irq));
        if (int_on) {
                /* If it was on, mask it */
                cur_ints |= (((u64) 1) << irq);
                ____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(old_cpu) +
                                        R_IMR_INTERRUPT_MASK));
        }
        sb1250_irq_owner[irq] = cpu;
        if (int_on) {
                /* unmask for the new CPU */
                cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) +
                                        R_IMR_INTERRUPT_MASK));
                cur_ints &= ~(((u64) 1) << irq);
                ____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
                                        R_IMR_INTERRUPT_MASK));
        }
        spin_unlock(&sb1250_imr_lock);
        spin_unlock_irqrestore(&desc->lock, flags);
}
#endif
/*****************************************************************************/

static void disable_sb1250_irq(unsigned int irq)
{
        sb1250_mask_irq(sb1250_irq_owner[irq], irq);
}

static void enable_sb1250_irq(unsigned int irq)
{
        sb1250_unmask_irq(sb1250_irq_owner[irq], irq);
}
static void ack_sb1250_irq(unsigned int irq)
{
#ifdef CONFIG_SIBYTE_HAS_LDT
        u64 pending;

        /*
         * If the interrupt was an HT interrupt, now is the time to
         * clear it.  NOTE: we assume the HT bridge was set up to
         * deliver the interrupts to all CPUs (which makes affinity
         * changing easier for us)
         */
        pending = __raw_readq(IOADDR(A_IMR_REGISTER(sb1250_irq_owner[irq],
                                                    R_IMR_LDT_INTERRUPT)));
        pending &= ((u64)1 << (irq));
        if (pending) {
                int i;
                for (i = 0; i < NR_CPUS; i++) {
                        int cpu;
#ifdef CONFIG_SMP
                        cpu = cpu_logical_map(i);
#else
                        cpu = i;
#endif
                        /*
                         * Clear for all CPUs so an affinity switch
                         * doesn't find an old status
                         */
                        __raw_writeq(pending,
                                     IOADDR(A_IMR_REGISTER(cpu,
                                                R_IMR_LDT_INTERRUPT_CLR)));
                }

                /*
                 * Generate EOI.  For Pass 1 parts, EOI is a nop.  For
                 * Pass 2, the LDT world may be edge-triggered, but
                 * this EOI shouldn't hurt.  If they are
                 * level-sensitive, the EOI is required.
                 */
                *(uint32_t *)(ldt_eoi_space + (irq << 16) + (7 << 2)) = 0;
        }
#endif
        sb1250_mask_irq(sb1250_irq_owner[irq], irq);
}
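/*
 * Called when handling finishes: re-enable the line unless it has been
 * disabled in the meantime or another handler invocation is in progress.
 */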
static void end_sb1250_irq(unsigned int irq)
{
        if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
                sb1250_unmask_irq(sb1250_irq_owner[irq], irq);
        }
}
void __init init_sb1250_irqs(void)
{
        int i;

        for (i = 0; i < SB1250_NR_IRQS; i++) {
                set_irq_chip(i, &sb1250_irq_type);
                sb1250_irq_owner[i] = 0;
        }
}
static irqreturn_t sb1250_dummy_handler(int irq, void *dev_id)
{
        return IRQ_NONE;
}

static struct irqaction sb1250_dummy_action = {
        .handler = sb1250_dummy_handler,
        .flags   = 0,
        .mask    = CPU_MASK_NONE,
        .name    = "sb1250-private",
        .next    = NULL,
        .dev_id  = 0
};
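/*
 * Reserve an interrupt line for internal use (the mailbox and, with KGDB,
 * the debug UART) by installing a dummy action, so that drivers cannot
 * claim the line through request_irq() later.
 */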
int sb1250_steal_irq(int irq)
{
        struct irq_desc *desc = irq_desc + irq;
        unsigned long flags;
        int retval = 0;

        if (irq >= SB1250_NR_IRQS)
                return -EINVAL;

        spin_lock_irqsave(&desc->lock, flags);
        /* Don't allow sharing at all for these */
        if (desc->action != NULL)
                retval = -EBUSY;
        else {
                desc->action = &sb1250_dummy_action;
                desc->depth = 0;
        }
        spin_unlock_irqrestore(&desc->lock, flags);

        return retval;
}
/*
 * arch_init_irq is called early in the boot sequence from init/main.c via
 * init_IRQ.  It is responsible for setting up the interrupt mapper and
 * installing the handler that will be responsible for dispatching interrupts
 * to the "right" place.
 */
/*
 * For now, map all interrupts to IP[2].  We could save
 * some cycles by parceling out system interrupts to different
 * IP lines, but keep it simple for bringup.  We'll also direct
 * all interrupts to a single CPU; we should probably route
 * PCI and LDT to one cpu and everything else to the other
 * to balance the load a bit.
 *
 * On the second cpu, everything is set to IP5, which is
 * ignored, EXCEPT the mailbox interrupt.  That one is
 * set to IP[2] so it is handled.  This is needed so we
 * can do cross-cpu function calls, as required by SMP.
 */
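/*
 * Interrupt mapper output n is delivered on CPU interrupt line IP[n + 2]
 * (IP[0] and IP[1] are the software interrupts), hence the offset in the
 * names below.
 */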
#define IMR_IP2_VAL	K_INT_MAP_I0
#define IMR_IP3_VAL	K_INT_MAP_I1
#define IMR_IP4_VAL	K_INT_MAP_I2
#define IMR_IP5_VAL	K_INT_MAP_I3
#define IMR_IP6_VAL	K_INT_MAP_I4
void __init arch_init_irq(void)
{
        unsigned int i;
        u64 tmp;
        unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 |
                STATUSF_IP1 | STATUSF_IP0;

        /* Default everything to IP2 */
        for (i = 0; i < SB1250_NR_IRQS; i++) {	/* was I0 */
                __raw_writeq(IMR_IP2_VAL,
                             IOADDR(A_IMR_REGISTER(0,
                                                   R_IMR_INTERRUPT_MAP_BASE) +
                                    (i << 3)));
                __raw_writeq(IMR_IP2_VAL,
                             IOADDR(A_IMR_REGISTER(1,
                                                   R_IMR_INTERRUPT_MAP_BASE) +
                                    (i << 3)));
        }

        init_sb1250_irqs();

        /*
         * Map the high 16 bits of the mailbox registers to IP[3], for
         * inter-cpu messages
         */
        /* Was I1 */
        __raw_writeq(IMR_IP3_VAL,
                     IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_MAP_BASE) +
                            (K_INT_MBOX_0 << 3)));
        __raw_writeq(IMR_IP3_VAL,
                     IOADDR(A_IMR_REGISTER(1, R_IMR_INTERRUPT_MAP_BASE) +
                            (K_INT_MBOX_0 << 3)));

        /* Clear the mailboxes.  The firmware may leave them dirty */
        __raw_writeq(0xffffffffffffffffULL,
                     IOADDR(A_IMR_REGISTER(0, R_IMR_MAILBOX_CLR_CPU)));
        __raw_writeq(0xffffffffffffffffULL,
                     IOADDR(A_IMR_REGISTER(1, R_IMR_MAILBOX_CLR_CPU)));

        /* Mask everything except the mailbox registers for both cpus */
        tmp = ~((u64) 0) ^ (((u64) 1) << K_INT_MBOX_0);
        __raw_writeq(tmp, IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_MASK)));
        __raw_writeq(tmp, IOADDR(A_IMR_REGISTER(1, R_IMR_INTERRUPT_MASK)));

        sb1250_steal_irq(K_INT_MBOX_0);

        /*
         * Note that the timer interrupts are also mapped, but this is
         * done in sb1250_time_init().  Also, the profiling driver
         * does its own management of IP7.
         */

#ifdef CONFIG_KGDB
        imask |= STATUSF_IP6;
#endif
        /* Enable necessary IPs, disable the rest */
        change_c0_status(ST0_IM, imask);

#ifdef CONFIG_KGDB
        if (kgdb_flag) {
                kgdb_irq = K_INT_UART_0 + kgdb_port;

#ifdef CONFIG_SERIAL_SB1250_DUART
                sb1250_duart_present[kgdb_port] = 0;
#endif
                /* Setup uart 1 settings, mapper */
                __raw_writeq(M_DUART_IMR_BRK,
                             IOADDR(A_DUART_IMRREG(kgdb_port)));

                sb1250_steal_irq(kgdb_irq);
                __raw_writeq(IMR_IP6_VAL,
                             IOADDR(A_IMR_REGISTER(0,
                                                   R_IMR_INTERRUPT_MAP_BASE) +
                                    (kgdb_irq << 3)));
                sb1250_unmask_irq(0, kgdb_irq);
        }
#endif
}
#ifdef CONFIG_KGDB

#include <linux/delay.h>

#define duart_out(reg, val)	csr_out32(val, IOADDR(A_DUART_CHANREG(kgdb_port, reg)))
#define duart_in(reg)		csr_in32(IOADDR(A_DUART_CHANREG(kgdb_port, reg)))

static void sb1250_kgdb_interrupt(void)
{
        /*
         * Clear break-change status (allow some time for the remote
         * host to stop the break, since we would see another
         * interrupt on the end-of-break too)
         */
        kstat_this_cpu.irqs[kgdb_irq]++;
        mdelay(500);
        duart_out(R_DUART_CMD, V_DUART_MISC_CMD_RESET_BREAK_INT |
                               M_DUART_RX_EN | M_DUART_TX_EN);
        set_async_breakpoint(&get_irq_regs()->cp0_epc);
}

#endif	/* CONFIG_KGDB */
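/*
 * Each CPU has its own SCD general-purpose timer (IRQ K_INT_TIMER_0 + cpu)
 * running in continuous mode; rewriting the timer configuration register
 * below acknowledges the interrupt before the generic tick code runs.
 */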
static inline void sb1250_timer_interrupt(void)
{
        int cpu = smp_processor_id();
        int irq = K_INT_TIMER_0 + cpu;

        irq_enter();
        kstat_this_cpu.irqs[irq]++;

        write_seqlock(&xtime_lock);

        /* ACK interrupt */
        ____raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
                       IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)));

        /*
         * call the generic timer interrupt handling
         */
        do_timer(1);

        write_sequnlock(&xtime_lock);

        /*
         * In UP mode, we call local_timer_interrupt() to do profiling
         * and process accounting.
         *
         * In SMP mode, local_timer_interrupt() is invoked by the appropriate
         * low-level local timer interrupt handler.
         */
        local_timer_interrupt(irq);

        irq_exit();
}
extern void sb1250_mailbox_interrupt(void);
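/*
 * Top-level dispatch.  The CP0 cause and status registers tell us which
 * IP line fired: IP[7] is the CPU performance counter, IP[4] the per-CPU
 * timer, IP[3] the SMP mailbox and IP[6] the KGDB UART; everything routed
 * through the interrupt mapper arrives on IP[2] and is decoded from the
 * current CPU's IMR interrupt status register.
 */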
asmlinkage void plat_irq_dispatch(void)
{
        unsigned int pending;

        /*
         * What a pain. We have to be really careful saving the upper 32 bits
         * of any register across function calls if we don't want them
         * trashed--since we're running in -o32, the calling routine never
         * saves the full 64 bits of a register across a function call.
         * Being the interrupt handler, we're guaranteed that interrupts are
         * disabled during this code so we don't have to worry about random
         * interrupts blasting the high 32 bits.
         */

        pending = read_c0_cause() & read_c0_status() & ST0_IM;

        if (pending & CAUSEF_IP7) /* CPU performance counter interrupt */
                do_IRQ(MIPS_CPU_IRQ_BASE + 7);
        else if (pending & CAUSEF_IP4)
                sb1250_timer_interrupt();

#ifdef CONFIG_SMP
        else if (pending & CAUSEF_IP3)
                sb1250_mailbox_interrupt();
#endif

#ifdef CONFIG_KGDB
        else if (pending & CAUSEF_IP6)		/* KGDB (uart 1) */
                sb1250_kgdb_interrupt();
#endif

        else if (pending & CAUSEF_IP2) {
                unsigned long long mask;

                /*
                 * Default...we've hit an IP[2] interrupt, which means we've
                 * got to check the 1250 interrupt registers to figure out what
                 * to do.  Need to detect which CPU we're on, now that
                 * smp_affinity is supported.
                 */
                mask = __raw_readq(IOADDR(A_IMR_REGISTER(smp_processor_id(),
                                          R_IMR_INTERRUPT_STATUS_BASE)));
                if (mask)
                        do_IRQ(fls64(mask) - 1);
                else
                        spurious_interrupt();
        } else
                spurious_interrupt();
}