]> bbs.cooldavid.org Git - net-next-2.6.git/blame - arch/mips/kernel/irq.c
Merge branches 'irq-core-for-linus' and 'core-locking-for-linus' of git://git.kernel...
[net-next-2.6.git] / arch / mips / kernel / irq.c
CommitLineData
1da177e4
LT
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Code to handle x86 style IRQs plus some generic interrupt stuff.
7 *
8 * Copyright (C) 1992 Linus Torvalds
9 * Copyright (C) 1994 - 2000 Ralf Baechle
10 */
1da177e4
LT
11#include <linux/kernel.h>
12#include <linux/delay.h>
13#include <linux/init.h>
14#include <linux/interrupt.h>
15#include <linux/kernel_stat.h>
16#include <linux/module.h>
17#include <linux/proc_fs.h>
1da177e4
LT
18#include <linux/mm.h>
19#include <linux/random.h>
20#include <linux/sched.h>
21#include <linux/seq_file.h>
22#include <linux/kallsyms.h>
88547001 23#include <linux/kgdb.h>
8f99a162 24#include <linux/ftrace.h>
1da177e4
LT
25
26#include <asm/atomic.h>
27#include <asm/system.h>
28#include <asm/uaccess.h>
29
88547001
JW
#ifdef CONFIG_KGDB
/*
 * Set non-zero once KGDB has performed IRQ initialization early; checked in
 * init_IRQ() to avoid running the IRQ setup twice.
 */
int kgdb_early_setup;
#endif

/*
 * Bitmap of in-use IRQ numbers: bit n set means IRQ n is allocated.
 * DECLARE_BITMAP uses BITS_TO_LONGS(), which rounds UP to whole longs;
 * the previous open-coded "NR_IRQS / BITS_PER_LONG" rounded DOWN and
 * under-allocated the array whenever NR_IRQS was not a multiple of
 * BITS_PER_LONG, letting find_first_zero_bit(irq_map, NR_IRQS) read
 * past the end of the array.
 */
static DECLARE_BITMAP(irq_map, NR_IRQS);

f543110d 36int allocate_irqno(void)
4a4cf779
RB
37{
38 int irq;
39
40again:
41 irq = find_first_zero_bit(irq_map, NR_IRQS);
42
43 if (irq >= NR_IRQS)
44 return -ENOSPC;
45
46 if (test_and_set_bit(irq, irq_map))
47 goto again;
48
49 return irq;
50}
51
/*
 * Allocate the 16 legacy interrupts for i8259 devices. This happens early
 * in the kernel initialization so treating allocation failure as BUG() is
 * ok.
 *
 * NOTE(review): the loop condition "i <= 16" claims 17 bits (IRQs 0-16),
 * one more than the 16 legacy i8259 lines described above.  Confirm whether
 * IRQ 16 is reserved deliberately (e.g. for a CPU/cascade line) or this is
 * an off-by-one that should read "i < 16".
 */
void __init alloc_legacy_irqno(void)
{
	int i;

	for (i = 0; i <= 16; i++)
		BUG_ON(test_and_set_bit(i, irq_map));
}

64
/*
 * free_irqno - return an IRQ number previously obtained from
 * allocate_irqno() to the free pool.
 *
 * The barrier pair makes the clear_bit() act as a release/acquire point:
 * memory accesses before the call are ordered before the bit is cleared,
 * and the clear is ordered before subsequent accesses.  Do not reorder
 * these three statements.
 */
void free_irqno(unsigned int irq)
{
	smp_mb__before_clear_bit();
	clear_bit(irq, irq_map);
	smp_mb__after_clear_bit();
}

71
1da177e4
LT
72/*
73 * 'what should we do if we get a hw irq event on an illegal vector'.
74 * each architecture has to answer this themselves.
75 */
76void ack_bad_irq(unsigned int irq)
77{
1146fe30 78 smtc_im_ack_irq(irq);
1da177e4
LT
79 printk("unexpected IRQ # %d\n", irq);
80}
81
/*
 * Count of spurious/unexpected interrupts; incremented by
 * spurious_interrupt() and printed on the "ERR:" line by show_interrupts().
 */
atomic_t irq_err_count;

1da177e4
LT
84/*
85 * Generic, controller-independent functions:
86 */
87
88int show_interrupts(struct seq_file *p, void *v)
89{
90 int i = *(loff_t *) v, j;
91 struct irqaction * action;
92 unsigned long flags;
93
94 if (i == 0) {
95 seq_printf(p, " ");
394e3902 96 for_each_online_cpu(j)
21a151d8 97 seq_printf(p, "CPU%d ", j);
1da177e4
LT
98 seq_putc(p, '\n');
99 }
100
101 if (i < NR_IRQS) {
239007b8 102 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
1da177e4 103 action = irq_desc[i].action;
42a3b4f2 104 if (!action)
1da177e4 105 goto skip;
21a151d8 106 seq_printf(p, "%3d: ", i);
1da177e4
LT
107#ifndef CONFIG_SMP
108 seq_printf(p, "%10u ", kstat_irqs(i));
109#else
394e3902 110 for_each_online_cpu(j)
0b0f0b1c 111 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
1da177e4 112#endif
1ccd1c1c 113 seq_printf(p, " %14s", irq_desc[i].chip->name);
1da177e4
LT
114 seq_printf(p, " %s", action->name);
115
116 for (action=action->next; action; action = action->next)
117 seq_printf(p, ", %s", action->name);
118
119 seq_putc(p, '\n');
120skip:
239007b8 121 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
1da177e4
LT
122 } else if (i == NR_IRQS) {
123 seq_putc(p, '\n');
124 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
125 }
126 return 0;
127}
128
/*
 * Called for interrupts that arrive with no identifiable source; just
 * account them in irq_err_count (reported by show_interrupts()).
 */
asmlinkage void spurious_interrupt(void)
{
	atomic_inc(&irq_err_count);
}

133
1da177e4
LT
134void __init init_IRQ(void)
135{
24649c00
RB
136 int i;
137
88547001
JW
138#ifdef CONFIG_KGDB
139 if (kgdb_early_setup)
140 return;
141#endif
142
24649c00
RB
143 for (i = 0; i < NR_IRQS; i++)
144 set_irq_noprobe(i);
145
1da177e4 146 arch_init_irq();
88547001
JW
147
148#ifdef CONFIG_KGDB
149 if (!kgdb_early_setup)
150 kgdb_early_setup = 1;
151#endif
1da177e4 152}
8f99a162 153
#ifdef DEBUG_STACKOVERFLOW
/*
 * Warn when the kernel stack of the current task is nearly exhausted.
 * Reads $sp directly and masks it down to the offset within the current
 * THREAD_SIZE-aligned stack.
 */
static inline void check_stack_overflow(void)
{
	unsigned long sp;

	__asm__ __volatile__("move %0, $sp" : "=r" (sp));
	sp &= THREAD_MASK;

	/*
	 * Check for stack overflow: is there less than STACK_WARN free?
	 * STACK_WARN is defined as 1/8 of THREAD_SIZE by default.
	 */
	if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
		/* Was a bare printk(); give the warning an explicit level. */
		printk(KERN_WARNING "do_IRQ: stack overflow: %ld\n",
		       sp - sizeof(struct thread_info));
		dump_stack();
	}
}
#else
static inline void check_stack_overflow(void) {}
#endif

175
176
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * The call order is fixed: irq_enter() marks entry into interrupt
 * context before any handler runs, and irq_exit() must follow the
 * dispatch.  Do not reorder.
 */
void __irq_entry do_IRQ(unsigned int irq)
{
	irq_enter();
	check_stack_overflow();
	/* SMTC affinity/ack hook — presumably a no-op on non-SMTC builds. */
	__DO_IRQ_SMTC_HOOK(irq);
	generic_handle_irq(irq);
	irq_exit();
}

190
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
/*
 * To avoid inefficient and in some cases pathological re-checking of
 * IRQ affinity, we have this variant that skips the affinity check.
 */

void __irq_entry do_IRQ_no_affinity(unsigned int irq)
{
	irq_enter();
	/* Same SMTC hook as do_IRQ() but without the affinity re-check. */
	__NO_AFFINITY_IRQ_SMTC_HOOK(irq);
	generic_handle_irq(irq);
	irq_exit();
}

#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */