net-next-2.6.git: kernel/irq/handle.c
commit: tracing, genirq: add irq enter and exit trace events
/*
 * linux/kernel/irq/handle.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */

#include <linux/irq.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/rculist.h>
#include <linux/hash.h>
#include <trace/irq.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
struct lock_class_key irq_desc_lock_class;

/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @irq:       the interrupt number
 * @desc:      description of the interrupt
 *
 * Handles spurious and unhandled IRQs. It also prints a debug message.
 */
void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
        print_irq_desc(irq, desc);
        kstat_incr_irqs_this_cpu(irq, desc);
        ack_bad_irq(irq);
}
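
/*
 * A freshly initialized descriptor points at handle_bad_irq until a real
 * handler is installed.  A minimal, hypothetical sketch of how platform
 * code would replace it, using set_irq_chip_and_handler() and the
 * handle_level_irq flow handler from <linux/irq.h> ("example_chip" is
 * illustrative, not part of this file):
 */
#if 0
static struct irq_chip example_chip;    /* filled in by the platform */

static void __init example_init_irq(unsigned int irq)
{
        /* install a real chip and a level-type flow handler */
        set_irq_chip_and_handler(irq, &example_chip, handle_level_irq);
}
#endif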

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
        alloc_bootmem_cpumask_var(&irq_default_affinity);
        cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

/*
 * Linux has a controller-independent interrupt architecture.
 * Every controller has a 'controller-template', that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic or
 * having to touch the generic code.
 *
 * Controller mappings for all interrupt sources:
 */
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

#ifdef CONFIG_SPARSE_IRQ
static struct irq_desc irq_desc_init = {
        .irq        = -1,
        .status     = IRQ_DISABLED,
        .chip       = &no_irq_chip,
        .handle_irq = handle_bad_irq,
        .depth      = 1,
        .lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
#ifdef CONFIG_SMP
        .affinity   = CPU_MASK_ALL
#endif
};

void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
{
        unsigned long bytes;
        char *ptr;
        int node;

        /* Compute how many bytes we need per irq and allocate them */
        bytes = nr * sizeof(unsigned int);

        node = cpu_to_node(cpu);
        ptr = kzalloc_node(bytes, GFP_ATOMIC, node);
        printk(KERN_DEBUG "  alloc kstat_irqs on cpu %d node %d\n", cpu, node);

        if (ptr)
                desc->kstat_irqs = (unsigned int *)ptr;
}
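
/*
 * Each descriptor carries its own kstat_irqs[] array with one counter
 * slot per possible CPU; kstat_incr_irqs_this_cpu() bumps the slot of
 * the executing CPU.  A hypothetical sketch of what the accounting
 * amounts to, assuming the helper of this era:
 */
#if 0
static void example_account_irq(struct irq_desc *desc)
{
        /* conceptually what kstat_incr_irqs_this_cpu() does */
        desc->kstat_irqs[smp_processor_id()]++;
}
#endif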

static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
{
        memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));

        spin_lock_init(&desc->lock);
        desc->irq = irq;
#ifdef CONFIG_SMP
        desc->cpu = cpu;
#endif
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        init_kstat_irqs(desc, cpu, nr_cpu_ids);
        if (!desc->kstat_irqs) {
                printk(KERN_ERR "can not alloc kstat_irqs\n");
                BUG_ON(1);
        }
        arch_init_chip_data(desc, cpu);
}

/*
 * Protect the sparse_irqs:
 */
DEFINE_SPINLOCK(sparse_irq_lock);

struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;

static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS_LEGACY-1] = {
                .irq        = -1,
                .status     = IRQ_DISABLED,
                .chip       = &no_irq_chip,
                .handle_irq = handle_bad_irq,
                .depth      = 1,
                .lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
#ifdef CONFIG_SMP
                .affinity   = CPU_MASK_ALL
#endif
        }
};

/* FIXME: use bootmem alloc ... */
static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];

int __init early_irq_init(void)
{
        struct irq_desc *desc;
        int legacy_count;
        int i;

        init_irq_default_affinity();

        desc = irq_desc_legacy;
        legacy_count = ARRAY_SIZE(irq_desc_legacy);

        for (i = 0; i < legacy_count; i++) {
                desc[i].irq = i;
                desc[i].kstat_irqs = kstat_irqs_legacy[i];
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);

                irq_desc_ptrs[i] = desc + i;
        }

        for (i = legacy_count; i < NR_IRQS; i++)
                irq_desc_ptrs[i] = NULL;

        return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
}

struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc;
        unsigned long flags;
        int node;

        if (irq >= NR_IRQS) {
                printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
                                irq, NR_IRQS);
                WARN_ON(1);
                return NULL;
        }

        desc = irq_desc_ptrs[irq];
        if (desc)
                return desc;

        spin_lock_irqsave(&sparse_irq_lock, flags);

        /* We have to check it to avoid races with another CPU */
        desc = irq_desc_ptrs[irq];
        if (desc)
                goto out_unlock;

        node = cpu_to_node(cpu);
        desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
        printk(KERN_DEBUG "  alloc irq_desc for %d on cpu %d node %d\n",
                 irq, cpu, node);
        if (!desc) {
                printk(KERN_ERR "can not alloc irq_desc\n");
                BUG_ON(1);
        }
        init_one_irq_desc(irq, desc, cpu);

        irq_desc_ptrs[irq] = desc;

out_unlock:
        spin_unlock_irqrestore(&sparse_irq_lock, flags);

        return desc;
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS-1] = {
                .status = IRQ_DISABLED,
                .chip = &no_irq_chip,
                .handle_irq = handle_bad_irq,
                .depth = 1,
                .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
#ifdef CONFIG_SMP
                .affinity = CPU_MASK_ALL
#endif
        }
};

int __init early_irq_init(void)
{
        struct irq_desc *desc;
        int count;
        int i;

        init_irq_default_affinity();

        desc = irq_desc;
        count = ARRAY_SIZE(irq_desc);

        for (i = 0; i < count; i++)
                desc[i].irq = i;

        return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
{
        return irq_to_desc(irq);
}
#endif /* !CONFIG_SPARSE_IRQ */
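
/*
 * In either configuration irq_to_desc() can return NULL (an out-of-range
 * number, or with CONFIG_SPARSE_IRQ a slot that was never allocated), so
 * callers must check.  A minimal, hypothetical sketch:
 */
#if 0
static int example_irq_is_disabled(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc)
                return -EINVAL;         /* no such interrupt */

        return (desc->status & IRQ_DISABLED) != 0;
}
#endif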

/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this for itself.
 */
static void ack_bad(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        print_irq_desc(irq, desc);
        ack_bad_irq(irq);
}

/*
 * NOP functions
 */
static void noop(unsigned int irq)
{
}

static unsigned int noop_ret(unsigned int irq)
{
        return 0;
}

/*
 * Generic no controller implementation
 */
struct irq_chip no_irq_chip = {
        .name           = "none",
        .startup        = noop_ret,
        .shutdown       = noop,
        .enable         = noop,
        .disable        = noop,
        .ack            = ack_bad,
        .end            = noop,
};

/*
 * Generic dummy implementation which can be used for
 * really dumb interrupt sources
 */
struct irq_chip dummy_irq_chip = {
        .name           = "dummy",
        .startup        = noop_ret,
        .shutdown       = noop,
        .enable         = noop,
        .disable        = noop,
        .ack            = noop,
        .mask           = noop,
        .unmask         = noop,
        .end            = noop,
};
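
/*
 * dummy_irq_chip suits demultiplexed interrupts that need no hardware
 * ack/mask of their own, e.g. the child interrupts of a GPIO expander.
 * A hypothetical sketch using the helpers from <linux/irq.h>:
 */
#if 0
static void example_setup_demux_child(unsigned int irq)
{
        set_irq_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
}
#endif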

/*
 * Special, empty irq handler:
 */
irqreturn_t no_action(int cpl, void *dev_id)
{
        return IRQ_NONE;
}
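
/*
 * no_action matches irq_handler_t, so it can back an irqaction whose only
 * purpose is to keep a line claimed and enabled.  A hypothetical sketch,
 * modelled on the arch timer-irq pattern of this era:
 */
#if 0
static struct irqaction example_action = {
        .handler = no_action,
        .name    = "example",
};

static void __init example_claim_irq(unsigned int irq)
{
        setup_irq(irq, &example_action);
}
#endif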

DEFINE_TRACE(irq_handler_entry);
DEFINE_TRACE(irq_handler_exit);
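
/*
 * The two tracepoints fire around every action handler invocation in
 * handle_IRQ_event() below.  A hypothetical sketch of attaching a probe
 * with the tracepoint API of this era (the probe signature must match
 * the DECLARE_TRACE() prototype in <trace/irq.h>):
 */
#if 0
static void example_entry_probe(int irq, struct irqaction *action)
{
        /* runs in hardirq context, so keep it cheap */
        pr_debug("irq %d -> handler %s\n", irq, action->name);
}

static int __init example_attach_probe(void)
{
        return register_trace_irq_handler_entry(example_entry_probe);
}
#endif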

/**
 * handle_IRQ_event - irq action chain handler
 * @irq:        the interrupt number
 * @action:     the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
        irqreturn_t ret, retval = IRQ_NONE;
        unsigned int status = 0;

        if (!(action->flags & IRQF_DISABLED))
                local_irq_enable_in_hardirq();

        do {
                trace_irq_handler_entry(irq, action);
                ret = action->handler(irq, action->dev_id);
                trace_irq_handler_exit(irq, action, ret);
                if (ret == IRQ_HANDLED)
                        status |= action->flags;
                retval |= ret;
                action = action->next;
        } while (action);

        if (status & IRQF_SAMPLE_RANDOM)
                add_interrupt_randomness(irq);
        local_irq_disable();

        return retval;
}
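
/*
 * The OR-accumulation above is why a handler on a shared line must return
 * IRQ_NONE when its device did not raise the interrupt; note_interrupt()
 * depends on that to spot spurious interrupts.  A minimal, hypothetical
 * driver-side sketch (the example_dev helpers are illustrative):
 */
#if 0
static irqreturn_t example_handler(int irq, void *dev_id)
{
        struct example_dev *dev = dev_id;

        if (!example_dev_irq_pending(dev))
                return IRQ_NONE;        /* not ours: shared line */

        example_dev_ack(dev);
        return IRQ_HANDLED;
}
#endif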

#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
/**
 * __do_IRQ - original all-in-one high-level IRQ handler
 * @irq:        the interrupt number
 *
 * __do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * This is the original x86 implementation which is used for every
 * interrupt type.
 */
unsigned int __do_IRQ(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action;
        unsigned int status;

        kstat_incr_irqs_this_cpu(irq, desc);

        if (CHECK_IRQ_PER_CPU(desc->status)) {
                irqreturn_t action_ret;

                /*
                 * No locking required for CPU-local interrupts:
                 */
                if (desc->chip->ack) {
                        desc->chip->ack(irq);
                        /* get new one */
                        desc = irq_remap_to_desc(irq, desc);
                }
                if (likely(!(desc->status & IRQ_DISABLED))) {
                        action_ret = handle_IRQ_event(irq, desc->action);
                        if (!noirqdebug)
                                note_interrupt(irq, desc, action_ret);
                }
                desc->chip->end(irq);
                return 1;
        }

        spin_lock(&desc->lock);
        if (desc->chip->ack) {
                desc->chip->ack(irq);
                desc = irq_remap_to_desc(irq, desc);
        }
        /*
         * REPLAY is when Linux resends an IRQ that was dropped earlier
         * WAITING is used by probe to mark irqs that are being tested
         */
        status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
        status |= IRQ_PENDING; /* we _want_ to handle it */

        /*
         * If the IRQ is disabled for whatever reason, we cannot
         * use the action we have.
         */
        action = NULL;
        if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
                action = desc->action;
                status &= ~IRQ_PENDING; /* we commit to handling */
                status |= IRQ_INPROGRESS; /* we are handling it */
        }
        desc->status = status;

        /*
         * If there is no IRQ handler or it was disabled, exit early.
         * Since we set PENDING, if another processor is handling
         * a different instance of this same irq, the other processor
         * will take care of it.
         */
        if (unlikely(!action))
                goto out;

        /*
         * Edge triggered interrupts need to remember
         * pending events.
         * This applies to any hw interrupts that allow a second
         * instance of the same irq to arrive while we are in do_IRQ
         * or in the handler. But the code here only handles the _second_
         * instance of the irq, not the third or fourth. So it is mostly
         * useful for irq hardware that does not mask cleanly in an
         * SMP environment.
         */
        for (;;) {
                irqreturn_t action_ret;

                spin_unlock(&desc->lock);

                action_ret = handle_IRQ_event(irq, action);
                if (!noirqdebug)
                        note_interrupt(irq, desc, action_ret);

                spin_lock(&desc->lock);
                if (likely(!(desc->status & IRQ_PENDING)))
                        break;
                desc->status &= ~IRQ_PENDING;
        }
        desc->status &= ~IRQ_INPROGRESS;

out:
        /*
         * The ->end() handler has to deal with interrupts which got
         * disabled while the handler was running.
         */
        desc->chip->end(irq);
        spin_unlock(&desc->lock);

        return 1;
}
#endif

void early_init_irq_lock_class(void)
{
        struct irq_desc *desc;
        int i;

        for_each_irq_desc(i, desc) {
                lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        }
}

#ifdef CONFIG_SPARSE_IRQ
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc = irq_to_desc(irq);
        return desc ? desc->kstat_irqs[cpu] : 0;
}
#endif
EXPORT_SYMBOL(kstat_irqs_cpu);
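
/*
 * kstat_irqs_cpu() reports a single CPU's count; a system-wide total is
 * obtained by summing over the possible CPUs.  A hypothetical sketch:
 */
#if 0
static unsigned int example_kstat_irqs(unsigned int irq)
{
        unsigned int sum = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                sum += kstat_irqs_cpu(irq, cpu);

        return sum;
}
#endif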