arch/powerpc/kernel/irq.c (net-next-2.6.git)
1 /*
2  *  Derived from arch/i386/kernel/irq.c
3  *    Copyright (C) 1992 Linus Torvalds
4  *  Adapted from arch/i386 by Gary Thomas
5  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6  *  Updated and modified by Cort Dougan <cort@fsmlabs.com>
7  *    Copyright (C) 1996-2001 Cort Dougan
8  *  Adapted for Power Macintosh by Paul Mackerras
9  *    Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
10  *
11  * This program is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU General Public License
13  * as published by the Free Software Foundation; either version
14  * 2 of the License, or (at your option) any later version.
15  *
16  * This file contains the code used by various IRQ handling routines:
17  * asking for different IRQs should be done through these routines
18  * instead of just grabbing them. Thus setups with different IRQ numbers
19  * shouldn't result in any weird surprises, and installing new handlers
20  * should be easier.
21  *
22  * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
23  * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
24  * mask register (of which only 16 are defined), hence the weird shifting
25  * and complement of the cached_irq_mask.  I want to be able to stuff
26  * this right into the SIU SMASK register.
27  * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
28  * to reduce code space and undefined function references.
29  */
30
31 #undef DEBUG
32
33 #include <linux/module.h>
34 #include <linux/threads.h>
35 #include <linux/kernel_stat.h>
36 #include <linux/signal.h>
37 #include <linux/sched.h>
38 #include <linux/ptrace.h>
39 #include <linux/ioport.h>
40 #include <linux/interrupt.h>
41 #include <linux/timex.h>
42 #include <linux/init.h>
43 #include <linux/slab.h>
44 #include <linux/delay.h>
45 #include <linux/irq.h>
46 #include <linux/seq_file.h>
47 #include <linux/cpumask.h>
48 #include <linux/profile.h>
49 #include <linux/bitops.h>
50 #include <linux/list.h>
51 #include <linux/radix-tree.h>
52 #include <linux/mutex.h>
53 #include <linux/bootmem.h>
54 #include <linux/pci.h>
55 #include <linux/debugfs.h>
56 #include <linux/perf_event.h>
57
58 #include <asm/uaccess.h>
59 #include <asm/system.h>
60 #include <asm/io.h>
61 #include <asm/pgtable.h>
62 #include <asm/irq.h>
63 #include <asm/cache.h>
64 #include <asm/prom.h>
65 #include <asm/ptrace.h>
66 #include <asm/machdep.h>
67 #include <asm/udbg.h>
68 #ifdef CONFIG_PPC64
69 #include <asm/paca.h>
70 #include <asm/firmware.h>
71 #include <asm/lv1call.h>
72 #endif
73 #define CREATE_TRACE_POINTS
74 #include <asm/trace.h>
75
76 DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
77 EXPORT_PER_CPU_SYMBOL(irq_stat);
78
79 int __irq_offset_value;
80 static int ppc_spurious_interrupts;
81
82 #ifdef CONFIG_PPC32
83 EXPORT_SYMBOL(__irq_offset_value);
84 atomic_t ppc_n_lost_interrupts;
85
86 #ifdef CONFIG_TAU_INT
87 extern int tau_initialized;
88 extern int tau_interrupts(int);
89 #endif
90 #endif /* CONFIG_PPC32 */
91
92 #ifdef CONFIG_PPC64
93
94 #ifndef CONFIG_SPARSE_IRQ
95 EXPORT_SYMBOL(irq_desc);
96 #endif
97
98 int distribute_irqs = 1;
99
100 static inline notrace unsigned long get_hard_enabled(void)
101 {
102         unsigned long enabled;
103
104         __asm__ __volatile__("lbz %0,%1(13)"
105         : "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));
106
107         return enabled;
108 }
109
110 static inline notrace void set_soft_enabled(unsigned long enable)
111 {
112         __asm__ __volatile__("stb %0,%1(13)"
113         : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
114 }
115
116 notrace void raw_local_irq_restore(unsigned long en)
117 {
118         /*
119          * get_paca()->soft_enabled = en;
120          * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
121          * That was allowed before, and in such a case we do need to take care
122          * that gcc will set soft_enabled directly via r13, not choose to use
123          * an intermediate register, lest we're preempted to a different cpu.
124          */
125         set_soft_enabled(en);
126         if (!en)
127                 return;
128
129 #ifdef CONFIG_PPC_STD_MMU_64
130         if (firmware_has_feature(FW_FEATURE_ISERIES)) {
131                 /*
132                  * Do we need to disable preemption here?  Not really: in the
133                  * unlikely event that we're preempted to a different cpu in
134                  * between getting r13, loading its lppaca_ptr, and loading
135                  * its any_int, we might call iseries_handle_interrupts without
136                  * an interrupt pending on the new cpu, but that's no disaster,
137                  * is it?  And the business of preempting us off the old cpu
138                  * would itself involve a local_irq_restore which handles the
139                  * interrupt to that cpu.
140                  *
141                  * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
142                  * to avoid any preemption checking added into get_paca().
143                  */
144                 if (local_paca->lppaca_ptr->int_dword.any_int)
145                         iseries_handle_interrupts();
146         }
147 #endif /* CONFIG_PPC_STD_MMU_64 */
148
149         if (test_perf_event_pending()) {
150                 clear_perf_event_pending();
151                 perf_event_do_pending();
152         }
153
154         /*
155          * if (get_paca()->hard_enabled) return;
156          * But again we need to take care that gcc gets hard_enabled directly
157          * via r13, not choose to use an intermediate register, lest we're
158          * preempted to a different cpu in between the two instructions.
159          */
160         if (get_hard_enabled())
161                 return;
162
163         /*
164          * Need to hard-enable interrupts here.  Since currently disabled,
165          * no need to take further asm precautions against preemption; but
166          * use local_paca instead of get_paca() to avoid preemption checking.
167          */
168         local_paca->hard_enabled = en;
169         if ((int)mfspr(SPRN_DEC) < 0)
170                 mtspr(SPRN_DEC, 1);
171
172         /*
173          * Force the delivery of pending soft-disabled interrupts on PS3.
174          * Any HV call will have this side effect.
175          */
176         if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
177                 u64 tmp;
178                 lv1_get_version_info(&tmp);
179         }
180
181         __hard_irq_enable();
182 }
183 EXPORT_SYMBOL(raw_local_irq_restore);
184 #endif /* CONFIG_PPC64 */
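/*
 * Editorial note (not part of the original file): the lazy-disable scheme
 * implemented above means local_irq_disable() merely clears soft_enabled in
 * the PACA and leaves MSR[EE] on; only when an interrupt actually arrives
 * while soft-disabled does the low-level entry code clear hard_enabled and
 * mask at the hardware level.  raw_local_irq_restore(1) therefore has to
 * replay whatever was held off, roughly:
 *
 *	local_irq_save(flags);      // soft_enabled = 0, MSR[EE] left alone
 *	// ... critical section; a decrementer may fire and be held pending ...
 *	local_irq_restore(flags);   // soft_enabled = 1, DEC forced to expire
 *	                            // again if needed, then __hard_irq_enable()
 */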
185
186 static int show_other_interrupts(struct seq_file *p, int prec)
187 {
188         int j;
189
190 #if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
191         if (tau_initialized) {
192                 seq_printf(p, "%*s: ", prec, "TAU");
193                 for_each_online_cpu(j)
194                         seq_printf(p, "%10u ", tau_interrupts(j));
195                 seq_puts(p, "  PowerPC             Thermal Assist (cpu temp)\n");
196         }
197 #endif /* CONFIG_PPC32 && CONFIG_TAU_INT */
198
199         seq_printf(p, "%*s: ", prec, "LOC");
200         for_each_online_cpu(j)
201                 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
202         seq_printf(p, "  Local timer interrupts\n");
203
204         seq_printf(p, "%*s: ", prec, "CNT");
205         for_each_online_cpu(j)
206                 seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
207         seq_printf(p, "  Performance monitoring interrupts\n");
208
209         seq_printf(p, "%*s: ", prec, "MCE");
210         for_each_online_cpu(j)
211                 seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
212         seq_printf(p, "  Machine check exceptions\n");
213
214         seq_printf(p, "%*s: %10u\n", prec, "BAD", ppc_spurious_interrupts);
215
216         return 0;
217 }
218
219 int show_interrupts(struct seq_file *p, void *v)
220 {
221         unsigned long flags, any_count = 0;
222         int i = *(loff_t *) v, j, prec;
223         struct irqaction *action;
224         struct irq_desc *desc;
225
226         if (i > nr_irqs)
227                 return 0;
228
229         for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
230                 j *= 10;
231
232         if (i == nr_irqs)
233                 return show_other_interrupts(p, prec);
234
235         /* print header */
236         if (i == 0) {
237                 seq_printf(p, "%*s", prec + 8, "");
238                 for_each_online_cpu(j)
239                         seq_printf(p, "CPU%-8d", j);
240                 seq_putc(p, '\n');
241         }
242
243         desc = irq_to_desc(i);
244         if (!desc)
245                 return 0;
246
247         raw_spin_lock_irqsave(&desc->lock, flags);
248         for_each_online_cpu(j)
249                 any_count |= kstat_irqs_cpu(i, j);
250         action = desc->action;
251         if (!action && !any_count)
252                 goto out;
253
254         seq_printf(p, "%*d: ", prec, i);
255         for_each_online_cpu(j)
256                 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
257
258         if (desc->chip)
259                 seq_printf(p, "  %-16s", desc->chip->name);
260         else
261                 seq_printf(p, "  %-16s", "None");
262         seq_printf(p, " %-8s", (desc->status & IRQ_LEVEL) ? "Level" : "Edge");
263
264         if (action) {
265                 seq_printf(p, "     %s", action->name);
266                 while ((action = action->next) != NULL)
267                         seq_printf(p, ", %s", action->name);
268         }
269
270         seq_putc(p, '\n');
271 out:
272         raw_spin_unlock_irqrestore(&desc->lock, flags);
273         return 0;
274 }
275
276 /*
277  * /proc/stat helpers
278  */
279 u64 arch_irq_stat_cpu(unsigned int cpu)
280 {
281         u64 sum = per_cpu(irq_stat, cpu).timer_irqs;
282
283         sum += per_cpu(irq_stat, cpu).pmu_irqs;
284         sum += per_cpu(irq_stat, cpu).mce_exceptions;
285
286         return sum;
287 }
288
289 u64 arch_irq_stat(void)
290 {
291         u64 sum = ppc_spurious_interrupts;
292
293         return sum;
294 }
295
296 #ifdef CONFIG_HOTPLUG_CPU
297 void fixup_irqs(cpumask_t map)
298 {
299         struct irq_desc *desc;
300         unsigned int irq;
301         static int warned;
302
303         for_each_irq(irq) {
304                 cpumask_t mask;
305
306                 desc = irq_to_desc(irq);
307                 if (!desc || (desc->status & IRQ_PER_CPU))
308                         continue;
309
310                 cpumask_and(&mask, desc->affinity, &map);
311                 if (any_online_cpu(mask) == NR_CPUS) {
312                         printk("Breaking affinity for irq %i\n", irq);
313                         mask = map;
314                 }
315                 if (desc->chip->set_affinity)
316                         desc->chip->set_affinity(irq, &mask);
317                 else if (desc->action && !(warned++))
318                         printk("Cannot set affinity for irq %i\n", irq);
319         }
320
321         local_irq_enable();
322         mdelay(1);
323         local_irq_disable();
324 }
325 #endif
326
327 #ifdef CONFIG_IRQSTACKS
328 static inline void handle_one_irq(unsigned int irq)
329 {
330         struct thread_info *curtp, *irqtp;
331         unsigned long saved_sp_limit;
332         struct irq_desc *desc;
333
334         /* Switch to the irq stack to handle this */
335         curtp = current_thread_info();
336         irqtp = hardirq_ctx[smp_processor_id()];
337
338         if (curtp == irqtp) {
339                 /* We're already on the irq stack, just handle it */
340                 generic_handle_irq(irq);
341                 return;
342         }
343
344         desc = irq_to_desc(irq);
345         saved_sp_limit = current->thread.ksp_limit;
346
347         irqtp->task = curtp->task;
348         irqtp->flags = 0;
349
350         /* Copy the softirq bits in preempt_count so that the
351          * softirq checks work in the hardirq context. */
352         irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
353                                (curtp->preempt_count & SOFTIRQ_MASK);
354
355         current->thread.ksp_limit = (unsigned long)irqtp +
356                 _ALIGN_UP(sizeof(struct thread_info), 16);
357
358         call_handle_irq(irq, desc, irqtp, desc->handle_irq);
359         current->thread.ksp_limit = saved_sp_limit;
360         irqtp->task = NULL;
361
362         /* Set any flag that may have been set on the
363          * alternate stack
364          */
365         if (irqtp->flags)
366                 set_bits(irqtp->flags, &curtp->flags);
367 }
368 #else
369 static inline void handle_one_irq(unsigned int irq)
370 {
371         generic_handle_irq(irq);
372 }
373 #endif
374
375 static inline void check_stack_overflow(void)
376 {
377 #ifdef CONFIG_DEBUG_STACKOVERFLOW
378         long sp;
379
380         sp = __get_SP() & (THREAD_SIZE-1);
381
382         /* check for stack overflow: is there less than 2KB free? */
383         if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
384                 printk("do_IRQ: stack overflow: %ld\n",
385                         sp - sizeof(struct thread_info));
386                 dump_stack();
387         }
388 #endif
389 }
390
391 void do_IRQ(struct pt_regs *regs)
392 {
393         struct pt_regs *old_regs = set_irq_regs(regs);
394         unsigned int irq;
395
396         trace_irq_entry(regs);
397
398         irq_enter();
399
400         check_stack_overflow();
401
402         irq = ppc_md.get_irq();
403
404         if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
405                 handle_one_irq(irq);
406         else if (irq != NO_IRQ_IGNORE)
407                 /* That's not SMP safe ... but who cares ? */
408                 ppc_spurious_interrupts++;
409
410         irq_exit();
411         set_irq_regs(old_regs);
412
413 #ifdef CONFIG_PPC_ISERIES
414         if (firmware_has_feature(FW_FEATURE_ISERIES) &&
415                         get_lppaca()->int_dword.fields.decr_int) {
416                 get_lppaca()->int_dword.fields.decr_int = 0;
417                 /* Signal a fake decrementer interrupt */
418                 timer_interrupt(regs);
419         }
420 #endif
421
422         trace_irq_exit(regs);
423 }
424
425 void __init init_IRQ(void)
426 {
427         if (ppc_md.init_IRQ)
428                 ppc_md.init_IRQ();
429
430         exc_lvl_ctx_init();
431
432         irq_ctx_init();
433 }
434
435 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
436 struct thread_info   *critirq_ctx[NR_CPUS] __read_mostly;
437 struct thread_info    *dbgirq_ctx[NR_CPUS] __read_mostly;
438 struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;
439
440 void exc_lvl_ctx_init(void)
441 {
442         struct thread_info *tp;
443         int i;
444
445         for_each_possible_cpu(i) {
446                 memset((void *)critirq_ctx[i], 0, THREAD_SIZE);
447                 tp = critirq_ctx[i];
448                 tp->cpu = i;
449                 tp->preempt_count = 0;
450
451 #ifdef CONFIG_BOOKE
452                 memset((void *)dbgirq_ctx[i], 0, THREAD_SIZE);
453                 tp = dbgirq_ctx[i];
454                 tp->cpu = i;
455                 tp->preempt_count = 0;
456
457                 memset((void *)mcheckirq_ctx[i], 0, THREAD_SIZE);
458                 tp = mcheckirq_ctx[i];
459                 tp->cpu = i;
460                 tp->preempt_count = HARDIRQ_OFFSET;
461 #endif
462         }
463 }
464 #endif
465
466 #ifdef CONFIG_IRQSTACKS
467 struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
468 struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;
469
470 void irq_ctx_init(void)
471 {
472         struct thread_info *tp;
473         int i;
474
475         for_each_possible_cpu(i) {
476                 memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
477                 tp = softirq_ctx[i];
478                 tp->cpu = i;
479                 tp->preempt_count = 0;
480
481                 memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
482                 tp = hardirq_ctx[i];
483                 tp->cpu = i;
484                 tp->preempt_count = HARDIRQ_OFFSET;
485         }
486 }
487
488 static inline void do_softirq_onstack(void)
489 {
490         struct thread_info *curtp, *irqtp;
491         unsigned long saved_sp_limit = current->thread.ksp_limit;
492
493         curtp = current_thread_info();
494         irqtp = softirq_ctx[smp_processor_id()];
495         irqtp->task = curtp->task;
496         current->thread.ksp_limit = (unsigned long)irqtp +
497                                     _ALIGN_UP(sizeof(struct thread_info), 16);
498         call_do_softirq(irqtp);
499         current->thread.ksp_limit = saved_sp_limit;
500         irqtp->task = NULL;
501 }
502
503 #else
504 #define do_softirq_onstack()    __do_softirq()
505 #endif /* CONFIG_IRQSTACKS */
506
507 void do_softirq(void)
508 {
509         unsigned long flags;
510
511         if (in_interrupt())
512                 return;
513
514         local_irq_save(flags);
515
516         if (local_softirq_pending())
517                 do_softirq_onstack();
518
519         local_irq_restore(flags);
520 }
521
522
523 /*
524  * IRQ controller and virtual interrupts
525  */
526
527 static LIST_HEAD(irq_hosts);
528 static DEFINE_SPINLOCK(irq_big_lock);
529 static unsigned int revmap_trees_allocated;
530 static DEFINE_MUTEX(revmap_trees_mutex);
531 struct irq_map_entry irq_map[NR_IRQS];
532 static unsigned int irq_virq_count = NR_IRQS;
533 static struct irq_host *irq_default_host;
534
535 irq_hw_number_t virq_to_hw(unsigned int virq)
536 {
537         return irq_map[virq].hwirq;
538 }
539 EXPORT_SYMBOL_GPL(virq_to_hw);
540
541 static int default_irq_host_match(struct irq_host *h, struct device_node *np)
542 {
543         return h->of_node != NULL && h->of_node == np;
544 }
545
546 struct irq_host *irq_alloc_host(struct device_node *of_node,
547                                 unsigned int revmap_type,
548                                 unsigned int revmap_arg,
549                                 struct irq_host_ops *ops,
550                                 irq_hw_number_t inval_irq)
551 {
552         struct irq_host *host;
553         unsigned int size = sizeof(struct irq_host);
554         unsigned int i;
555         unsigned int *rmap;
556         unsigned long flags;
557
558         /* Allocate structure and revmap table if using linear mapping */
559         if (revmap_type == IRQ_HOST_MAP_LINEAR)
560                 size += revmap_arg * sizeof(unsigned int);
561         host = zalloc_maybe_bootmem(size, GFP_KERNEL);
562         if (host == NULL)
563                 return NULL;
564
565         /* Fill structure */
566         host->revmap_type = revmap_type;
567         host->inval_irq = inval_irq;
568         host->ops = ops;
569         host->of_node = of_node_get(of_node);
570
571         if (host->ops->match == NULL)
572                 host->ops->match = default_irq_host_match;
573
574         spin_lock_irqsave(&irq_big_lock, flags);
575
576         /* If it's a legacy controller, check for duplicates and
577          * mark it as allocated (we use the irq 0 host pointer for that)
578          */
579         if (revmap_type == IRQ_HOST_MAP_LEGACY) {
580                 if (irq_map[0].host != NULL) {
581                         spin_unlock_irqrestore(&irq_big_lock, flags);
582                         /* If we are in early boot, we can't free the structure,
583                          * too bad...
584                          * this will be fixed once slab is made available early
585                          * instead of the current cruft
586                          */
587                         if (mem_init_done)
588                                 kfree(host);
589                         return NULL;
590                 }
591                 irq_map[0].host = host;
592         }
593
594         list_add(&host->link, &irq_hosts);
595         spin_unlock_irqrestore(&irq_big_lock, flags);
596
597         /* Additional setup per revmap type */
598         switch(revmap_type) {
599         case IRQ_HOST_MAP_LEGACY:
600                 /* 0 is always the invalid number for legacy */
601                 host->inval_irq = 0;
602                 /* set us up as the host for all legacy interrupts */
603                 for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
604                         irq_map[i].hwirq = i;
605                         smp_wmb();
606                         irq_map[i].host = host;
607                         smp_wmb();
608
609                         /* Clear norequest flags */
610                         irq_to_desc(i)->status &= ~IRQ_NOREQUEST;
611
612                         /* Legacy flags are left to default at this point,
613                          * one can then use irq_create_mapping() to
614                          * explicitly change them
615                          */
616                         ops->map(host, i, i);
617                 }
618                 break;
619         case IRQ_HOST_MAP_LINEAR:
620                 rmap = (unsigned int *)(host + 1);
621                 for (i = 0; i < revmap_arg; i++)
622                         rmap[i] = NO_IRQ;
623                 host->revmap_data.linear.size = revmap_arg;
624                 smp_wmb();
625                 host->revmap_data.linear.revmap = rmap;
626                 break;
627         default:
628                 break;
629         }
630
631         pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);
632
633         return host;
634 }
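/*
 * Usage sketch (editorial, not from the original file; all "my_pic_*" names
 * are hypothetical): a platform PIC driver with, say, 64 hardware sources
 * typically allocates a linear-revmap host from its device node like this:
 *
 *	static struct irq_host_ops my_pic_ops = {
 *		.map   = my_pic_map,
 *		.xlate = my_pic_xlate,
 *	};
 *
 *	host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 64, &my_pic_ops, 0);
 *	if (host == NULL)
 *		return -ENOMEM;
 */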
635
636 struct irq_host *irq_find_host(struct device_node *node)
637 {
638         struct irq_host *h, *found = NULL;
639         unsigned long flags;
640
641         /* We might want to match the legacy controller last since
642          * it might potentially be set to match all interrupts in
643          * the absence of a device node. This hasn't been a problem
644          * so far, though...
645          */
646         spin_lock_irqsave(&irq_big_lock, flags);
647         list_for_each_entry(h, &irq_hosts, link)
648                 if (h->ops->match(h, node)) {
649                         found = h;
650                         break;
651                 }
652         spin_unlock_irqrestore(&irq_big_lock, flags);
653         return found;
654 }
655 EXPORT_SYMBOL_GPL(irq_find_host);
656
657 void irq_set_default_host(struct irq_host *host)
658 {
659         pr_debug("irq: Default host set to @0x%p\n", host);
660
661         irq_default_host = host;
662 }
663
664 void irq_set_virq_count(unsigned int count)
665 {
666         pr_debug("irq: Trying to set virq count to %d\n", count);
667
668         BUG_ON(count < NUM_ISA_INTERRUPTS);
669         if (count < NR_IRQS)
670                 irq_virq_count = count;
671 }
672
673 static int irq_setup_virq(struct irq_host *host, unsigned int virq,
674                             irq_hw_number_t hwirq)
675 {
676         struct irq_desc *desc;
677
678         desc = irq_to_desc_alloc_node(virq, 0);
679         if (!desc) {
680                 pr_debug("irq: -> allocating desc failed\n");
681                 goto error;
682         }
683
684         /* Clear IRQ_NOREQUEST flag */
685         desc->status &= ~IRQ_NOREQUEST;
686
687         /* map it */
688         smp_wmb();
689         irq_map[virq].hwirq = hwirq;
690         smp_mb();
691
692         if (host->ops->map(host, virq, hwirq)) {
693                 pr_debug("irq: -> mapping failed, freeing\n");
694                 goto error;
695         }
696
697         return 0;
698
699 error:
700         irq_free_virt(virq, 1);
701         return -1;
702 }
703
704 unsigned int irq_create_direct_mapping(struct irq_host *host)
705 {
706         unsigned int virq;
707
708         if (host == NULL)
709                 host = irq_default_host;
710
711         BUG_ON(host == NULL);
712         WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);
713
714         virq = irq_alloc_virt(host, 1, 0);
715         if (virq == NO_IRQ) {
716                 pr_debug("irq: create_direct virq allocation failed\n");
717                 return NO_IRQ;
718         }
719
720         pr_debug("irq: create_direct obtained virq %d\n", virq);
721
722         if (irq_setup_virq(host, virq, virq))
723                 return NO_IRQ;
724
725         return virq;
726 }
727
728 unsigned int irq_create_mapping(struct irq_host *host,
729                                 irq_hw_number_t hwirq)
730 {
731         unsigned int virq, hint;
732
733         pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);
734
735         /* Look for the default host if necessary */
736         if (host == NULL)
737                 host = irq_default_host;
738         if (host == NULL) {
739                 printk(KERN_WARNING "irq_create_mapping called for"
740                        " NULL host, hwirq=%lx\n", hwirq);
741                 WARN_ON(1);
742                 return NO_IRQ;
743         }
744         pr_debug("irq: -> using host @%p\n", host);
745
746         /* Check if a mapping already exists; if it does, call
747          * host->ops->remap() to update the flags
748          */
749         virq = irq_find_mapping(host, hwirq);
750         if (virq != NO_IRQ) {
751                 if (host->ops->remap)
752                         host->ops->remap(host, virq, hwirq);
753                 pr_debug("irq: -> existing mapping on virq %d\n", virq);
754                 return virq;
755         }
756
757         /* Get a virtual interrupt number */
758         if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
759                 /* Handle legacy */
760                 virq = (unsigned int)hwirq;
761                 if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
762                         return NO_IRQ;
763                 return virq;
764         } else {
765                 /* Allocate a virtual interrupt number */
766                 hint = hwirq % irq_virq_count;
767                 virq = irq_alloc_virt(host, 1, hint);
768                 if (virq == NO_IRQ) {
769                         pr_debug("irq: -> virq allocation failed\n");
770                         return NO_IRQ;
771                 }
772         }
773
774         if (irq_setup_virq(host, virq, hwirq))
775                 return NO_IRQ;
776
777         printk(KERN_DEBUG "irq: irq %lu on host %s mapped to virtual irq %u\n",
778                 hwirq, host->of_node ? host->of_node->full_name : "null", virq);
779
780         return virq;
781 }
782 EXPORT_SYMBOL_GPL(irq_create_mapping);
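/*
 * Editorial note: the host's ->map() callback invoked from irq_setup_virq()
 * above is where a PIC driver normally attaches its irq_chip and flow
 * handler.  A minimal sketch (hypothetical "my_pic_*" names):
 *
 *	static int my_pic_map(struct irq_host *h, unsigned int virq,
 *			      irq_hw_number_t hw)
 *	{
 *		set_irq_chip_and_handler(virq, &my_pic_chip, handle_level_irq);
 *		return 0;
 *	}
 */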
783
784 unsigned int irq_create_of_mapping(struct device_node *controller,
785                                    const u32 *intspec, unsigned int intsize)
786 {
787         struct irq_host *host;
788         irq_hw_number_t hwirq;
789         unsigned int type = IRQ_TYPE_NONE;
790         unsigned int virq;
791
792         if (controller == NULL)
793                 host = irq_default_host;
794         else
795                 host = irq_find_host(controller);
796         if (host == NULL) {
797                 printk(KERN_WARNING "irq: no irq host found for %s !\n",
798                        controller->full_name);
799                 return NO_IRQ;
800         }
801
802         /* If host has no translation, then we assume interrupt line */
803         if (host->ops->xlate == NULL)
804                 hwirq = intspec[0];
805         else {
806                 if (host->ops->xlate(host, controller, intspec, intsize,
807                                      &hwirq, &type))
808                         return NO_IRQ;
809         }
810
811         /* Create mapping */
812         virq = irq_create_mapping(host, hwirq);
813         if (virq == NO_IRQ)
814                 return virq;
815
816         /* Set the type if specified and different from the current one */
817         if (type != IRQ_TYPE_NONE &&
818             type != (irq_to_desc(virq)->status & IRQF_TRIGGER_MASK))
819                 set_irq_type(virq, type);
820         return virq;
821 }
822 EXPORT_SYMBOL_GPL(irq_create_of_mapping);
823
824 unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
825 {
826         struct of_irq oirq;
827
828         if (of_irq_map_one(dev, index, &oirq))
829                 return NO_IRQ;
830
831         return irq_create_of_mapping(oirq.controller, oirq.specifier,
832                                      oirq.size);
833 }
834 EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
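/*
 * Typical driver-side use of the above (editorial sketch; "np", "dev" and
 * my_irq_handler are hypothetical):
 *
 *	virq = irq_of_parse_and_map(np, 0);
 *	if (virq == NO_IRQ)
 *		return -ENODEV;
 *	ret = request_irq(virq, my_irq_handler, 0, "my-device", dev);
 */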
835
836 void irq_dispose_mapping(unsigned int virq)
837 {
838         struct irq_host *host;
839         irq_hw_number_t hwirq;
840
841         if (virq == NO_IRQ)
842                 return;
843
844         host = irq_map[virq].host;
845         WARN_ON (host == NULL);
846         if (host == NULL)
847                 return;
848
849         /* Never unmap legacy interrupts */
850         if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
851                 return;
852
853         /* remove chip and handler */
854         set_irq_chip_and_handler(virq, NULL, NULL);
855
856         /* Make sure it's completed */
857         synchronize_irq(virq);
858
859         /* Tell the PIC about it */
860         if (host->ops->unmap)
861                 host->ops->unmap(host, virq);
862         smp_mb();
863
864         /* Clear reverse map */
865         hwirq = irq_map[virq].hwirq;
866         switch(host->revmap_type) {
867         case IRQ_HOST_MAP_LINEAR:
868                 if (hwirq < host->revmap_data.linear.size)
869                         host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
870                 break;
871         case IRQ_HOST_MAP_TREE:
872                 /*
873                  * Check if the radix tree has been allocated yet; if not,
874                  * there is nothing to remove.
875                  */
876                 smp_rmb();
877                 if (revmap_trees_allocated < 1)
878                         break;
879                 mutex_lock(&revmap_trees_mutex);
880                 radix_tree_delete(&host->revmap_data.tree, hwirq);
881                 mutex_unlock(&revmap_trees_mutex);
882                 break;
883         }
884
885         /* Destroy map */
886         smp_mb();
887         irq_map[virq].hwirq = host->inval_irq;
888
889         /* Set some flags */
890         irq_to_desc(virq)->status |= IRQ_NOREQUEST;
891
892         /* Free it */
893         irq_free_virt(virq, 1);
894 }
895 EXPORT_SYMBOL_GPL(irq_dispose_mapping);
896
897 unsigned int irq_find_mapping(struct irq_host *host,
898                               irq_hw_number_t hwirq)
899 {
900         unsigned int i;
901         unsigned int hint = hwirq % irq_virq_count;
902
903         /* Look for the default host if necessary */
904         if (host == NULL)
905                 host = irq_default_host;
906         if (host == NULL)
907                 return NO_IRQ;
908
909         /* legacy -> bail early */
910         if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
911                 return hwirq;
912
913         /* Slow path does a linear search of the map */
914         if (hint < NUM_ISA_INTERRUPTS)
915                 hint = NUM_ISA_INTERRUPTS;
916         i = hint;
917         do  {
918                 if (irq_map[i].host == host &&
919                     irq_map[i].hwirq == hwirq)
920                         return i;
921                 i++;
922                 if (i >= irq_virq_count)
923                         i = NUM_ISA_INTERRUPTS;
924         } while(i != hint);
925         return NO_IRQ;
926 }
927 EXPORT_SYMBOL_GPL(irq_find_mapping);
928
929
930 unsigned int irq_radix_revmap_lookup(struct irq_host *host,
931                                      irq_hw_number_t hwirq)
932 {
933         struct irq_map_entry *ptr;
934         unsigned int virq;
935
936         WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
937
938         /*
939          * Check if the radix tree exists and has been initialized.
940          * If not, we fall back to slow mode
941          */
942         if (revmap_trees_allocated < 2)
943                 return irq_find_mapping(host, hwirq);
944
945         /* Now try to resolve */
946         /*
947          * No rcu_read_lock(ing) needed, the ptr returned can't go away
948          * under us as it's referencing an entry in the static irq_map table.
949          */
950         ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);
951
952         /*
953          * If found in radix tree, then fine.
954          * Else fallback to linear lookup - this should not happen in practice
955          * as it means that we failed to insert the node in the radix tree.
956          */
957         if (ptr)
958                 virq = ptr - irq_map;
959         else
960                 virq = irq_find_mapping(host, hwirq);
961
962         return virq;
963 }
964
965 void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
966                              irq_hw_number_t hwirq)
967 {
968
969         WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
970
971         /*
972          * Check if the radix tree exists yet.
973          * If not, then the irq will be inserted into the tree when it gets
974          * initialized.
975          */
976         smp_rmb();
977         if (revmap_trees_allocated < 1)
978                 return;
979
980         if (virq != NO_IRQ) {
981                 mutex_lock(&revmap_trees_mutex);
982                 radix_tree_insert(&host->revmap_data.tree, hwirq,
983                                   &irq_map[virq]);
984                 mutex_unlock(&revmap_trees_mutex);
985         }
986 }
987
988 unsigned int irq_linear_revmap(struct irq_host *host,
989                                irq_hw_number_t hwirq)
990 {
991         unsigned int *revmap;
992
993         WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR);
994
995         /* Check revmap bounds */
996         if (unlikely(hwirq >= host->revmap_data.linear.size))
997                 return irq_find_mapping(host, hwirq);
998
999         /* Check if revmap was allocated */
1000         revmap = host->revmap_data.linear.revmap;
1001         if (unlikely(revmap == NULL))
1002                 return irq_find_mapping(host, hwirq);
1003
1004         /* Fill up revmap with slow path if no mapping found */
1005         if (unlikely(revmap[hwirq] == NO_IRQ))
1006                 revmap[hwirq] = irq_find_mapping(host, hwirq);
1007
1008         return revmap[hwirq];
1009 }
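/*
 * Editorial sketch (hypothetical names): a cascade handler for a linear-map
 * host usually reads the pending hardware source from its controller and
 * uses the fast reverse map above to get back to the Linux number:
 *
 *	hwirq = my_pic_read_pending(regs_base);
 *	virq  = irq_linear_revmap(host, hwirq);
 *	if (virq != NO_IRQ)
 *		generic_handle_irq(virq);
 */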
1010
1011 unsigned int irq_alloc_virt(struct irq_host *host,
1012                             unsigned int count,
1013                             unsigned int hint)
1014 {
1015         unsigned long flags;
1016         unsigned int i, j, found = NO_IRQ;
1017
1018         if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
1019                 return NO_IRQ;
1020
1021         spin_lock_irqsave(&irq_big_lock, flags);
1022
1023         /* Use hint for 1 interrupt if any */
1024         if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
1025             hint < irq_virq_count && irq_map[hint].host == NULL) {
1026                 found = hint;
1027                 goto hint_found;
1028         }
1029
1030         /* Look for count consecutive numbers in the allocatable
1031          * (non-legacy) space
1032          */
1033         for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) {
1034                 if (irq_map[i].host != NULL)
1035                         j = 0;
1036                 else
1037                         j++;
1038
1039                 if (j == count) {
1040                         found = i - count + 1;
1041                         break;
1042                 }
1043         }
1044         if (found == NO_IRQ) {
1045                 spin_unlock_irqrestore(&irq_big_lock, flags);
1046                 return NO_IRQ;
1047         }
1048  hint_found:
1049         for (i = found; i < (found + count); i++) {
1050                 irq_map[i].hwirq = host->inval_irq;
1051                 smp_wmb();
1052                 irq_map[i].host = host;
1053         }
1054         spin_unlock_irqrestore(&irq_big_lock, flags);
1055         return found;
1056 }
1057
1058 void irq_free_virt(unsigned int virq, unsigned int count)
1059 {
1060         unsigned long flags;
1061         unsigned int i;
1062
1063         WARN_ON (virq < NUM_ISA_INTERRUPTS);
1064         WARN_ON (count == 0 || (virq + count) > irq_virq_count);
1065
1066         spin_lock_irqsave(&irq_big_lock, flags);
1067         for (i = virq; i < (virq + count); i++) {
1068                 struct irq_host *host;
1069
1070                 if (i < NUM_ISA_INTERRUPTS ||
1071                     (virq + count) > irq_virq_count)
1072                         continue;
1073
1074                 host = irq_map[i].host;
1075                 irq_map[i].hwirq = host->inval_irq;
1076                 smp_wmb();
1077                 irq_map[i].host = NULL;
1078         }
1079         spin_unlock_irqrestore(&irq_big_lock, flags);
1080 }
1081
1082 int arch_early_irq_init(void)
1083 {
1084         struct irq_desc *desc;
1085         int i;
1086
1087         for (i = 0; i < NR_IRQS; i++) {
1088                 desc = irq_to_desc(i);
1089                 if (desc)
1090                         desc->status |= IRQ_NOREQUEST;
1091         }
1092
1093         return 0;
1094 }
1095
1096 int arch_init_chip_data(struct irq_desc *desc, int node)
1097 {
1098         desc->status |= IRQ_NOREQUEST;
1099         return 0;
1100 }
1101
1102 /* We need to create the radix trees late */
1103 static int irq_late_init(void)
1104 {
1105         struct irq_host *h;
1106         unsigned int i;
1107
1108         /*
1109          * No mutual exclusion with respect to accessors of the tree is needed
1110          * here as the synchronization is done via the state variable
1111          * revmap_trees_allocated.
1112          */
1113         list_for_each_entry(h, &irq_hosts, link) {
1114                 if (h->revmap_type == IRQ_HOST_MAP_TREE)
1115                         INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL);
1116         }
1117
1118         /*
1119          * Make sure the radix trees inits are visible before setting
1120          * Make sure the radix tree initializations are visible before setting
1121          */
1122         smp_wmb();
1123         revmap_trees_allocated = 1;
1124
1125         /*
1126          * Insert the reverse mapping for those interrupts already present
1127          * in irq_map[].
1128          */
1129         mutex_lock(&revmap_trees_mutex);
1130         for (i = 0; i < irq_virq_count; i++) {
1131                 if (irq_map[i].host &&
1132                     (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE))
1133                         radix_tree_insert(&irq_map[i].host->revmap_data.tree,
1134                                           irq_map[i].hwirq, &irq_map[i]);
1135         }
1136         mutex_unlock(&revmap_trees_mutex);
1137
1138         /*
1139          * Make sure the radix trees insertions are visible before setting
1140          * Make sure the radix tree insertions are visible before setting
1141          */
1142         smp_wmb();
1143         revmap_trees_allocated = 2;
1144
1145         return 0;
1146 }
1147 arch_initcall(irq_late_init);
1148
1149 #ifdef CONFIG_VIRQ_DEBUG
1150 static int virq_debug_show(struct seq_file *m, void *private)
1151 {
1152         unsigned long flags;
1153         struct irq_desc *desc;
1154         const char *p;
1155         char none[] = "none";
1156         int i;
1157
1158         seq_printf(m, "%-5s  %-7s  %-15s  %s\n", "virq", "hwirq",
1159                       "chip name", "host name");
1160
1161         for (i = 1; i < nr_irqs; i++) {
1162                 desc = irq_to_desc(i);
1163                 if (!desc)
1164                         continue;
1165
1166                 raw_spin_lock_irqsave(&desc->lock, flags);
1167
1168                 if (desc->action && desc->action->handler) {
1169                         seq_printf(m, "%5d  ", i);
1170                         seq_printf(m, "0x%05lx  ", virq_to_hw(i));
1171
1172                         if (desc->chip && desc->chip->name)
1173                                 p = desc->chip->name;
1174                         else
1175                                 p = none;
1176                         seq_printf(m, "%-15s  ", p);
1177
1178                         if (irq_map[i].host && irq_map[i].host->of_node)
1179                                 p = irq_map[i].host->of_node->full_name;
1180                         else
1181                                 p = none;
1182                         seq_printf(m, "%s\n", p);
1183                 }
1184
1185                 raw_spin_unlock_irqrestore(&desc->lock, flags);
1186         }
1187
1188         return 0;
1189 }
1190
1191 static int virq_debug_open(struct inode *inode, struct file *file)
1192 {
1193         return single_open(file, virq_debug_show, inode->i_private);
1194 }
1195
1196 static const struct file_operations virq_debug_fops = {
1197         .open = virq_debug_open,
1198         .read = seq_read,
1199         .llseek = seq_lseek,
1200         .release = single_release,
1201 };
1202
1203 static int __init irq_debugfs_init(void)
1204 {
1205         if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
1206                                  NULL, &virq_debug_fops) == NULL)
1207                 return -ENOMEM;
1208
1209         return 0;
1210 }
1211 __initcall(irq_debugfs_init);
1212 #endif /* CONFIG_VIRQ_DEBUG */
1213
1214 #ifdef CONFIG_PPC64
1215 static int __init setup_noirqdistrib(char *str)
1216 {
1217         distribute_irqs = 0;
1218         return 1;
1219 }
1220
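/*
 * Editorial note: booting with "noirqdistrib" clears distribute_irqs, which
 * ppc64 interrupt-controller code (the pSeries XICS backend, for instance)
 * consults when deciding whether to spread device interrupts across online
 * CPUs or leave them all routed to the boot CPU.
 */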
1221 __setup("noirqdistrib", setup_noirqdistrib);
1222 #endif /* CONFIG_PPC64 */