2 * Intel IO-APIC support for multi-Pentium hosts.
4 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
6 * Many thanks to Stig Venaas for trying out countless experimental
7 * patches and reporting/debugging problems patiently!
9 * (c) 1999, Multiple IO-APIC support, developed by
10 * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
11 * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
12 * further tested and cleaned up by Zach Brown <zab@redhat.com>
13 * and Ingo Molnar <mingo@redhat.com>
16 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
17 * thanks to Eric Gilmore
19 * for testing these extensively
20 * Paul Diefenbaugh : Added full ACPI support
24 #include <linux/interrupt.h>
25 #include <linux/init.h>
26 #include <linux/delay.h>
27 #include <linux/sched.h>
28 #include <linux/pci.h>
29 #include <linux/mc146818rtc.h>
30 #include <linux/compiler.h>
31 #include <linux/acpi.h>
32 #include <linux/module.h>
33 #include <linux/sysdev.h>
34 #include <linux/msi.h>
35 #include <linux/htirq.h>
36 #include <linux/freezer.h>
37 #include <linux/kthread.h>
38 #include <linux/jiffies.h> /* time_after() */
40 #include <acpi/acpi_bus.h>
42 #include <linux/bootmem.h>
43 #include <linux/dmar.h>
49 #include <asm/proto.h>
52 #include <asm/timer.h>
53 #include <asm/i8259.h>
55 #include <asm/msidef.h>
56 #include <asm/hypertransport.h>
57 #include <asm/setup.h>
58 #include <asm/irq_remapping.h>
61 #include <mach_apic.h>
62 #include <mach_apicdef.h>
64 #define __apicdebuginit(type) static type __init
68 int sis_apic_bug; /* not actually supported, dummy for compile */
70 static DEFINE_SPINLOCK(ioapic_lock);
71 static DEFINE_SPINLOCK(vector_lock);
75 * Rough estimation of how many shared IRQs there are, can be changed anytime.
81 * # of IRQ routing registers
83 int nr_ioapic_registers[MAX_IO_APICS];
85 /* I/O APIC entries */
86 struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
89 /* MP IRQ source entries */
90 struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
92 /* # of MP IRQ source entries */
95 DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
97 int skip_ioapic_setup;
99 static int __init parse_noapic(char *str)
101 disable_ioapic_setup();
104 early_param("noapic", parse_noapic);
111 struct irq_cfg *next;
112 struct irq_pin_list *irq_2_pin;
114 cpumask_t old_domain;
115 unsigned move_cleanup_count;
117 u8 move_in_progress : 1;
120 /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
121 static struct irq_cfg irq_cfg_legacy[] __initdata = {
122 [0] = { .irq = 0, .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, },
123 [1] = { .irq = 1, .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, },
124 [2] = { .irq = 2, .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, },
125 [3] = { .irq = 3, .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR, },
126 [4] = { .irq = 4, .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR, },
127 [5] = { .irq = 5, .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR, },
128 [6] = { .irq = 6, .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR, },
129 [7] = { .irq = 7, .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR, },
130 [8] = { .irq = 8, .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR, },
131 [9] = { .irq = 9, .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR, },
132 [10] = { .irq = 10, .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
133 [11] = { .irq = 11, .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
134 [12] = { .irq = 12, .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
135 [13] = { .irq = 13, .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
136 [14] = { .irq = 14, .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
137 [15] = { .irq = 15, .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
140 static struct irq_cfg irq_cfg_init = { .irq = -1U, };
141 /* needs to be bigger than the size of irq_cfg_legacy */
142 static int nr_irq_cfg = 32;
144 static int __init parse_nr_irq_cfg(char *arg)
147 nr_irq_cfg = simple_strtoul(arg, NULL, 0);
154 early_param("nr_irq_cfg", parse_nr_irq_cfg);
156 static void init_one_irq_cfg(struct irq_cfg *cfg)
158 memcpy(cfg, &irq_cfg_init, sizeof(struct irq_cfg));
161 static struct irq_cfg *irq_cfgx;
162 static struct irq_cfg *irq_cfgx_free;
163 static void __init init_work(void *data)
165 struct dyn_array *da = data;
172 memcpy(cfg, irq_cfg_legacy, sizeof(irq_cfg_legacy));
174 legacy_count = sizeof(irq_cfg_legacy)/sizeof(irq_cfg_legacy[0]);
175 for (i = legacy_count; i < *da->nr; i++)
176 init_one_irq_cfg(&cfg[i]);
178 for (i = 1; i < *da->nr; i++)
179 cfg[i-1].next = &cfg[i];
181 irq_cfgx_free = &irq_cfgx[legacy_count];
182 irq_cfgx[legacy_count - 1].next = NULL;
185 #define for_each_irq_cfg(cfg) \
186 for (cfg = irq_cfgx; cfg; cfg = cfg->next)
188 DEFINE_DYN_ARRAY(irq_cfgx, sizeof(struct irq_cfg), nr_irq_cfg, PAGE_SIZE, init_work);
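/*
 * Rough usage sketch of the two helpers below (no new API, just how the
 * existing code uses them): irq_cfg() only walks the chain and yields the
 * existing entry for an irq, while irq_cfg_alloc() additionally pops a
 * node off irq_cfgx_free (or allocates another batch) when the irq has no
 * configuration yet.  Callers that merely inspect state, such as the
 * mask/unmask helpers, use the former; code that wires up a new
 * interrupt, e.g. add_pin_to_irq(), uses the latter.
 */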
190 static struct irq_cfg *irq_cfg(unsigned int irq)
205 static struct irq_cfg *irq_cfg_alloc(unsigned int irq)
207 struct irq_cfg *cfg, *cfg_pri;
211 cfg_pri = cfg = irq_cfgx;
221 if (!irq_cfgx_free) {
223 unsigned long total_bytes;
225 * we ran out of the pre-allocated ones, allocate more
227 printk(KERN_DEBUG "try to get more irq_cfg %d\n", nr_irq_cfg);
229 total_bytes = sizeof(struct irq_cfg) * nr_irq_cfg;
231 cfg = kzalloc(total_bytes, GFP_ATOMIC);
233 cfg = __alloc_bootmem_nopanic(total_bytes, PAGE_SIZE, 0);
236 panic("please boot with nr_irq_cfg= %d\n", count * 2);
239 printk(KERN_DEBUG "irq_cfg ==> [%#lx - %#lx]\n", phys, phys + total_bytes);
241 for (i = 0; i < nr_irq_cfg; i++)
242 init_one_irq_cfg(&cfg[i]);
244 for (i = 1; i < nr_irq_cfg; i++)
245 cfg[i-1].next = &cfg[i];
251 irq_cfgx_free = irq_cfgx_free->next;
258 printk(KERN_DEBUG "found new irq_cfg for irq %d\n", cfg->irq);
259 #ifdef CONFIG_HAVE_SPARSE_IRQ_DEBUG
261 /* dump the results */
264 unsigned long bytes = sizeof(struct irq_cfg);
266 printk(KERN_DEBUG "=========================== %d\n", irq);
267 printk(KERN_DEBUG "irq_cfg dump after get that for %d\n", irq);
268 for_each_irq_cfg(cfg) {
270 printk(KERN_DEBUG "irq_cfg %d ==> [%#lx - %#lx]\n", cfg->irq, phys, phys + bytes);
272 printk(KERN_DEBUG "===========================\n");
279 * This is performance-critical, we want to do it O(1)
281 * the indexing order of this array favors 1:1 mappings
282 * between pins and IRQs.
285 struct irq_pin_list {
287 struct irq_pin_list *next;
290 static struct irq_pin_list *irq_2_pin_head;
291 /* fill one page ? */
292 static int nr_irq_2_pin = 0x100;
293 static struct irq_pin_list *irq_2_pin_ptr;
294 static void __init irq_2_pin_init_work(void *data)
296 struct dyn_array *da = data;
297 struct irq_pin_list *pin;
302 for (i = 1; i < *da->nr; i++)
303 pin[i-1].next = &pin[i];
305 irq_2_pin_ptr = &pin[0];
307 DEFINE_DYN_ARRAY(irq_2_pin_head, sizeof(struct irq_pin_list), nr_irq_2_pin, PAGE_SIZE, irq_2_pin_init_work);
309 static struct irq_pin_list *get_one_free_irq_2_pin(void)
311 struct irq_pin_list *pin;
317 irq_2_pin_ptr = pin->next;
323 * we ran out of the pre-allocated ones, allocate more
325 printk(KERN_DEBUG "try to get more irq_2_pin %d\n", nr_irq_2_pin);
328 pin = kzalloc(sizeof(struct irq_pin_list)*nr_irq_2_pin,
331 pin = __alloc_bootmem_nopanic(sizeof(struct irq_pin_list) *
332 nr_irq_2_pin, PAGE_SIZE, 0);
335 panic("can not get more irq_2_pin\n");
337 for (i = 1; i < nr_irq_2_pin; i++)
338 pin[i-1].next = &pin[i];
340 irq_2_pin_ptr = pin->next;
348 unsigned int unused[3];
352 static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
354 return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
355 + (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK);
358 static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
360 struct io_apic __iomem *io_apic = io_apic_base(apic);
361 writel(reg, &io_apic->index);
362 return readl(&io_apic->data);
365 static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
367 struct io_apic __iomem *io_apic = io_apic_base(apic);
368 writel(reg, &io_apic->index);
369 writel(value, &io_apic->data);
373 * Re-write a value: to be used for read-modify-write
374 * cycles where the read already set up the index register.
376 static inline void io_apic_modify(unsigned int apic, unsigned int value)
378 struct io_apic __iomem *io_apic = io_apic_base(apic);
379 writel(value, &io_apic->data);
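/*
 * Illustrative access sequence for the indirect register window above
 * (the register offsets are the ones used throughout this file): the
 * register number goes into the index register, and the payload is then
 * read from or written to the data register.  Fetching the low dword of
 * the redirection entry for pin N is therefore
 *
 *	writel(0x10 + 2 * N, &io_apic->index);
 *	low = readl(&io_apic->data);
 *
 * which is exactly what io_apic_read(apic, 0x10 + pin * 2) does.
 */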
382 static bool io_apic_level_ack_pending(unsigned int irq)
384 struct irq_pin_list *entry;
386 struct irq_cfg *cfg = irq_cfg(irq);
388 spin_lock_irqsave(&ioapic_lock, flags);
389 entry = cfg->irq_2_pin;
397 reg = io_apic_read(entry->apic, 0x10 + pin*2);
398 /* Is the remote IRR bit set? */
399 if (reg & IO_APIC_REDIR_REMOTE_IRR) {
400 spin_unlock_irqrestore(&ioapic_lock, flags);
407 spin_unlock_irqrestore(&ioapic_lock, flags);
413 struct { u32 w1, w2; };
414 struct IO_APIC_route_entry entry;
417 static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
419 union entry_union eu;
421 spin_lock_irqsave(&ioapic_lock, flags);
422 eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
423 eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
424 spin_unlock_irqrestore(&ioapic_lock, flags);
429 * When we write a new IO APIC routing entry, we need to write the high
430 * word first! If the mask bit in the low word is clear, we will enable
431 * the interrupt, and we need to make sure the entry is fully populated
432 * before that happens.
435 __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
437 union entry_union eu;
439 io_apic_write(apic, 0x11 + 2*pin, eu.w2);
440 io_apic_write(apic, 0x10 + 2*pin, eu.w1);
443 static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
446 spin_lock_irqsave(&ioapic_lock, flags);
447 __ioapic_write_entry(apic, pin, e);
448 spin_unlock_irqrestore(&ioapic_lock, flags);
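/*
 * Why the ordering above matters, as a concrete example: if the low word
 * (which carries the mask bit and the vector) were written first with
 * mask == 0, the pin would momentarily be enabled while the high word
 * still held a stale destination, and an interrupt arriving in that
 * window could be delivered to the wrong CPU.  Writing the high word
 * first keeps the entry consistent before it can fire.
 */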
452 * When we mask an IO APIC routing entry, we need to write the low
453 * word first, in order to set the mask bit before we change the high word.
456 static void ioapic_mask_entry(int apic, int pin)
459 union entry_union eu = { .entry.mask = 1 };
461 spin_lock_irqsave(&ioapic_lock, flags);
462 io_apic_write(apic, 0x10 + 2*pin, eu.w1);
463 io_apic_write(apic, 0x11 + 2*pin, eu.w2);
464 spin_unlock_irqrestore(&ioapic_lock, flags);
468 static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
472 struct irq_pin_list *entry;
475 entry = cfg->irq_2_pin;
484 #ifdef CONFIG_INTR_REMAP
486 * With interrupt-remapping, destination information comes
487 * from interrupt-remapping table entry.
489 if (!irq_remapped(irq))
490 io_apic_write(apic, 0x11 + pin*2, dest);
492 io_apic_write(apic, 0x11 + pin*2, dest);
494 reg = io_apic_read(apic, 0x10 + pin*2);
495 reg &= ~IO_APIC_REDIR_VECTOR_MASK;
497 io_apic_modify(apic, reg);
504 static int assign_irq_vector(int irq, cpumask_t mask);
506 static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
508 struct irq_cfg *cfg = irq_cfg(irq);
512 struct irq_desc *desc;
514 cpus_and(tmp, mask, cpu_online_map);
518 if (assign_irq_vector(irq, mask))
521 cpus_and(tmp, cfg->domain, mask);
522 dest = cpu_mask_to_apicid(tmp);
525 * Only the high 8 bits are valid.
527 dest = SET_APIC_LOGICAL_ID(dest);
529 desc = irq_to_desc(irq);
530 spin_lock_irqsave(&ioapic_lock, flags);
531 __target_IO_APIC_irq(irq, dest, cfg->vector);
532 desc->affinity = mask;
533 spin_unlock_irqrestore(&ioapic_lock, flags);
538 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
539 * shared ISA-space IRQs, so we have to support them. We are super
540 * fast in the common case, and fast for shared ISA-space IRQs.
542 static void add_pin_to_irq(unsigned int irq, int apic, int pin)
545 struct irq_pin_list *entry;
547 /* first time this irq_cfg is referenced, so allocate a new one */
548 cfg = irq_cfg_alloc(irq);
549 entry = cfg->irq_2_pin;
551 entry = get_one_free_irq_2_pin();
552 cfg->irq_2_pin = entry;
555 printk(KERN_DEBUG " 0 add_pin_to_irq: irq %d --> apic %d pin %d\n", irq, apic, pin);
559 while (entry->next) {
560 /* not again, please */
561 if (entry->apic == apic && entry->pin == pin)
567 entry->next = get_one_free_irq_2_pin();
571 printk(KERN_DEBUG " x add_pin_to_irq: irq %d --> apic %d pin %d\n", irq, apic, pin);
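/*
 * Example of the chain this builds (a sketch, the apic/pin numbers are
 * made up): if ISA IRQ 9 shows up both on IO-APIC 0 pin 9 and on
 * IO-APIC 1 pin 17, two add_pin_to_irq() calls leave cfg->irq_2_pin as a
 * two entry list,
 *
 *	{ apic 0, pin 9 } -> { apic 1, pin 17 } -> NULL
 *
 * and every mask/unmask/retarget helper simply walks that list.
 */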
575 * Reroute an IRQ to a different pin.
577 static void __init replace_pin_at_irq(unsigned int irq,
578 int oldapic, int oldpin,
579 int newapic, int newpin)
581 struct irq_cfg *cfg = irq_cfg(irq);
582 struct irq_pin_list *entry = cfg->irq_2_pin;
586 if (entry->apic == oldapic && entry->pin == oldpin) {
587 entry->apic = newapic;
590 /* every one is different, right? */
596 /* why? call replace before add? */
598 add_pin_to_irq(irq, newapic, newpin);
602 * Synchronize the IO-APIC and the CPU by doing
603 * a dummy read from the IO-APIC
605 static inline void io_apic_sync(unsigned int apic)
607 struct io_apic __iomem *io_apic = io_apic_base(apic);
608 readl(&io_apic->data);
611 #define __DO_ACTION(R, ACTION, FINAL) \
615 struct irq_cfg *cfg; \
616 struct irq_pin_list *entry; \
618 cfg = irq_cfg(irq); \
619 entry = cfg->irq_2_pin; \
625 reg = io_apic_read(entry->apic, 0x10 + R + pin*2); \
627 io_apic_modify(entry->apic, reg); \
631 entry = entry->next; \
635 #define DO_ACTION(name,R,ACTION, FINAL) \
637 static void name##_IO_APIC_irq (unsigned int irq) \
638 __DO_ACTION(R, ACTION, FINAL)
641 DO_ACTION(__mask, 0, |= IO_APIC_REDIR_MASKED, io_apic_sync(entry->apic))
644 DO_ACTION(__unmask, 0, &= ~IO_APIC_REDIR_MASKED, )
646 static void mask_IO_APIC_irq (unsigned int irq)
650 spin_lock_irqsave(&ioapic_lock, flags);
651 __mask_IO_APIC_irq(irq);
652 spin_unlock_irqrestore(&ioapic_lock, flags);
655 static void unmask_IO_APIC_irq (unsigned int irq)
659 spin_lock_irqsave(&ioapic_lock, flags);
660 __unmask_IO_APIC_irq(irq);
661 spin_unlock_irqrestore(&ioapic_lock, flags);
664 static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
666 struct IO_APIC_route_entry entry;
668 /* Check delivery_mode to be sure we're not clearing an SMI pin */
669 entry = ioapic_read_entry(apic, pin);
670 if (entry.delivery_mode == dest_SMI)
673 * Disable it in the IO-APIC irq-routing table:
675 ioapic_mask_entry(apic, pin);
678 static void clear_IO_APIC (void)
682 for (apic = 0; apic < nr_ioapics; apic++)
683 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
684 clear_IO_APIC_pin(apic, pin);
687 #ifdef CONFIG_INTR_REMAP
688 /* I/O APIC RTE contents at the OS boot up */
689 static struct IO_APIC_route_entry *early_ioapic_entries[MAX_IO_APICS];
692 * Saves and masks all the unmasked IO-APIC RTE's
694 int save_mask_IO_APIC_setup(void)
696 union IO_APIC_reg_01 reg_01;
701 * The number of IO-APIC IRQ registers (== #pins):
703 for (apic = 0; apic < nr_ioapics; apic++) {
704 spin_lock_irqsave(&ioapic_lock, flags);
705 reg_01.raw = io_apic_read(apic, 1);
706 spin_unlock_irqrestore(&ioapic_lock, flags);
707 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
710 for (apic = 0; apic < nr_ioapics; apic++) {
711 early_ioapic_entries[apic] =
712 kzalloc(sizeof(struct IO_APIC_route_entry) *
713 nr_ioapic_registers[apic], GFP_KERNEL);
714 if (!early_ioapic_entries[apic])
718 for (apic = 0; apic < nr_ioapics; apic++)
719 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
720 struct IO_APIC_route_entry entry;
722 entry = early_ioapic_entries[apic][pin] =
723 ioapic_read_entry(apic, pin);
726 ioapic_write_entry(apic, pin, entry);
732 void restore_IO_APIC_setup(void)
736 for (apic = 0; apic < nr_ioapics; apic++)
737 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
738 ioapic_write_entry(apic, pin,
739 early_ioapic_entries[apic][pin]);
742 void reinit_intr_remapped_IO_APIC(int intr_remapping)
745 * For now, a plain restore of the previous settings.
746 * TBD: In the case of the OS enabling interrupt-remapping,
747 * IO-APIC RTE's need to be set up to point to interrupt-remapping
748 * table entries. For now, do a plain restore, and wait for
749 * setup_IO_APIC_irqs() to do proper initialization.
751 restore_IO_APIC_setup();
756 * Find the IRQ entry number of a certain pin.
758 static int find_irq_entry(int apic, int pin, int type)
762 for (i = 0; i < mp_irq_entries; i++)
763 if (mp_irqs[i].mp_irqtype == type &&
764 (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid ||
765 mp_irqs[i].mp_dstapic == MP_APIC_ALL) &&
766 mp_irqs[i].mp_dstirq == pin)
773 * Find the pin to which IRQ[irq] (ISA) is connected
775 static int __init find_isa_irq_pin(int irq, int type)
779 for (i = 0; i < mp_irq_entries; i++) {
780 int lbus = mp_irqs[i].mp_srcbus;
782 if (test_bit(lbus, mp_bus_not_pci) &&
783 (mp_irqs[i].mp_irqtype == type) &&
784 (mp_irqs[i].mp_srcbusirq == irq))
786 return mp_irqs[i].mp_dstirq;
791 static int __init find_isa_irq_apic(int irq, int type)
795 for (i = 0; i < mp_irq_entries; i++) {
796 int lbus = mp_irqs[i].mp_srcbus;
798 if (test_bit(lbus, mp_bus_not_pci) &&
799 (mp_irqs[i].mp_irqtype == type) &&
800 (mp_irqs[i].mp_srcbusirq == irq))
803 if (i < mp_irq_entries) {
805 for(apic = 0; apic < nr_ioapics; apic++) {
806 if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic)
815 * Find a specific PCI IRQ entry.
816 * Not an __init, possibly needed by modules
818 static int pin_2_irq(int idx, int apic, int pin);
820 int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
822 int apic, i, best_guess = -1;
824 apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
826 if (test_bit(bus, mp_bus_not_pci)) {
827 apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
830 for (i = 0; i < mp_irq_entries; i++) {
831 int lbus = mp_irqs[i].mp_srcbus;
833 for (apic = 0; apic < nr_ioapics; apic++)
834 if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic ||
835 mp_irqs[i].mp_dstapic == MP_APIC_ALL)
838 if (!test_bit(lbus, mp_bus_not_pci) &&
839 !mp_irqs[i].mp_irqtype &&
841 (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) {
842 int irq = pin_2_irq(i,apic,mp_irqs[i].mp_dstirq);
844 if (!(apic || IO_APIC_IRQ(irq)))
847 if (pin == (mp_irqs[i].mp_srcbusirq & 3))
850 * Use the first all-but-pin matching entry as a
851 * best-guess fuzzy result for broken mptables.
860 /* ISA interrupts are always polarity zero edge triggered,
861 * when listed as conforming in the MP table. */
863 #define default_ISA_trigger(idx) (0)
864 #define default_ISA_polarity(idx) (0)
866 /* PCI interrupts are always polarity one level triggered,
867 * when listed as conforming in the MP table. */
869 #define default_PCI_trigger(idx) (1)
870 #define default_PCI_polarity(idx) (1)
872 static int MPBIOS_polarity(int idx)
874 int bus = mp_irqs[idx].mp_srcbus;
878 * Determine IRQ line polarity (high active or low active):
880 switch (mp_irqs[idx].mp_irqflag & 3)
882 case 0: /* conforms, ie. bus-type dependent polarity */
883 if (test_bit(bus, mp_bus_not_pci))
884 polarity = default_ISA_polarity(idx);
886 polarity = default_PCI_polarity(idx);
888 case 1: /* high active */
893 case 2: /* reserved */
895 printk(KERN_WARNING "broken BIOS!!\n");
899 case 3: /* low active */
904 default: /* invalid */
906 printk(KERN_WARNING "broken BIOS!!\n");
914 static int MPBIOS_trigger(int idx)
916 int bus = mp_irqs[idx].mp_srcbus;
920 * Determine IRQ trigger mode (edge or level sensitive):
922 switch ((mp_irqs[idx].mp_irqflag>>2) & 3)
924 case 0: /* conforms, ie. bus-type dependent */
925 if (test_bit(bus, mp_bus_not_pci))
926 trigger = default_ISA_trigger(idx);
928 trigger = default_PCI_trigger(idx);
935 case 2: /* reserved */
937 printk(KERN_WARNING "broken BIOS!!\n");
946 default: /* invalid */
948 printk(KERN_WARNING "broken BIOS!!\n");
956 static inline int irq_polarity(int idx)
958 return MPBIOS_polarity(idx);
961 static inline int irq_trigger(int idx)
963 return MPBIOS_trigger(idx);
966 static int pin_2_irq(int idx, int apic, int pin)
969 int bus = mp_irqs[idx].mp_srcbus;
972 * Debugging check, we are in big trouble if this message pops up!
974 if (mp_irqs[idx].mp_dstirq != pin)
975 printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
977 if (test_bit(bus, mp_bus_not_pci)) {
978 irq = mp_irqs[idx].mp_srcbusirq;
981 * PCI IRQs are mapped in order
985 irq += nr_ioapic_registers[i++];
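/*
 * Worked example of the numbering (the IO-APIC sizes are illustrative):
 * with two IO-APICs of 24 redirection entries each, a PCI interrupt on
 * apic 1, pin 3 becomes irq = 3 + nr_ioapic_registers[0] = 27, i.e. PCI
 * GSIs are numbered sequentially after all pins of the preceding
 * IO-APICs, while ISA interrupts above keep their bus-local srcbusirq
 * number.
 */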
991 void lock_vector_lock(void)
993 /* Used to ensure the set of online cpus does not change
994 * during assign_irq_vector.
996 spin_lock(&vector_lock);
999 void unlock_vector_lock(void)
1001 spin_unlock(&vector_lock);
1004 static int __assign_irq_vector(int irq, cpumask_t mask)
1007 * NOTE! The local APIC isn't very good at handling
1008 * multiple interrupts at the same interrupt level.
1009 * As the interrupt level is determined by taking the
1010 * vector number and shifting that right by 4, we
1011 * want to spread these out a bit so that they don't
1012 * all fall in the same interrupt level.
1014 * Also, we've got to be careful not to trash gate
1015 * 0x80, because int 0x80 is hm, kind of importantish. ;)
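/*
 * Worked example (the concrete numbers are illustrative, the real base
 * is FIRST_DEVICE_VECTOR): stepping the search in strides of 8 from,
 * say, 0x41 hands out 0x41, 0x49, 0x51, 0x59, ... so the priority level
 * (vector >> 4) advances every couple of allocations instead of sixteen
 * vectors piling up in one level.  Only when the search runs past
 * first_system_vector does it wrap back to FIRST_DEVICE_VECTOR with the
 * next offset in 0..7.
 */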
1017 static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
1018 unsigned int old_vector;
1020 struct irq_cfg *cfg;
1024 /* Only try and allocate irqs on cpus that are present */
1025 cpus_and(mask, mask, cpu_online_map);
1027 if ((cfg->move_in_progress) || cfg->move_cleanup_count)
1030 old_vector = cfg->vector;
1033 cpus_and(tmp, cfg->domain, mask);
1034 if (!cpus_empty(tmp))
1038 for_each_cpu_mask_nr(cpu, mask) {
1039 cpumask_t domain, new_mask;
1043 domain = vector_allocation_domain(cpu);
1044 cpus_and(new_mask, domain, cpu_online_map);
1046 vector = current_vector;
1047 offset = current_offset;
1050 if (vector >= first_system_vector) {
1051 /* If we run out of vectors on large boxen, must share them. */
1052 offset = (offset + 1) % 8;
1053 vector = FIRST_DEVICE_VECTOR + offset;
1055 if (unlikely(current_vector == vector))
1057 if (vector == IA32_SYSCALL_VECTOR)
1059 for_each_cpu_mask_nr(new_cpu, new_mask)
1060 if (per_cpu(vector_irq, new_cpu)[vector] != -1)
1063 current_vector = vector;
1064 current_offset = offset;
1066 cfg->move_in_progress = 1;
1067 cfg->old_domain = cfg->domain;
1069 for_each_cpu_mask_nr(new_cpu, new_mask)
1070 per_cpu(vector_irq, new_cpu)[vector] = irq;
1071 cfg->vector = vector;
1072 cfg->domain = domain;
1078 static int assign_irq_vector(int irq, cpumask_t mask)
1081 unsigned long flags;
1083 spin_lock_irqsave(&vector_lock, flags);
1084 err = __assign_irq_vector(irq, mask);
1085 spin_unlock_irqrestore(&vector_lock, flags);
1089 static void __clear_irq_vector(int irq)
1091 struct irq_cfg *cfg;
1096 BUG_ON(!cfg->vector);
1098 vector = cfg->vector;
1099 cpus_and(mask, cfg->domain, cpu_online_map);
1100 for_each_cpu_mask_nr(cpu, mask)
1101 per_cpu(vector_irq, cpu)[vector] = -1;
1104 cpus_clear(cfg->domain);
1107 void __setup_vector_irq(int cpu)
1109 /* Initialize vector_irq on a new cpu */
1110 /* This function must be called with vector_lock held */
1112 struct irq_cfg *cfg;
1114 /* Mark the inuse vectors */
1115 for_each_irq_cfg(cfg) {
1116 if (!cpu_isset(cpu, cfg->domain))
1118 vector = cfg->vector;
1120 per_cpu(vector_irq, cpu)[vector] = irq;
1122 /* Mark the free vectors */
1123 for (vector = 0; vector < NR_VECTORS; ++vector) {
1124 irq = per_cpu(vector_irq, cpu)[vector];
1129 if (!cpu_isset(cpu, cfg->domain))
1130 per_cpu(vector_irq, cpu)[vector] = -1;
1134 static struct irq_chip ioapic_chip;
1135 #ifdef CONFIG_INTR_REMAP
1136 static struct irq_chip ir_ioapic_chip;
1139 static void ioapic_register_intr(int irq, unsigned long trigger)
1141 struct irq_desc *desc;
1143 /* first time to use this irq_desc */
1145 desc = irq_to_desc(irq);
1147 desc = irq_to_desc_alloc(irq);
1150 desc->status |= IRQ_LEVEL;
1152 desc->status &= ~IRQ_LEVEL;
1154 #ifdef CONFIG_INTR_REMAP
1155 if (irq_remapped(irq)) {
1156 desc->status |= IRQ_MOVE_PCNTXT;
1158 set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
1162 set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
1163 handle_edge_irq, "edge");
1168 set_irq_chip_and_handler_name(irq, &ioapic_chip,
1172 set_irq_chip_and_handler_name(irq, &ioapic_chip,
1173 handle_edge_irq, "edge");
1176 static int setup_ioapic_entry(int apic, int irq,
1177 struct IO_APIC_route_entry *entry,
1178 unsigned int destination, int trigger,
1179 int polarity, int vector)
1182 * add it to the IO-APIC irq-routing table:
1184 memset(entry,0,sizeof(*entry));
1186 #ifdef CONFIG_INTR_REMAP
1187 if (intr_remapping_enabled) {
1188 struct intel_iommu *iommu = map_ioapic_to_ir(apic);
1190 struct IR_IO_APIC_route_entry *ir_entry =
1191 (struct IR_IO_APIC_route_entry *) entry;
1195 panic("No mapping iommu for ioapic %d\n", apic);
1197 index = alloc_irte(iommu, irq, 1);
1199 panic("Failed to allocate IRTE for ioapic %d\n", apic);
1201 memset(&irte, 0, sizeof(irte));
1204 irte.dst_mode = INT_DEST_MODE;
1205 irte.trigger_mode = trigger;
1206 irte.dlvry_mode = INT_DELIVERY_MODE;
1207 irte.vector = vector;
1208 irte.dest_id = IRTE_DEST(destination);
1210 modify_irte(irq, &irte);
1212 ir_entry->index2 = (index >> 15) & 0x1;
1214 ir_entry->format = 1;
1215 ir_entry->index = (index & 0x7fff);
1219 entry->delivery_mode = INT_DELIVERY_MODE;
1220 entry->dest_mode = INT_DEST_MODE;
1221 entry->dest = destination;
1224 entry->mask = 0; /* enable IRQ */
1225 entry->trigger = trigger;
1226 entry->polarity = polarity;
1227 entry->vector = vector;
1229 /* Mask level triggered irqs.
1230 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
1237 static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
1238 int trigger, int polarity)
1240 struct irq_cfg *cfg;
1241 struct IO_APIC_route_entry entry;
1244 if (!IO_APIC_IRQ(irq))
1250 if (assign_irq_vector(irq, mask))
1253 cpus_and(mask, cfg->domain, mask);
1255 apic_printk(APIC_VERBOSE,KERN_DEBUG
1256 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
1257 "IRQ %d Mode:%i Active:%i)\n",
1258 apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector,
1259 irq, trigger, polarity);
1262 if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
1263 cpu_mask_to_apicid(mask), trigger, polarity,
1265 printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
1266 mp_ioapics[apic].mp_apicid, pin);
1267 __clear_irq_vector(irq);
1271 ioapic_register_intr(irq, trigger);
1273 disable_8259A_irq(irq);
1275 ioapic_write_entry(apic, pin, entry);
1278 static void __init setup_IO_APIC_irqs(void)
1280 int apic, pin, idx, irq, first_notcon = 1;
1282 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
1284 for (apic = 0; apic < nr_ioapics; apic++) {
1285 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1287 idx = find_irq_entry(apic,pin,mp_INT);
1290 apic_printk(APIC_VERBOSE, KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mp_apicid, pin);
1293 apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mp_apicid, pin);
1296 if (!first_notcon) {
1297 apic_printk(APIC_VERBOSE, " not connected.\n");
1301 irq = pin_2_irq(idx, apic, pin);
1302 add_pin_to_irq(irq, apic, pin);
1304 setup_IO_APIC_irq(apic, pin, irq,
1305 irq_trigger(idx), irq_polarity(idx));
1310 apic_printk(APIC_VERBOSE, " not connected.\n");
1314 * Set up the timer pin, possibly with the 8259A-master behind.
1316 static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
1319 struct IO_APIC_route_entry entry;
1321 #ifdef CONFIG_INTR_REMAP
1322 if (intr_remapping_enabled)
1326 memset(&entry, 0, sizeof(entry));
1329 * We use logical delivery to get the timer IRQ
1332 entry.dest_mode = INT_DEST_MODE;
1333 entry.mask = 1; /* mask IRQ now */
1334 entry.dest = cpu_mask_to_apicid(TARGET_CPUS);
1335 entry.delivery_mode = INT_DELIVERY_MODE;
1338 entry.vector = vector;
1341 * The timer IRQ doesn't have to know that behind the
1342 * scenes we may have an 8259A-master in AEOI mode ...
1344 set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
1347 * Add it to the IO-APIC irq-routing table:
1349 ioapic_write_entry(apic, pin, entry);
1353 __apicdebuginit(void) print_IO_APIC(void)
1356 union IO_APIC_reg_00 reg_00;
1357 union IO_APIC_reg_01 reg_01;
1358 union IO_APIC_reg_02 reg_02;
1359 unsigned long flags;
1360 struct irq_cfg *cfg;
1362 if (apic_verbosity == APIC_QUIET)
1365 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
1366 for (i = 0; i < nr_ioapics; i++)
1367 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
1368 mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]);
1371 * We are a bit conservative about what we expect. We have to
1372 * know about every hardware change ASAP.
1374 printk(KERN_INFO "testing the IO APIC.......................\n");
1376 for (apic = 0; apic < nr_ioapics; apic++) {
1378 spin_lock_irqsave(&ioapic_lock, flags);
1379 reg_00.raw = io_apic_read(apic, 0);
1380 reg_01.raw = io_apic_read(apic, 1);
1381 if (reg_01.bits.version >= 0x10)
1382 reg_02.raw = io_apic_read(apic, 2);
1383 spin_unlock_irqrestore(&ioapic_lock, flags);
1386 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid);
1387 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
1388 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
1389 printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
1390 printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
1392 printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
1393 printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
1395 printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
1396 printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
1398 if (reg_01.bits.version >= 0x10) {
1399 printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
1400 printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
1403 printk(KERN_DEBUG ".... IRQ redirection table:\n");
1405 printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
1406 " Stat Dmod Deli Vect: \n");
1408 for (i = 0; i <= reg_01.bits.entries; i++) {
1409 struct IO_APIC_route_entry entry;
1411 entry = ioapic_read_entry(apic, i);
1413 printk(KERN_DEBUG " %02x %03X ",
1418 printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
1423 entry.delivery_status,
1425 entry.delivery_mode,
1430 printk(KERN_DEBUG "IRQ to pin mappings:\n");
1431 for_each_irq_cfg(cfg) {
1432 struct irq_pin_list *entry = cfg->irq_2_pin;
1435 printk(KERN_DEBUG "IRQ%d ", cfg->irq);
1437 printk("-> %d:%d", entry->apic, entry->pin);
1440 entry = entry->next;
1445 printk(KERN_INFO ".................................... done.\n");
1450 __apicdebuginit(void) print_APIC_bitfield(int base)
1455 if (apic_verbosity == APIC_QUIET)
1458 printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
1459 for (i = 0; i < 8; i++) {
1460 v = apic_read(base + i*0x10);
1461 for (j = 0; j < 32; j++) {
1471 __apicdebuginit(void) print_local_APIC(void *dummy)
1473 unsigned int v, ver, maxlvt;
1476 if (apic_verbosity == APIC_QUIET)
1479 printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
1480 smp_processor_id(), hard_smp_processor_id());
1481 v = apic_read(APIC_ID);
1482 printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id());
1483 v = apic_read(APIC_LVR);
1484 printk(KERN_INFO "... APIC VERSION: %08x\n", v);
1485 ver = GET_APIC_VERSION(v);
1486 maxlvt = lapic_get_maxlvt();
1488 v = apic_read(APIC_TASKPRI);
1489 printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
1491 v = apic_read(APIC_ARBPRI);
1492 printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
1493 v & APIC_ARBPRI_MASK);
1494 v = apic_read(APIC_PROCPRI);
1495 printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
1497 v = apic_read(APIC_EOI);
1498 printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
1499 v = apic_read(APIC_RRR);
1500 printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
1501 v = apic_read(APIC_LDR);
1502 printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
1503 v = apic_read(APIC_DFR);
1504 printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
1505 v = apic_read(APIC_SPIV);
1506 printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
1508 printk(KERN_DEBUG "... APIC ISR field:\n");
1509 print_APIC_bitfield(APIC_ISR);
1510 printk(KERN_DEBUG "... APIC TMR field:\n");
1511 print_APIC_bitfield(APIC_TMR);
1512 printk(KERN_DEBUG "... APIC IRR field:\n");
1513 print_APIC_bitfield(APIC_IRR);
1515 v = apic_read(APIC_ESR);
1516 printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
1518 icr = apic_icr_read();
1519 printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
1520 printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));
1522 v = apic_read(APIC_LVTT);
1523 printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
1525 if (maxlvt > 3) { /* PC is LVT#4. */
1526 v = apic_read(APIC_LVTPC);
1527 printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
1529 v = apic_read(APIC_LVT0);
1530 printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
1531 v = apic_read(APIC_LVT1);
1532 printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
1534 if (maxlvt > 2) { /* ERR is LVT#3. */
1535 v = apic_read(APIC_LVTERR);
1536 printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
1539 v = apic_read(APIC_TMICT);
1540 printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
1541 v = apic_read(APIC_TMCCT);
1542 printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
1543 v = apic_read(APIC_TDCR);
1544 printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
1548 __apicdebuginit(void) print_all_local_APICs(void)
1550 on_each_cpu(print_local_APIC, NULL, 1);
1553 __apicdebuginit(void) print_PIC(void)
1556 unsigned long flags;
1558 if (apic_verbosity == APIC_QUIET)
1561 printk(KERN_DEBUG "\nprinting PIC contents\n");
1563 spin_lock_irqsave(&i8259A_lock, flags);
1565 v = inb(0xa1) << 8 | inb(0x21);
1566 printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
1568 v = inb(0xa0) << 8 | inb(0x20);
1569 printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
1573 v = inb(0xa0) << 8 | inb(0x20);
1577 spin_unlock_irqrestore(&i8259A_lock, flags);
1579 printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
1581 v = inb(0x4d1) << 8 | inb(0x4d0);
1582 printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
1585 __apicdebuginit(int) print_all_ICs(void)
1588 print_all_local_APICs();
1594 fs_initcall(print_all_ICs);
1597 /* Where, if anywhere, is the i8259 connected in external int mode */
1598 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
1600 void __init enable_IO_APIC(void)
1602 union IO_APIC_reg_01 reg_01;
1603 int i8259_apic, i8259_pin;
1605 unsigned long flags;
1608 * The number of IO-APIC IRQ registers (== #pins):
1610 for (apic = 0; apic < nr_ioapics; apic++) {
1611 spin_lock_irqsave(&ioapic_lock, flags);
1612 reg_01.raw = io_apic_read(apic, 1);
1613 spin_unlock_irqrestore(&ioapic_lock, flags);
1614 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
1616 for(apic = 0; apic < nr_ioapics; apic++) {
1618 /* See if any of the pins is in ExtINT mode */
1619 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1620 struct IO_APIC_route_entry entry;
1621 entry = ioapic_read_entry(apic, pin);
1623 /* If the interrupt line is enabled and in ExtInt mode
1624 * I have found the pin where the i8259 is connected.
1626 if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
1627 ioapic_i8259.apic = apic;
1628 ioapic_i8259.pin = pin;
1634 /* Look to see if the MP table has reported the ExtINT */
1635 i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
1636 i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
1637 /* Trust the MP table if nothing is set up in the hardware */
1638 if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
1639 printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
1640 ioapic_i8259.pin = i8259_pin;
1641 ioapic_i8259.apic = i8259_apic;
1643 /* Complain if the MP table and the hardware disagree */
1644 if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
1645 (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
1647 printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
1651 * Do not trust the IO-APIC being empty at bootup
1657 * Not an __init, needed by the reboot code
1659 void disable_IO_APIC(void)
1662 * Clear the IO-APIC before rebooting:
1667 * If the i8259 is routed through an IOAPIC,
1668 * put that IOAPIC in virtual wire mode
1669 * so that legacy interrupts can be delivered.
1671 if (ioapic_i8259.pin != -1) {
1672 struct IO_APIC_route_entry entry;
1674 memset(&entry, 0, sizeof(entry));
1675 entry.mask = 0; /* Enabled */
1676 entry.trigger = 0; /* Edge */
1678 entry.polarity = 0; /* High */
1679 entry.delivery_status = 0;
1680 entry.dest_mode = 0; /* Physical */
1681 entry.delivery_mode = dest_ExtINT; /* ExtInt */
1683 entry.dest = read_apic_id();
1686 * Add it to the IO-APIC irq-routing table:
1688 ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
1691 disconnect_bsp_APIC(ioapic_i8259.pin != -1);
1694 int no_timer_check __initdata;
1696 static int __init notimercheck(char *s)
1701 __setup("no_timer_check", notimercheck);
1704 * There is a nasty bug in some older SMP boards: their mptable lies
1705 * about the timer IRQ. We do the following to work around the situation:
1707 * - timer IRQ defaults to IO-APIC IRQ
1708 * - if this function detects that timer IRQs are defunct, then we fall
1709 * back to ISA timer IRQs
1711 static int __init timer_irq_works(void)
1713 unsigned long t1 = jiffies;
1714 unsigned long flags;
1719 local_save_flags(flags);
1721 /* Let ten ticks pass... */
1722 mdelay((10 * 1000) / HZ);
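/*
 * Arithmetic sketch: (10 * 1000) / HZ is the number of milliseconds that
 * ten timer ticks take, e.g. 40ms at HZ=250 or 10ms at HZ=1000, so after
 * the mdelay() above roughly ten jiffies should have elapsed if the
 * timer interrupt is really being delivered.
 */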
1723 local_irq_restore(flags);
1726 * Expect a few ticks at least, to be sure some possible
1727 * glue logic does not lock up after one or two first
1728 * ticks in a non-ExtINT mode. Also the local APIC
1729 * might have cached one ExtINT interrupt. Finally, at
1730 * least one tick may be lost due to delays.
1734 if (time_after(jiffies, t1 + 4))
1740 * In the SMP+IOAPIC case it might happen that there are an unspecified
1741 * number of pending IRQ events unhandled. These cases are very rare,
1742 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
1743 * better to do it this way because then we do not have to be aware of
1744 * 'pending' interrupts in the IRQ path, except at this point.
1747 * Edge triggered needs to resend any interrupt
1748 * that was delayed, but this is now handled in the device-independent code.
1753 * Starting up an edge-triggered IO-APIC interrupt is
1754 * nasty - we need to make sure that we get the edge.
1755 * If it is already asserted for some reason, we need to
1756 * return 1 to indicate that it was pending.
1758 * This is not complete - we should be able to fake
1759 * an edge even if it isn't on the 8259A...
1762 static unsigned int startup_ioapic_irq(unsigned int irq)
1764 int was_pending = 0;
1765 unsigned long flags;
1767 spin_lock_irqsave(&ioapic_lock, flags);
1769 disable_8259A_irq(irq);
1770 if (i8259A_irq_pending(irq))
1773 __unmask_IO_APIC_irq(irq);
1774 spin_unlock_irqrestore(&ioapic_lock, flags);
1779 static int ioapic_retrigger_irq(unsigned int irq)
1781 struct irq_cfg *cfg = irq_cfg(irq);
1782 unsigned long flags;
1784 spin_lock_irqsave(&vector_lock, flags);
1785 send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector);
1786 spin_unlock_irqrestore(&vector_lock, flags);
1792 * Level and edge triggered IO-APIC interrupts need different handling,
1793 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
1794 * handled with the level-triggered descriptor, but that one has slightly
1795 * more overhead. Level-triggered interrupts cannot be handled with the
1796 * edge-triggered handler, without risking IRQ storms and other ugly
1802 #ifdef CONFIG_INTR_REMAP
1803 static void ir_irq_migration(struct work_struct *work);
1805 static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);
1808 * Migrate the IO-APIC irq in the presence of intr-remapping.
1810 * For edge triggered, irq migration is a simple atomic update(of vector
1811 * and cpu destination) of IRTE and flush the hardware cache.
1813 * For level triggered, we need to modify the io-apic RTE as well with the updated
1814 * vector information, along with modifying the IRTE with vector and destination.
1815 * So irq migration for level triggered is a little more complex compared to
1816 * edge triggered migration. But the good news is that we use the same algorithm
1817 * for level triggered migration as we have today, the only difference being
1818 * that we now initiate the irq migration from process context instead of the
1819 * interrupt context.
1821 * In future, when we do a directed EOI (combined with cpu EOI broadcast
1822 * suppression) to the IO-APIC, level triggered irq migration will also be
1823 * as simple as edge triggered migration and we can do the irq migration
1824 * with a simple atomic update to IO-APIC RTE.
1826 static void migrate_ioapic_irq(int irq, cpumask_t mask)
1828 struct irq_cfg *cfg;
1829 struct irq_desc *desc;
1830 cpumask_t tmp, cleanup_mask;
1832 int modify_ioapic_rte;
1834 unsigned long flags;
1836 cpus_and(tmp, mask, cpu_online_map);
1837 if (cpus_empty(tmp))
1840 if (get_irte(irq, &irte))
1843 if (assign_irq_vector(irq, mask))
1847 cpus_and(tmp, cfg->domain, mask);
1848 dest = cpu_mask_to_apicid(tmp);
1850 desc = irq_to_desc(irq);
1851 modify_ioapic_rte = desc->status & IRQ_LEVEL;
1852 if (modify_ioapic_rte) {
1853 spin_lock_irqsave(&ioapic_lock, flags);
1854 __target_IO_APIC_irq(irq, dest, cfg->vector);
1855 spin_unlock_irqrestore(&ioapic_lock, flags);
1858 irte.vector = cfg->vector;
1859 irte.dest_id = IRTE_DEST(dest);
1862 * Modify the IRTE and flush the Interrupt entry cache.
1864 modify_irte(irq, &irte);
1866 if (cfg->move_in_progress) {
1867 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
1868 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
1869 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
1870 cfg->move_in_progress = 0;
1873 desc->affinity = mask;
1876 static int migrate_irq_remapped_level(int irq)
1879 struct irq_desc *desc = irq_to_desc(irq);
1881 mask_IO_APIC_irq(irq);
1883 if (io_apic_level_ack_pending(irq)) {
1885 * Interrupt in progress. Migrating irq now will change the
1886 * vector information in the IO-APIC RTE and that will confuse
1887 * the EOI broadcast performed by cpu.
1888 * So, delay the irq migration to the next instance.
1890 schedule_delayed_work(&ir_migration_work, 1);
1894 /* everything is clear, we have right of way */
1895 migrate_ioapic_irq(irq, desc->pending_mask);
1898 desc->status &= ~IRQ_MOVE_PENDING;
1899 cpus_clear(desc->pending_mask);
1902 unmask_IO_APIC_irq(irq);
1906 static void ir_irq_migration(struct work_struct *work)
1909 struct irq_desc *desc;
1911 for_each_irq_desc(irq, desc) {
1912 if (desc->status & IRQ_MOVE_PENDING) {
1913 unsigned long flags;
1915 spin_lock_irqsave(&desc->lock, flags);
1916 if (!desc->chip->set_affinity ||
1917 !(desc->status & IRQ_MOVE_PENDING)) {
1918 desc->status &= ~IRQ_MOVE_PENDING;
1919 spin_unlock_irqrestore(&desc->lock, flags);
1923 desc->chip->set_affinity(irq, desc->pending_mask);
1924 spin_unlock_irqrestore(&desc->lock, flags);
1930 * Migrates the IRQ destination in the process context.
1932 static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
1934 struct irq_desc *desc = irq_to_desc(irq);
1936 if (desc->status & IRQ_LEVEL) {
1937 desc->status |= IRQ_MOVE_PENDING;
1938 desc->pending_mask = mask;
1939 migrate_irq_remapped_level(irq);
1943 migrate_ioapic_irq(irq, mask);
1947 asmlinkage void smp_irq_move_cleanup_interrupt(void)
1949 unsigned vector, me;
1954 me = smp_processor_id();
1955 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
1957 struct irq_desc *desc;
1958 struct irq_cfg *cfg;
1959 irq = __get_cpu_var(vector_irq)[vector];
1961 desc = irq_to_desc(irq);
1966 spin_lock(&desc->lock);
1967 if (!cfg->move_cleanup_count)
1970 if ((vector == cfg->vector) && cpu_isset(me, cfg->domain))
1973 __get_cpu_var(vector_irq)[vector] = -1;
1974 cfg->move_cleanup_count--;
1976 spin_unlock(&desc->lock);
1982 static void irq_complete_move(unsigned int irq)
1984 struct irq_cfg *cfg = irq_cfg(irq);
1985 unsigned vector, me;
1987 if (likely(!cfg->move_in_progress))
1990 vector = ~get_irq_regs()->orig_ax;
1991 me = smp_processor_id();
1992 if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
1993 cpumask_t cleanup_mask;
1995 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
1996 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
1997 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
1998 cfg->move_in_progress = 0;
2002 static inline void irq_complete_move(unsigned int irq) {}
2004 #ifdef CONFIG_INTR_REMAP
2005 static void ack_x2apic_level(unsigned int irq)
2010 static void ack_x2apic_edge(unsigned int irq)
2016 static void ack_apic_edge(unsigned int irq)
2018 irq_complete_move(irq);
2019 move_native_irq(irq);
2023 static void ack_apic_level(unsigned int irq)
2025 int do_unmask_irq = 0;
2027 irq_complete_move(irq);
2028 #ifdef CONFIG_GENERIC_PENDING_IRQ
2029 /* If we are moving the irq we need to mask it */
2030 if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
2032 mask_IO_APIC_irq(irq);
2037 * We must acknowledge the irq before we move it or the acknowledge will
2038 * not propagate properly.
2042 /* Now we can move and re-enable the irq */
2043 if (unlikely(do_unmask_irq)) {
2044 /* Only migrate the irq if the ack has been received.
2046 * On rare occasions the broadcast level triggered ack gets
2047 * delayed going to ioapics, and if we reprogram the
2048 * vector while Remote IRR is still set the irq will never
2051 * To prevent this scenario we read the Remote IRR bit
2052 * of the ioapic. This has two effects.
2053 * - On any sane system the read of the ioapic will
2054 * flush writes (and acks) going to the ioapic from
2056 * - We get to see if the ACK has actually been delivered.
2058 * Based on failed experiments of reprogramming the
2059 * ioapic entry from outside of irq context starting
2060 * with masking the ioapic entry and then polling until
2061 * Remote IRR was clear before reprogramming the
2062 * ioapic I don't trust the Remote IRR bit to be
2063 * completely accurate.
2065 * However there appears to be no other way to plug
2066 * this race, so if the Remote IRR bit is not
2067 * accurate and is causing problems then it is a hardware bug
2068 * and you can go talk to the chipset vendor about it.
2070 if (!io_apic_level_ack_pending(irq))
2071 move_masked_irq(irq);
2072 unmask_IO_APIC_irq(irq);
2076 static struct irq_chip ioapic_chip __read_mostly = {
2078 .startup = startup_ioapic_irq,
2079 .mask = mask_IO_APIC_irq,
2080 .unmask = unmask_IO_APIC_irq,
2081 .ack = ack_apic_edge,
2082 .eoi = ack_apic_level,
2084 .set_affinity = set_ioapic_affinity_irq,
2086 .retrigger = ioapic_retrigger_irq,
2089 #ifdef CONFIG_INTR_REMAP
2090 static struct irq_chip ir_ioapic_chip __read_mostly = {
2091 .name = "IR-IO-APIC",
2092 .startup = startup_ioapic_irq,
2093 .mask = mask_IO_APIC_irq,
2094 .unmask = unmask_IO_APIC_irq,
2095 .ack = ack_x2apic_edge,
2096 .eoi = ack_x2apic_level,
2098 .set_affinity = set_ir_ioapic_affinity_irq,
2100 .retrigger = ioapic_retrigger_irq,
2104 static inline void init_IO_APIC_traps(void)
2107 struct irq_desc *desc;
2108 struct irq_cfg *cfg;
2111 * NOTE! The local APIC isn't very good at handling
2112 * multiple interrupts at the same interrupt level.
2113 * As the interrupt level is determined by taking the
2114 * vector number and shifting that right by 4, we
2115 * want to spread these out a bit so that they don't
2116 * all fall in the same interrupt level.
2118 * Also, we've got to be careful not to trash gate
2119 * 0x80, because int 0x80 is hm, kind of importantish. ;)
2121 for_each_irq_cfg(cfg) {
2123 if (IO_APIC_IRQ(irq) && !cfg->vector) {
2125 * Hmm.. We don't have an entry for this,
2126 * so default to an old-fashioned 8259
2127 * interrupt if we can..
2130 make_8259A_irq(irq);
2132 desc = irq_to_desc(irq);
2133 /* Strange. Oh, well.. */
2134 desc->chip = &no_irq_chip;
2140 static void unmask_lapic_irq(unsigned int irq)
2144 v = apic_read(APIC_LVT0);
2145 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
2148 static void mask_lapic_irq(unsigned int irq)
2152 v = apic_read(APIC_LVT0);
2153 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
2156 static void ack_lapic_irq (unsigned int irq)
2161 static struct irq_chip lapic_chip __read_mostly = {
2162 .name = "local-APIC",
2163 .mask = mask_lapic_irq,
2164 .unmask = unmask_lapic_irq,
2165 .ack = ack_lapic_irq,
2168 static void lapic_register_intr(int irq)
2170 struct irq_desc *desc;
2172 desc = irq_to_desc(irq);
2173 desc->status &= ~IRQ_LEVEL;
2174 set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
2178 static void __init setup_nmi(void)
2181 * Dirty trick to enable the NMI watchdog ...
2182 * We put the 8259A master into AEOI mode and
2183 * unmask on all local APICs LVT0 as NMI.
2185 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
2186 * is from Maciej W. Rozycki - so we do not have to EOI from
2187 * the NMI handler or the timer interrupt.
2189 printk(KERN_INFO "activating NMI Watchdog ...");
2191 enable_NMI_through_LVT0();
2197 * This looks a bit hackish but it's about the only way of sending
2198 * a few INTA cycles to 8259As and any associated glue logic. ICR does
2199 * not support the ExtINT mode, unfortunately. We need to send these
2200 * cycles as some i82489DX-based boards have glue logic that keeps the
2201 * 8259A interrupt line asserted until INTA. --macro
2203 static inline void __init unlock_ExtINT_logic(void)
2206 struct IO_APIC_route_entry entry0, entry1;
2207 unsigned char save_control, save_freq_select;
2209 pin = find_isa_irq_pin(8, mp_INT);
2210 apic = find_isa_irq_apic(8, mp_INT);
2214 entry0 = ioapic_read_entry(apic, pin);
2216 clear_IO_APIC_pin(apic, pin);
2218 memset(&entry1, 0, sizeof(entry1));
2220 entry1.dest_mode = 0; /* physical delivery */
2221 entry1.mask = 0; /* unmask IRQ now */
2222 entry1.dest = hard_smp_processor_id();
2223 entry1.delivery_mode = dest_ExtINT;
2224 entry1.polarity = entry0.polarity;
2228 ioapic_write_entry(apic, pin, entry1);
2230 save_control = CMOS_READ(RTC_CONTROL);
2231 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
2232 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
2234 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
2239 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
2243 CMOS_WRITE(save_control, RTC_CONTROL);
2244 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
2245 clear_IO_APIC_pin(apic, pin);
2247 ioapic_write_entry(apic, pin, entry0);
2250 static int disable_timer_pin_1 __initdata;
2251 /* Actually the next is obsolete, but keep it for paranoid reasons -AK */
2252 static int __init disable_timer_pin_setup(char *arg)
2254 disable_timer_pin_1 = 1;
2257 early_param("disable_timer_pin_1", disable_timer_pin_setup);
2259 int timer_through_8259 __initdata;
2262 * This code may look a bit paranoid, but it's supposed to cooperate with
2263 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
2264 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
2265 * fanatically on his truly buggy board.
2267 * FIXME: really need to revamp this for modern platforms only.
2269 static inline void __init check_timer(void)
2271 struct irq_cfg *cfg = irq_cfg(0);
2272 int apic1, pin1, apic2, pin2;
2273 unsigned long flags;
2276 local_irq_save(flags);
2279 * get/set the timer IRQ vector:
2281 disable_8259A_irq(0);
2282 assign_irq_vector(0, TARGET_CPUS);
2285 * As IRQ0 is to be enabled in the 8259A, the virtual
2286 * wire has to be disabled in the local APIC.
2288 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
2291 pin1 = find_isa_irq_pin(0, mp_INT);
2292 apic1 = find_isa_irq_apic(0, mp_INT);
2293 pin2 = ioapic_i8259.pin;
2294 apic2 = ioapic_i8259.apic;
2296 apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
2297 "apic1=%d pin1=%d apic2=%d pin2=%d\n",
2298 cfg->vector, apic1, pin1, apic2, pin2);
2301 * Some BIOS writers are clueless and report the ExtINTA
2302 * I/O APIC input from the cascaded 8259A as the timer
2303 * interrupt input. So just in case, if only one pin
2304 * was found above, try it both directly and through the 8259A.
2308 #ifdef CONFIG_INTR_REMAP
2309 if (intr_remapping_enabled)
2310 panic("BIOS bug: timer not connected to IO-APIC");
2315 } else if (pin2 == -1) {
2322 * Ok, does IRQ0 through the IOAPIC work?
2325 add_pin_to_irq(0, apic1, pin1);
2326 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
2328 unmask_IO_APIC_irq(0);
2329 if (timer_irq_works()) {
2330 if (nmi_watchdog == NMI_IO_APIC) {
2332 enable_8259A_irq(0);
2334 if (disable_timer_pin_1 > 0)
2335 clear_IO_APIC_pin(0, pin1);
2338 #ifdef CONFIG_INTR_REMAP
2339 if (intr_remapping_enabled)
2340 panic("timer doesn't work through Interrupt-remapped IO-APIC");
2342 clear_IO_APIC_pin(apic1, pin1);
2344 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
2345 "8254 timer not connected to IO-APIC\n");
2347 apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
2348 "(IRQ0) through the 8259A ...\n");
2349 apic_printk(APIC_QUIET, KERN_INFO
2350 "..... (found apic %d pin %d) ...\n", apic2, pin2);
2352 * legacy devices should be connected to IO APIC #0
2354 replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
2355 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
2356 unmask_IO_APIC_irq(0);
2357 enable_8259A_irq(0);
2358 if (timer_irq_works()) {
2359 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
2360 timer_through_8259 = 1;
2361 if (nmi_watchdog == NMI_IO_APIC) {
2362 disable_8259A_irq(0);
2364 enable_8259A_irq(0);
2369 * Cleanup, just in case ...
2371 disable_8259A_irq(0);
2372 clear_IO_APIC_pin(apic2, pin2);
2373 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
2376 if (nmi_watchdog == NMI_IO_APIC) {
2377 apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
2378 "through the IO-APIC - disabling NMI Watchdog!\n");
2379 nmi_watchdog = NMI_NONE;
2382 apic_printk(APIC_QUIET, KERN_INFO
2383 "...trying to set up timer as Virtual Wire IRQ...\n");
2385 lapic_register_intr(0);
2386 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */
2387 enable_8259A_irq(0);
2389 if (timer_irq_works()) {
2390 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
2393 disable_8259A_irq(0);
2394 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
2395 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
2397 apic_printk(APIC_QUIET, KERN_INFO
2398 "...trying to set up timer as ExtINT IRQ...\n");
2402 apic_write(APIC_LVT0, APIC_DM_EXTINT);
2404 unlock_ExtINT_logic();
2406 if (timer_irq_works()) {
2407 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
2410 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
2411 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
2412 "report. Then try booting with the 'noapic' option.\n");
2414 local_irq_restore(flags);
2418 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
2419 * to devices. However there may be an I/O APIC pin available for
2420 * this interrupt regardless. The pin may be left unconnected, but
2421 * typically it will be reused as an ExtINT cascade interrupt for
2422 * the master 8259A. In the MPS case such a pin will normally be
2423 * reported as an ExtINT interrupt in the MP table. With ACPI
2424 * there is no provision for ExtINT interrupts, and in the absence
2425 * of an override it would be treated as an ordinary ISA I/O APIC
2426 * interrupt, that is edge-triggered and unmasked by default. We
2427 * used to do this, but it caused problems on some systems because
2428 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
2429 * the same ExtINT cascade interrupt to drive the local APIC of the
2430 * bootstrap processor. Therefore we refrain from routing IRQ2 to
2431 * the I/O APIC in all cases now. No actual device should request
2432 * it anyway. --macro
2434 #define PIC_IRQS (1<<2)
2436 void __init setup_IO_APIC(void)
2440 * calling enable_IO_APIC() is moved to setup_local_APIC for BP
2443 io_apic_irqs = ~PIC_IRQS;
2445 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
2448 setup_IO_APIC_irqs();
2449 init_IO_APIC_traps();
2453 struct sysfs_ioapic_data {
2454 struct sys_device dev;
2455 struct IO_APIC_route_entry entry[0];
2457 static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
2459 static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
2461 struct IO_APIC_route_entry *entry;
2462 struct sysfs_ioapic_data *data;
2465 data = container_of(dev, struct sysfs_ioapic_data, dev);
2466 entry = data->entry;
2467 for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ )
2468 *entry = ioapic_read_entry(dev->id, i);
2473 static int ioapic_resume(struct sys_device *dev)
2475 struct IO_APIC_route_entry *entry;
2476 struct sysfs_ioapic_data *data;
2477 unsigned long flags;
2478 union IO_APIC_reg_00 reg_00;
2481 data = container_of(dev, struct sysfs_ioapic_data, dev);
2482 entry = data->entry;
2484 spin_lock_irqsave(&ioapic_lock, flags);
2485 reg_00.raw = io_apic_read(dev->id, 0);
2486 if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) {
2487 reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid;
2488 io_apic_write(dev->id, 0, reg_00.raw);
2490 spin_unlock_irqrestore(&ioapic_lock, flags);
2491 for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
2492 ioapic_write_entry(dev->id, i, entry[i]);
static struct sysdev_class ioapic_sysdev_class = {
	.name = "ioapic",
	.suspend = ioapic_suspend,
	.resume = ioapic_resume,
};
static int __init ioapic_init_sysfs(void)
{
	struct sys_device * dev;
	int i, size, error;

	error = sysdev_class_register(&ioapic_sysdev_class);
	if (error)
		return error;

	for (i = 0; i < nr_ioapics; i++) {
		size = sizeof(struct sys_device) + nr_ioapic_registers[i]
			* sizeof(struct IO_APIC_route_entry);
		mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
		if (!mp_ioapic_data[i]) {
			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
			continue;
		}
		dev = &mp_ioapic_data[i]->dev;
		dev->id = i;
		dev->cls = &ioapic_sysdev_class;
		error = sysdev_register(dev);
		if (error) {
			kfree(mp_ioapic_data[i]);
			mp_ioapic_data[i] = NULL;
			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
			continue;
		}
	}

	return 0;
}

device_initcall(ioapic_init_sysfs);
/*
 * Dynamic irq allocation and deallocation
 */
unsigned int create_irq_nr(unsigned int irq_want)
{
	/* Allocate an unused irq */
	unsigned int irq;
	unsigned int new;
	unsigned long flags;
	struct irq_cfg *cfg_new;

#ifndef CONFIG_HAVE_SPARSE_IRQ
	irq_want = nr_irqs - 1;
#endif

	irq = 0;
	spin_lock_irqsave(&vector_lock, flags);
	for (new = irq_want; new > 0; new--) {
		if (platform_legacy_irq(new))
			continue;
		cfg_new = irq_cfg(new);
		if (cfg_new && cfg_new->vector != 0)
			continue;
		/* check if we need to create one */
		if (!cfg_new)
			cfg_new = irq_cfg_alloc(new);
		if (__assign_irq_vector(new, TARGET_CPUS) == 0)
			irq = new;
		break;
	}
	spin_unlock_irqrestore(&vector_lock, flags);

	if (irq > 0)
		dynamic_irq_init(irq);

	return irq;
}
int create_irq(void)
{
	int irq;

	irq = create_irq_nr(nr_irqs - 1);
	if (irq == 0)
		irq = -1;

	return irq;
}

void destroy_irq(unsigned int irq)
{
	unsigned long flags;

	dynamic_irq_cleanup(irq);

#ifdef CONFIG_INTR_REMAP
	free_irte(irq);
#endif
	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	spin_unlock_irqrestore(&vector_lock, flags);
}
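/*
 * Usage sketch (illustration only, not part of the upstream file): callers
 * such as the MSI code below pair create_irq()/create_irq_nr() with
 * destroy_irq().  Assuming create_irq() returns a negative value when no
 * free vector could be found, as in this file; the helper name is made up:
 */
static int __maybe_unused example_dynamic_irq(void)
{
	int irq = create_irq();

	if (irq < 0)
		return -ENOSPC;

	/* ... point a device/chip at this vector, request_irq(), etc ... */

	destroy_irq(irq);
	return 0;
}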
/*
 * MSI message composition
 */
#ifdef CONFIG_PCI_MSI
static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
{
	struct irq_cfg *cfg;
	int err;
	unsigned dest;
	cpumask_t tmp;

	tmp = TARGET_CPUS;
	err = assign_irq_vector(irq, tmp);
	if (err)
		return err;

	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, tmp);
	dest = cpu_mask_to_apicid(tmp);

#ifdef CONFIG_INTR_REMAP
	if (irq_remapped(irq)) {
		struct irte irte;
		int ir_index;
		u16 sub_handle;

		ir_index = map_irq_to_irte_handle(irq, &sub_handle);
		BUG_ON(ir_index == -1);

		memset(&irte, 0, sizeof(irte));

		irte.present = 1;
		irte.dst_mode = INT_DEST_MODE;
		irte.trigger_mode = 0; /* edge */
		irte.dlvry_mode = INT_DELIVERY_MODE;
		irte.vector = cfg->vector;
		irte.dest_id = IRTE_DEST(dest);

		modify_irte(irq, &irte);

		msg->address_hi = MSI_ADDR_BASE_HI;
		msg->data = sub_handle;
		msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
				  MSI_ADDR_IR_SHV |
				  MSI_ADDR_IR_INDEX1(ir_index) |
				  MSI_ADDR_IR_INDEX2(ir_index);
	} else
#endif
	{
		msg->address_hi = MSI_ADDR_BASE_HI;
		msg->address_lo =
			MSI_ADDR_BASE_LO |
			((INT_DEST_MODE == 0) ?
				MSI_ADDR_DEST_MODE_PHYSICAL:
				MSI_ADDR_DEST_MODE_LOGICAL) |
			((INT_DELIVERY_MODE != dest_LowestPrio) ?
				MSI_ADDR_REDIRECTION_CPU:
				MSI_ADDR_REDIRECTION_LOWPRI) |
			MSI_ADDR_DEST_ID(dest);

		msg->data =
			MSI_DATA_TRIGGER_EDGE |
			MSI_DATA_LEVEL_ASSERT |
			((INT_DELIVERY_MODE != dest_LowestPrio) ?
				MSI_DATA_DELIVERY_FIXED:
				MSI_DATA_DELIVERY_LOWPRI) |
			MSI_DATA_VECTOR(cfg->vector);
	}
	return err;
}
#ifdef CONFIG_SMP
static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg;
	struct msi_msg msg;
	unsigned int dest;
	cpumask_t tmp;
	struct irq_desc *desc;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	read_msi_msg(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	write_msi_msg(irq, &msg);
	desc = irq_to_desc(irq);
	desc->affinity = mask;
}
#ifdef CONFIG_INTR_REMAP
/*
 * Migrate the MSI irq to another cpumask. This migration is
 * done in the process context using interrupt-remapping hardware.
 */
static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg;
	unsigned int dest;
	cpumask_t tmp, cleanup_mask;
	struct irte irte;
	struct irq_desc *desc;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (get_irte(irq, &irte))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * atomically update the IRTE with the new destination and vector.
	 */
	modify_irte(irq, &irte);

	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, time to clean up the previous
	 * vector allocation.
	 */
	if (cfg->move_in_progress) {
		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		cfg->move_in_progress = 0;
	}

	desc = irq_to_desc(irq);
	desc->affinity = mask;
}
#endif
#endif /* CONFIG_SMP */
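/*
 * Illustration only (not part of the upstream file): the set_affinity
 * handlers above are reached through the generic irq layer when an irq's
 * affinity changes, e.g. via /proc/irq/<N>/smp_affinity.  A hedged sketch,
 * assuming the irq_desc/irq_chip layout used elsewhere in this file
 * (desc->chip, cpumask_t-based set_affinity); the function name is made up.
 */
#ifdef CONFIG_SMP
static void __maybe_unused example_change_affinity(unsigned int irq,
						   cpumask_t new_mask)
{
	struct irq_desc *desc = irq_to_desc(irq);

	/* rewrites the MSI message (or IRTE) so 'irq' targets new_mask */
	if (desc->chip && desc->chip->set_affinity)
		desc->chip->set_affinity(irq, new_mask);
}
#endif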
/*
 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI or MSI-X Capability Structure.
 */
static struct irq_chip msi_chip = {
	.name		= "PCI-MSI",
	.unmask		= unmask_msi_irq,
	.mask		= mask_msi_irq,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= set_msi_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
#ifdef CONFIG_INTR_REMAP
static struct irq_chip msi_ir_chip = {
	.name		= "IR-PCI-MSI",
	.unmask		= unmask_msi_irq,
	.mask		= mask_msi_irq,
	.ack		= ack_x2apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= ir_set_msi_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
/*
 * Map the PCI dev to the corresponding remapping hardware unit
 * and allocate 'nvec' consecutive interrupt-remapping table entries
 * in it.
 */
static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
{
	struct intel_iommu *iommu;
	int index;

	iommu = map_dev_to_ir(dev);
	if (!iommu) {
		printk(KERN_ERR
		       "Unable to map PCI %s to iommu\n", pci_name(dev));
		return -ENOENT;
	}

	index = alloc_irte(iommu, irq, nvec);
	if (index < 0) {
		printk(KERN_ERR
		       "Unable to allocate %d IRTE for PCI %s\n", nvec,
		       pci_name(dev));
		return -ENOSPC;
	}
	return index;
}
#endif
static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
{
	int ret;
	struct msi_msg msg;

	ret = msi_compose_msg(dev, irq, &msg);
	if (ret < 0)
		return ret;

	set_irq_msi(irq, desc);
	write_msi_msg(irq, &msg);

#ifdef CONFIG_INTR_REMAP
	if (irq_remapped(irq)) {
		struct irq_desc *desc = irq_to_desc(irq);
		/*
		 * irq migration in process context
		 */
		desc->status |= IRQ_MOVE_PCNTXT;
		set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
	} else
#endif
		set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");

	return 0;
}
static unsigned int build_irq_for_pci_dev(struct pci_dev *dev)
{
	unsigned int irq;

	irq = dev->bus->number;
	irq <<= 8;
	irq |= dev->devfn;
	irq <<= 12;

	return irq;
}
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
	int irq, ret;
	unsigned int irq_want;

	irq_want = build_irq_for_pci_dev(dev) + 0x100;

	irq = create_irq_nr(irq_want);
	if (irq == 0)
		return -1;

#ifdef CONFIG_INTR_REMAP
	if (!intr_remapping_enabled)
		goto no_ir;

	ret = msi_alloc_irte(dev, irq, 1);
	if (ret < 0)
		goto error;
no_ir:
#endif
	ret = setup_msi_irq(dev, desc, irq);
	if (ret < 0) {
		destroy_irq(irq);
		return ret;
	}
	return 0;

#ifdef CONFIG_INTR_REMAP
error:
	destroy_irq(irq);
	return ret;
#endif
}
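/*
 * Driver-side usage sketch (illustration only, not part of the upstream
 * file): arch_setup_msi_irq() above is reached from pci_enable_msi(); a
 * driver only sees the resulting dev->irq.  'example_msi_handler' and
 * 'example_enable_msi' are made-up names.
 */
static irqreturn_t example_msi_handler(int irq, void *dev_id)
{
	/* acknowledge the device here */
	return IRQ_HANDLED;
}

static int __maybe_unused example_enable_msi(struct pci_dev *dev)
{
	int err = pci_enable_msi(dev);	/* ends up in arch_setup_msi_irq() */

	if (err)
		return err;

	return request_irq(dev->irq, example_msi_handler, 0, "example-msi", dev);
}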
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	unsigned int irq;
	int ret, sub_handle;
	struct msi_desc *desc;
	unsigned int irq_want;

#ifdef CONFIG_INTR_REMAP
	struct intel_iommu *iommu = 0;
	int index = 0;
#endif

	irq_want = build_irq_for_pci_dev(dev) + 0x100;
	sub_handle = 0;
	list_for_each_entry(desc, &dev->msi_list, list) {
		irq = create_irq_nr(irq_want--);
		if (irq == 0)
			return -1;
#ifdef CONFIG_INTR_REMAP
		if (!intr_remapping_enabled)
			goto no_ir;

		if (!sub_handle) {
			/*
			 * allocate the consecutive block of IRTEs
			 * for 'nvec'
			 */
			index = msi_alloc_irte(dev, irq, nvec);
			if (index < 0) {
				ret = index;
				goto error;
			}
		} else {
			iommu = map_dev_to_ir(dev);
			if (!iommu) {
				ret = -ENOENT;
				goto error;
			}
			/*
			 * set up the mapping between the irq and the IRTE
			 * base index, the sub_handle pointing to the
			 * appropriate interrupt remap table entry.
			 */
			set_irte_irq(irq, iommu, index, sub_handle);
		}
no_ir:
#endif
		ret = setup_msi_irq(dev, desc, irq);
		if (ret < 0)
			goto error;
		sub_handle++;
	}
	return 0;

error:
	destroy_irq(irq);
	return ret;
}
void arch_teardown_msi_irq(unsigned int irq)
{
	destroy_irq(irq);
}

#ifdef CONFIG_DMAR
#ifdef CONFIG_SMP
static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg;
	struct msi_msg msg;
	unsigned int dest;
	cpumask_t tmp;
	struct irq_desc *desc;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	dmar_msi_read(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	dmar_msi_write(irq, &msg);
	desc = irq_to_desc(irq);
	desc->affinity = mask;
}
#endif /* CONFIG_SMP */
struct irq_chip dmar_msi_type = {
	.name = "DMAR_MSI",
	.unmask = dmar_msi_unmask,
	.mask = dmar_msi_mask,
	.ack = ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity = dmar_msi_set_affinity,
#endif
	.retrigger = ioapic_retrigger_irq,
};
int arch_setup_dmar_msi(unsigned int irq)
{
	int ret;
	struct msi_msg msg;

	ret = msi_compose_msg(NULL, irq, &msg);
	if (ret < 0)
		return ret;
	dmar_msi_write(irq, &msg);
	set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
		"edge");
	return 0;
}
#endif /* CONFIG_DMAR */

#endif /* CONFIG_PCI_MSI */
/*
 * Hypertransport interrupt support
 */
#ifdef CONFIG_HT_IRQ

#ifdef CONFIG_SMP
static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
{
	struct ht_irq_msg msg;
	fetch_ht_irq_msg(irq, &msg);

	msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
	msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);

	msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
	msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);

	write_ht_irq_msg(irq, &msg);
}
static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg;
	unsigned int dest;
	cpumask_t tmp;
	struct irq_desc *desc;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	target_ht_irq(irq, dest, cfg->vector);
	desc = irq_to_desc(irq);
	desc->affinity = mask;
}
#endif
static struct irq_chip ht_irq_chip = {
	.name		= "PCI-HT",
	.mask		= mask_ht_irq,
	.unmask		= unmask_ht_irq,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= set_ht_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
{
	struct irq_cfg *cfg;
	int err;
	cpumask_t tmp;

	tmp = TARGET_CPUS;
	err = assign_irq_vector(irq, tmp);
	if (!err) {
		struct ht_irq_msg msg;
		unsigned dest;

		cfg = irq_cfg(irq);
		cpus_and(tmp, cfg->domain, tmp);
		dest = cpu_mask_to_apicid(tmp);

		msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);

		msg.address_lo =
			HT_IRQ_LOW_BASE |
			HT_IRQ_LOW_DEST_ID(dest) |
			HT_IRQ_LOW_VECTOR(cfg->vector) |
			((INT_DEST_MODE == 0) ?
				HT_IRQ_LOW_DM_PHYSICAL :
				HT_IRQ_LOW_DM_LOGICAL) |
			HT_IRQ_LOW_RQEOI_EDGE |
			((INT_DELIVERY_MODE != dest_LowestPrio) ?
				HT_IRQ_LOW_MT_FIXED :
				HT_IRQ_LOW_MT_ARBITRATED) |
			HT_IRQ_LOW_IRQ_MASKED;

		write_ht_irq_msg(irq, &msg);

		set_irq_chip_and_handler_name(irq, &ht_irq_chip,
					      handle_edge_irq, "edge");
	}
	return err;
}
#endif /* CONFIG_HT_IRQ */
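/*
 * Driver-side usage sketch (illustration only, not part of the upstream
 * file): arch_setup_ht_irq() above is reached from ht_create_irq() in
 * drivers/pci/htirq.c.  'example_ht_handler' and the idx value 0 are
 * assumptions for the sketch.
 */
#ifdef CONFIG_HT_IRQ
static irqreturn_t example_ht_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int __maybe_unused example_enable_ht_irq(struct pci_dev *dev)
{
	int irq = ht_create_irq(dev, 0);	/* first HT irq of this device */

	if (irq < 0)
		return irq;

	return request_irq(irq, example_ht_handler, 0, "example-ht", dev);
}
#endif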
/* --------------------------------------------------------------------------
			ACPI-based IOAPIC Configuration
   -------------------------------------------------------------------------- */

#ifdef CONFIG_ACPI

#define IO_APIC_MAX_ID		0xFE
int __init io_apic_get_redir_entries (int ioapic)
{
	union IO_APIC_reg_01	reg_01;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_01.raw = io_apic_read(ioapic, 1);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return reg_01.bits.entries;
}
int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
{
	if (!IO_APIC_IRQ(irq)) {
		apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
			ioapic);
		return -EINVAL;
	}

	/*
	 * IRQs < 16 are already in the irq_2_pin[] map
	 */
	if (irq >= 16)
		add_pin_to_irq(irq, ioapic, pin);

	setup_IO_APIC_irq(ioapic, pin, irq, triggering, polarity);

	return 0;
}
int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
{
	int i;

	if (skip_ioapic_setup)
		return -1;

	for (i = 0; i < mp_irq_entries; i++)
		if (mp_irqs[i].mp_irqtype == mp_INT &&
		    mp_irqs[i].mp_srcbusirq == bus_irq)
			break;
	if (i >= mp_irq_entries)
		return -1;

	*trigger = irq_trigger(i);
	*polarity = irq_polarity(i);
	return 0;
}

#endif /* CONFIG_ACPI */
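/*
 * Usage sketch (illustration only, not part of the upstream file): ACPI
 * boot code routes a GSI through the two helpers above roughly like this.
 * The trigger/polarity encoding (1 = level / active low, the usual PCI
 * defaults) and the 'example_register_gsi' name are assumptions, not the
 * upstream caller.
 */
#ifdef CONFIG_ACPI
static int __maybe_unused example_register_gsi(int ioapic, int pin, int gsi)
{
	int trigger = 1;	/* level triggered */
	int polarity = 1;	/* active low */

	/* legacy ISA IRQs may carry an Interrupt Source Override */
	if (gsi < 16)
		acpi_get_override_irq(gsi, &trigger, &polarity);

	return io_apic_set_pci_routing(ioapic, pin, gsi, trigger, polarity);
}
#endif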
/*
 * This function currently is only a helper for the i386 smp boot process where
 * we need to reprogram the ioredtbls to cater for the cpus which have come online
 * so the mask in all cases should simply be TARGET_CPUS
 */
void __init setup_ioapic_dest(void)
{
	int pin, ioapic, irq, irq_entry;
	struct irq_cfg *cfg;

	if (skip_ioapic_setup == 1)
		return;

	for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
		for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
			irq_entry = find_irq_entry(ioapic, pin, mp_INT);
			if (irq_entry == -1)
				continue;
			irq = pin_2_irq(irq_entry, ioapic, pin);

			/* setup_IO_APIC_irqs could fail to get vector for some device
			 * when you have too many devices, because at that time only boot
			 * cpu is online.
			 */
			cfg = irq_cfg(irq);
			if (!cfg->vector)
				setup_IO_APIC_irq(ioapic, pin, irq,
						  irq_trigger(irq_entry),
						  irq_polarity(irq_entry));
#ifdef CONFIG_INTR_REMAP
			else if (intr_remapping_enabled)
				set_ir_ioapic_affinity_irq(irq, TARGET_CPUS);
#endif
			else
				set_ioapic_affinity_irq(irq, TARGET_CPUS);
		}
	}
}
#define IOAPIC_RESOURCE_NAME_SIZE 11

static struct resource *ioapic_resources;

static struct resource * __init ioapic_setup_resources(void)
{
	unsigned long n;
	struct resource *res;
	char *mem;
	int i;

	if (nr_ioapics <= 0)
		return NULL;

	n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
	n *= nr_ioapics;

	mem = alloc_bootmem(n);
	res = (void *)mem;

	if (mem != NULL) {
		mem += sizeof(struct resource) * nr_ioapics;

		for (i = 0; i < nr_ioapics; i++) {
			res[i].name = mem;
			res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			sprintf(mem, "IOAPIC %u", i);
			mem += IOAPIC_RESOURCE_NAME_SIZE;
		}
	}

	ioapic_resources = res;

	return res;
}
void __init ioapic_init_mappings(void)
{
	unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
	struct resource *ioapic_res;
	int i;

	ioapic_res = ioapic_setup_resources();
	for (i = 0; i < nr_ioapics; i++) {
		if (smp_found_config) {
			ioapic_phys = mp_ioapics[i].mp_apicaddr;
		} else {
			ioapic_phys = (unsigned long)
				alloc_bootmem_pages(PAGE_SIZE);
			ioapic_phys = __pa(ioapic_phys);
		}
		set_fixmap_nocache(idx, ioapic_phys);
		apic_printk(APIC_VERBOSE,
			    "mapped IOAPIC to %016lx (%016lx)\n",
			    __fix_to_virt(idx), ioapic_phys);
		idx++;

		if (ioapic_res != NULL) {
			ioapic_res->start = ioapic_phys;
			ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
			ioapic_res++;
		}
	}
}
static int __init ioapic_insert_resources(void)
{
	int i;
	struct resource *r = ioapic_resources;

	if (!r) {
		printk(KERN_ERR
		       "IO APIC resources could not be allocated.\n");
		return -1;
	}

	for (i = 0; i < nr_ioapics; i++) {
		insert_resource(&iomem_resource, r);
		r++;
	}

	return 0;
}
/* Insert the IO APIC resources after PCI initialization has occurred to handle
 * IO APICs that are mapped in on a BAR in PCI space. */
late_initcall(ioapic_insert_resources);