#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"
#include <acpi/acpi.h>
#include <asm/pci-direct.h>
#include "pci.h"

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
static int ir_ioapic_num, ir_hpet_num;
int intr_remapping_enabled;

static int disable_intremap;
static __init int setup_nointremap(char *str)
{
        disable_intremap = 1;
        return 0;
}
early_param("nointremap", setup_nointremap);
struct irq_2_iommu {
        struct intel_iommu *iommu;
        u16 irte_index;
        u16 sub_handle;
        u8  irte_mask;
};

#ifdef CONFIG_GENERIC_HARDIRQS
static struct irq_2_iommu *get_one_free_irq_2_iommu(int node)
{
        struct irq_2_iommu *iommu;

        iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
        printk(KERN_DEBUG "alloc irq_2_iommu on node %d\n", node);

        return iommu;
}

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
        struct irq_desc *desc;

        desc = irq_to_desc(irq);
        if (WARN_ON_ONCE(!desc))
                return NULL;

        return desc->irq_2_iommu;
}

static struct irq_2_iommu *irq_2_iommu_alloc_node(unsigned int irq, int node)
{
        struct irq_desc *desc;
        struct irq_2_iommu *irq_iommu;

        /*
         * Allocate the irq_desc if it is not allocated already.
         */
        desc = irq_to_desc_alloc_node(irq, node);
        if (!desc) {
                printk(KERN_INFO "cannot get irq_desc for %d\n", irq);
                return NULL;
        }

        irq_iommu = desc->irq_2_iommu;
        if (!irq_iommu)
                desc->irq_2_iommu = get_one_free_irq_2_iommu(node);

        return desc->irq_2_iommu;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
        return irq_2_iommu_alloc_node(irq, cpu_to_node(boot_cpu_id));
}

#else /* !CONFIG_GENERIC_HARDIRQS */

static struct irq_2_iommu irq_2_iommuX[NR_IRQS];

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
        if (irq < nr_irqs)
                return &irq_2_iommuX[irq];

        return NULL;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
        return irq_2_iommu(irq);
}
#endif

static DEFINE_SPINLOCK(irq_2_ir_lock);
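
/*
 * Look up the irq_2_iommu mapping for an IRQ, returning it only if the
 * IRQ has actually been bound to an IOMMU (i.e. it is remapped).
 */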
static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
{
        struct irq_2_iommu *irq_iommu;

        irq_iommu = irq_2_iommu(irq);
        if (!irq_iommu)
                return NULL;

        if (!irq_iommu->iommu)
                return NULL;

        return irq_iommu;
}

int irq_remapped(int irq)
{
        return valid_irq_2_iommu(irq) != NULL;
}

int get_irte(int irq, struct irte *entry)
{
        int index;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        if (!entry)
                return -1;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        *entry = *(irq_iommu->iommu->ir_table->base + index);

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
        return 0;
}
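
/*
 * Allocate 'count' contiguous IRTEs for an IRQ by scanning the remap
 * table linearly for a free run.  'count' is rounded up to a power of
 * two so the range can be described by a single handle mask.
 */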
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
        struct ir_table *table = iommu->ir_table;
        struct irq_2_iommu *irq_iommu;
        u16 index, start_index;
        unsigned int mask = 0;
        unsigned long flags;
        int i;

        if (!count)
                return -1;

#ifndef CONFIG_SPARSE_IRQ
        /* protect irq_2_iommu_alloc later */
        if (irq >= nr_irqs)
                return -1;
#endif

        /* start the IRTE search from index 0 */
        index = start_index = 0;

        if (count > 1) {
                count = __roundup_pow_of_two(count);
                mask = ilog2(count);
        }

        if (mask > ecap_max_handle_mask(iommu->ecap)) {
                printk(KERN_ERR
                       "Requested mask %x exceeds the max invalidation handle"
                       " mask value %Lx\n", mask,
                       ecap_max_handle_mask(iommu->ecap));
                return -1;
        }

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        do {
                for (i = index; i < index + count; i++)
                        if (table->base[i].present)
                                break;
                /* empty index found */
                if (i == index + count)
                        break;

                index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
                if (index == start_index) {
                        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                        printk(KERN_ERR "can't allocate an IRTE\n");
                        return -1;
                }
        } while (1);

        for (i = index; i < index + count; i++)
                table->base[i].present = 1;

        irq_iommu = irq_2_iommu_alloc(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                printk(KERN_ERR "can't allocate irq_2_iommu\n");
                return -1;
        }

        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = mask;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return index;
}
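
/*
 * Queue an interrupt entry cache (IEC) invalidation for the given index
 * range and wait synchronously for the hardware to complete it.
 */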
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
        struct qi_desc desc;

        desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
                   | QI_IEC_SELECTIVE;
        desc.high = 0;

        return qi_submit_sync(&desc, iommu);
}
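
/*
 * Return the base IRTE index for an IRQ and report its sub-handle
 * through *sub_handle.
 */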
int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
        int index;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        *sub_handle = irq_iommu->sub_handle;
        index = irq_iommu->irte_index;
        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
        return index;
}

int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);

        irq_iommu = irq_2_iommu_alloc(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                printk(KERN_ERR "can't allocate irq_2_iommu\n");
                return -1;
        }

        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = subhandle;
        irq_iommu->irte_mask = 0;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return 0;
}

int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return 0;
}

int modify_irte(int irq, struct irte *irte_modified)
{
        int rc;
        int index;
        struct irte *irte;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        irte = &iommu->ir_table->base[index];

        /* Update the IRTE atomically, 64 bits at a time. */
        set_64bit((unsigned long *)&irte->low, irte_modified->low);
        set_64bit((unsigned long *)&irte->high, irte_modified->high);
        __iommu_flush_cache(iommu, irte, sizeof(*irte));

        rc = qi_flush_iec(iommu, index, 0);
        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

int flush_irte(int irq)
{
        int rc;
        int index;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;

        rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
        int i;

        for (i = 0; i < MAX_HPET_TBS; i++)
                if (ir_hpet[i].id == hpet_id)
                        return ir_hpet[i].iommu;
        return NULL;
}

struct intel_iommu *map_ioapic_to_ir(int apic)
{
        int i;

        for (i = 0; i < MAX_IO_APICS; i++)
                if (ir_ioapic[i].id == apic)
                        return ir_ioapic[i].iommu;
        return NULL;
}

struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
        struct dmar_drhd_unit *drhd;

        drhd = dmar_find_matched_drhd_unit(dev);
        if (!drhd)
                return NULL;

        return drhd->iommu;
}
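
/*
 * Zero out every IRTE covered by this mapping and flush the interrupt
 * entry cache.  Mappings that only hold a sub-handle leave the entries
 * to their owner (sub_handle == 0).
 */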
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
        struct irte *start, *entry, *end;
        struct intel_iommu *iommu;
        int index;

        if (irq_iommu->sub_handle)
                return 0;

        iommu = irq_iommu->iommu;
        index = irq_iommu->irte_index + irq_iommu->sub_handle;

        start = iommu->ir_table->base + index;
        end = start + (1 << irq_iommu->irte_mask);

        for (entry = start; entry < end; entry++) {
                set_64bit((unsigned long *)&entry->low, 0);
                set_64bit((unsigned long *)&entry->high, 0);
        }

        return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

int free_irte(int irq)
{
        int rc = 0;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        rc = clear_entries(irq_iommu);

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

/*
 * source validation type
 */
#define SVT_NO_VERIFY      0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ  0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS     0x2  /* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16       0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1  0x1  /* verify most significant 13 bits, ignore
                              * the third least significant bit
                              */
#define SQ_13_IGNORE_2  0x2  /* verify most significant 13 bits, ignore
                              * the second and third least significant bits
                              */
#define SQ_13_IGNORE_3  0x3  /* verify most significant 13 bits, ignore
                              * the three least significant bits
                              */

/*
 * Set the SVT, SQ and SID fields of an IRTE so the hardware can verify
 * the source-ids of interrupt requests.
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
                         unsigned int sq, unsigned int sid)
{
        irte->svt = svt;
        irte->sq = sq;
        irte->sid = sid;
}

int set_ioapic_sid(struct irte *irte, int apic)
{
        int i;
        u16 sid = 0;

        if (!irte)
                return -1;

        for (i = 0; i < MAX_IO_APICS; i++) {
                if (ir_ioapic[i].id == apic) {
                        sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
                        break;
                }
        }

        if (sid == 0) {
                pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
                return -1;
        }

        set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

        return 0;
}

int set_hpet_sid(struct irte *irte, u8 id)
{
        int i;
        u16 sid = 0;

        if (!irte)
                return -1;

        for (i = 0; i < MAX_HPET_TBS; i++) {
                if (ir_hpet[i].id == id) {
                        sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
                        break;
                }
        }

        if (sid == 0) {
                pr_warning("Failed to set source-id of HPET block (%d)\n", id);
                return -1;
        }

        /*
         * Should really use SQ_ALL_16, but some platforms are broken.
         * Until the right quirks are in place for those platforms, use
         * SQ_13_IGNORE_3 for now.
         */
        set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

        return 0;
}
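
/*
 * MSI source-id verification depends on where the device sits: a PCIe
 * device (or Root Complex integrated device) is verified by its exact
 * requester-id, while a device behind a bridge is verified by the id
 * the bridge presents on its behalf.
 */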
int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
        struct pci_dev *bridge;

        if (!irte || !dev)
                return -1;

        /* PCIe device or Root Complex integrated PCI device */
        if (dev->is_pcie || !dev->bus->parent) {
                set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
                             (dev->bus->number << 8) | dev->devfn);
                return 0;
        }

        bridge = pci_find_upstream_pcie_bridge(dev);
        if (bridge) {
                if (bridge->is_pcie) /* this is a PCIe-to-PCI/PCI-X bridge */
                        set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
                                (bridge->bus->number << 8) | dev->bus->number);
                else /* this is a legacy PCI bridge */
                        set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
                                (bridge->bus->number << 8) | bridge->devfn);
        }

        return 0;
}
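
/*
 * Program the interrupt remapping table address into the IRTA register,
 * flush the interrupt entry cache, and then turn interrupt remapping on,
 * waiting for the hardware to acknowledge each step.
 */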
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
        u64 addr;
        u32 sts;
        unsigned long flags;

        addr = virt_to_phys((void *)iommu->ir_table->base);

        spin_lock_irqsave(&iommu->register_lock, flags);

        dmar_writeq(iommu->reg + DMAR_IRTA_REG,
                    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

        /* Set interrupt-remapping table pointer */
        iommu->gcmd |= DMA_GCMD_SIRTP;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRTPS), sts);
        spin_unlock_irqrestore(&iommu->register_lock, flags);

        /*
         * Global invalidation of the interrupt entry cache before enabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        spin_lock_irqsave(&iommu->register_lock, flags);

        /* Enable interrupt-remapping */
        iommu->gcmd |= DMA_GCMD_IRE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRES), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flags);
}
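
/*
 * Allocate the interrupt remapping table for an IOMMU and hand it to
 * the hardware.
 */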
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
        struct ir_table *ir_table;
        struct page *pages;

        ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
                                             GFP_ATOMIC);
        if (!iommu->ir_table)
                return -ENOMEM;

        pages = alloc_pages(GFP_ATOMIC | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);
        if (!pages) {
                printk(KERN_ERR "failed to allocate pages of order %d\n",
                       INTR_REMAP_PAGE_ORDER);
                kfree(iommu->ir_table);
                return -ENOMEM;
        }

        ir_table->base = page_address(pages);

        iommu_set_intr_remapping(iommu, mode);

        return 0;
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
{
        unsigned long flags;
        u32 sts;

        if (!ecap_ir_support(iommu->ecap))
                return;

        /*
         * Global invalidation of the interrupt entry cache before disabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        spin_lock_irqsave(&iommu->register_lock, flags);

        sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_IRES))
                goto end;

        iommu->gcmd &= ~DMA_GCMD_IRE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, !(sts & DMA_GSTS_IRES), sts);

end:
        spin_unlock_irqrestore(&iommu->register_lock, flags);
}
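
/*
 * Interrupt remapping is usable only if every DRHD in the system
 * advertises it; a single unit without support disables the feature.
 */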
int __init intr_remapping_supported(void)
{
        struct dmar_drhd_unit *drhd;

        if (disable_intremap)
                return 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        return 0;
        }

        return 1;
}
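
/*
 * Enable interrupt remapping system-wide: tear down any state left over
 * from the firmware or a previous kernel, verify EIM support when x2APIC
 * mode is requested, enable queued invalidation everywhere, and finally
 * program the remapping tables on each IOMMU.
 */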
int __init enable_intr_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                /*
                 * If queued invalidation is already initialized,
                 * we must not disable it.
                 */
                if (iommu->qi)
                        continue;

                /* Clear previous faults. */
                dmar_fault(-1, iommu);

                /*
                 * Disable intr remapping and queued invalidation, if already
                 * enabled prior to OS handover.
                 */
                iommu_disable_intr_remapping(iommu);
                dmar_disable_qi(iommu);
        }

        /* Check for interrupt-remapping support. */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;
                if (eim && !ecap_eim_support(iommu->ecap)) {
                        printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD,"
                               " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
                        return -1;
                }
        }

        /* Enable queued invalidation for all the DRHD's. */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;
                int ret = dmar_enable_qi(iommu);

                if (ret) {
                        printk(KERN_ERR "DRHD %Lx: failed to enable queued"
                               " invalidation, ecap %Lx, ret %d\n",
                               drhd->reg_base_addr, iommu->ecap, ret);
                        return -1;
                }
        }

        /* Setup interrupt-remapping for all the DRHD's now. */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;
                if (setup_intr_remapping(iommu, eim))
                        goto error;
                setup = 1;
        }

        if (!setup)
                goto error;

        intr_remapping_enabled = 1;
        return 0;

error:
        /* Handle the error condition gracefully here! */
        return -1;
}
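
/*
 * Walk the ACPI device-scope PCI path down to the HPET block, recording
 * the bus/devfn it issues requests with, so its interrupts can later be
 * source-id verified.
 */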
static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
                                    struct intel_iommu *iommu)
{
        struct acpi_dmar_pci_path *path;
        u8 bus;
        int count;

        bus = scope->bus;
        path = (struct acpi_dmar_pci_path *)(scope + 1);
        count = (scope->length - sizeof(struct acpi_dmar_device_scope))
                / sizeof(struct acpi_dmar_pci_path);

        while (--count > 0) {
                /*
                 * Access PCI config space directly because the PCI
                 * subsystem isn't initialized yet.
                 */
                bus = read_pci_config_byte(bus, path->dev, path->fn,
                                           PCI_SECONDARY_BUS);
                path++;
        }

        ir_hpet[ir_hpet_num].bus = bus;
        ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
        ir_hpet[ir_hpet_num].iommu = iommu;
        ir_hpet[ir_hpet_num].id = scope->enumeration_id;
        ir_hpet_num++;
}

static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
                                      struct intel_iommu *iommu)
{
        struct acpi_dmar_pci_path *path;
        u8 bus;
        int count;

        bus = scope->bus;
        path = (struct acpi_dmar_pci_path *)(scope + 1);
        count = (scope->length - sizeof(struct acpi_dmar_device_scope))
                / sizeof(struct acpi_dmar_pci_path);

        while (--count > 0) {
                /*
                 * Access PCI config space directly because the PCI
                 * subsystem isn't initialized yet.
                 */
                bus = read_pci_config_byte(bus, path->dev, path->fn,
                                           PCI_SECONDARY_BUS);
                path++;
        }

        ir_ioapic[ir_ioapic_num].bus = bus;
        ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
        ir_ioapic[ir_ioapic_num].iommu = iommu;
        ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
        ir_ioapic_num++;
}

static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
                                      struct intel_iommu *iommu)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_device_scope *scope;
        void *start, *end;

        drhd = (struct acpi_dmar_hardware_unit *)header;

        start = (void *)(drhd + 1);
        end = ((void *)drhd) + header->length;

        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
                        if (ir_ioapic_num == MAX_IO_APICS) {
                                printk(KERN_WARNING "Exceeded Max IO APICS\n");
                                return -1;
                        }

                        printk(KERN_INFO "IOAPIC id %d under DRHD base"
                               " 0x%Lx\n", scope->enumeration_id,
                               drhd->address);

                        ir_parse_one_ioapic_scope(scope, iommu);
                } else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
                        if (ir_hpet_num == MAX_HPET_TBS) {
                                printk(KERN_WARNING
                                       "Exceeded Max HPET blocks\n");
                                return -1;
                        }

                        printk(KERN_INFO "HPET id %d under DRHD base"
                               " 0x%Lx\n", scope->enumeration_id,
                               drhd->address);

                        ir_parse_one_hpet_scope(scope, iommu);
                }
                start += scope->length;
        }

        return 0;
}

/*
 * Find the association between IO-APICs and their interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
        struct dmar_drhd_unit *drhd;
        int ir_supported = 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (ecap_ir_support(iommu->ecap)) {
                        if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
                                return -1;

                        ir_supported = 1;
                }
        }

        if (ir_supported && ir_ioapic_num != nr_ioapics) {
                printk(KERN_WARNING
                       "Not all IO-APICs listed under remapping hardware\n");
                return -1;
        }

        return ir_supported;
}
void disable_intr_remapping(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu = NULL;

        /*
         * Disable Interrupt-remapping for all the DRHD's now.
         */
        for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;

                iommu_disable_intr_remapping(iommu);
        }
}
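
/*
 * Re-enable interrupt remapping using the tables that are still in
 * memory: restart queued invalidation where it was initialized, then
 * reprogram each capable IOMMU.
 */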
int reenable_intr_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;
        struct intel_iommu *iommu = NULL;

        for_each_iommu(iommu, drhd)
                if (iommu->qi)
                        dmar_reenable_qi(iommu);

        /*
         * Setup Interrupt-remapping for all the DRHD's now.
         */
        for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;

                /* Set up interrupt remapping for this iommu. */
                iommu_set_intr_remapping(iommu, eim);
                setup = 1;
        }

        if (!setup)
                goto error;

        return 0;

error:
        /* Handle the error condition gracefully here! */
        return -1;
}