#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"
#include <acpi/acpi.h>
#include <asm/pci-direct.h>
#include "pci.h"

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
static int ir_ioapic_num, ir_hpet_num;
int intr_remapping_enabled;

static int disable_intremap;
static int disable_sourceid_checking;

static __init int setup_nointremap(char *str)
{
        disable_intremap = 1;
        return 0;
}
early_param("nointremap", setup_nointremap);

static __init int setup_intremap(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strncmp(str, "on", 2))
                disable_intremap = 0;
        else if (!strncmp(str, "off", 3))
                disable_intremap = 1;
        else if (!strncmp(str, "nosid", 5))
                disable_sourceid_checking = 1;

        return 0;
}
early_param("intremap", setup_intremap);

static DEFINE_SPINLOCK(irq_2_ir_lock);

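/*
 * The per-irq remapping state lives inside the irq_cfg that the core
 * irq layer keeps as chip data; return it, or NULL for an invalid irq.
 */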
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
        struct irq_cfg *cfg = get_irq_chip_data(irq);
        return cfg ? &cfg->irq_2_iommu : NULL;
}

int irq_remapped(int irq)
{
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);

        return irq_iommu ? irq_iommu->iommu != NULL : 0;
}

int get_irte(int irq, struct irte *entry)
{
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        unsigned long flags;
        int index;

        if (!entry || !irq_iommu)
                return -1;

        spin_lock_irqsave(&irq_2_ir_lock, flags);

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        *entry = *(irq_iommu->iommu->ir_table->base + index);

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
        return 0;
}

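/*
 * Allocate a power-of-two sized block of IRTEs for 'irq' and record
 * the base index, sub-handle and mask in its irq_2_iommu state.
 */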
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
        struct ir_table *table = iommu->ir_table;
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        u16 index, start_index;
        unsigned int mask = 0;
        unsigned long flags;
        int i;

        if (!count || !irq_iommu)
                return -1;

        /*
         * start the IRTE search from index 0.
         */
        index = start_index = 0;

        if (count > 1) {
                count = __roundup_pow_of_two(count);
                mask = ilog2(count);
        }

        if (mask > ecap_max_handle_mask(iommu->ecap)) {
                printk(KERN_ERR
                       "Requested mask %x exceeds the max invalidation handle"
                       " mask value %Lx\n", mask,
                       ecap_max_handle_mask(iommu->ecap));
                return -1;
        }

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        do {
                for (i = index; i < index + count; i++)
                        if (table->base[i].present)
                                break;
                /* empty index found */
                if (i == index + count)
                        break;

                index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

                if (index == start_index) {
                        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                        printk(KERN_ERR "can't allocate an IRTE\n");
                        return -1;
                }
        } while (1);

        for (i = index; i < index + count; i++)
                table->base[i].present = 1;

        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = mask;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return index;
}

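/*
 * Queue an IEC (Interrupt Entry Cache) invalidation descriptor for the
 * given index/mask and wait synchronously for it to complete.
 */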
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
        struct qi_desc desc;

        desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
                   | QI_IEC_SELECTIVE;
        desc.high = 0;

        return qi_submit_sync(&desc, iommu);
}

int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        unsigned long flags;
        int index;

        if (!irq_iommu)
                return -1;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        *sub_handle = irq_iommu->sub_handle;
        index = irq_iommu->irte_index;
        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
        return index;
}

int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        unsigned long flags;

        if (!irq_iommu)
                return -1;

        spin_lock_irqsave(&irq_2_ir_lock, flags);

        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = subhandle;
        irq_iommu->irte_mask = 0;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return 0;
}

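/*
 * Rewrite the IRTE for 'irq' and flush both the CPU cache line and the
 * hardware's interrupt entry cache so the change takes effect.
 */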
int modify_irte(int irq, struct irte *irte_modified)
{
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        struct intel_iommu *iommu;
        unsigned long flags;
        struct irte *irte;
        int rc, index;

        if (!irq_iommu)
                return -1;

        spin_lock_irqsave(&irq_2_ir_lock, flags);

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        irte = &iommu->ir_table->base[index];

        set_64bit(&irte->low, irte_modified->low);
        set_64bit(&irte->high, irte_modified->high);
        __iommu_flush_cache(iommu, irte, sizeof(*irte));

        rc = qi_flush_iec(iommu, index, 0);
        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
        int i;

        for (i = 0; i < MAX_HPET_TBS; i++)
                if (ir_hpet[i].id == hpet_id)
                        return ir_hpet[i].iommu;
        return NULL;
}

struct intel_iommu *map_ioapic_to_ir(int apic)
{
        int i;

        for (i = 0; i < MAX_IO_APICS; i++)
                if (ir_ioapic[i].id == apic)
                        return ir_ioapic[i].iommu;
        return NULL;
}

struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
        struct dmar_drhd_unit *drhd;

        drhd = dmar_find_matched_drhd_unit(dev);
        if (!drhd)
                return NULL;

        return drhd->iommu;
}

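/*
 * Zero all IRTEs of an allocation and flush them from the interrupt
 * entry cache.  Sub-handle users share the owner's entries, so only
 * the owner (sub_handle == 0) does the clearing.
 */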
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
        struct irte *start, *entry, *end;
        struct intel_iommu *iommu;
        int index;

        if (irq_iommu->sub_handle)
                return 0;

        iommu = irq_iommu->iommu;
        index = irq_iommu->irte_index + irq_iommu->sub_handle;

        start = iommu->ir_table->base + index;
        end = start + (1 << irq_iommu->irte_mask);

        for (entry = start; entry < end; entry++) {
                set_64bit(&entry->low, 0);
                set_64bit(&entry->high, 0);
        }

        return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

int free_irte(int irq)
{
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        unsigned long flags;
        int rc;

        if (!irq_iommu)
                return -1;

        spin_lock_irqsave(&irq_2_ir_lock, flags);

        rc = clear_entries(irq_iommu);

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

/*
 * source validation type
 */
#define SVT_NO_VERIFY           0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ       0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS          0x2  /* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16       0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1  0x1  /* verify most significant 13 bits, ignore
                              * the third least significant bit
                              */
#define SQ_13_IGNORE_2  0x2  /* verify most significant 13 bits, ignore
                              * the second and third least significant bits
                              */
#define SQ_13_IGNORE_3  0x3  /* verify most significant 13 bits, ignore
                              * the three least significant bits
                              */

/*
 * set SVT, SQ and SID fields of irte to verify
 * source ids of interrupt requests
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
                         unsigned int sq, unsigned int sid)
{
        if (disable_sourceid_checking)
                svt = SVT_NO_VERIFY;
        irte->svt = svt;
        irte->sq = sq;
        irte->sid = sid;
}

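/*
 * Program the source-id fields so that hardware only accepts this
 * IO-APIC's requester-id, as recorded from the DMAR device scope.
 */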
int set_ioapic_sid(struct irte *irte, int apic)
{
        int i;
        u16 sid = 0;

        if (!irte)
                return -1;

        for (i = 0; i < MAX_IO_APICS; i++) {
                if (ir_ioapic[i].id == apic) {
                        sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
                        break;
                }
        }

        if (sid == 0) {
                pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
                return -1;
        }

        set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

        return 0;
}

int set_hpet_sid(struct irte *irte, u8 id)
{
        int i;
        u16 sid = 0;

        if (!irte)
                return -1;

        for (i = 0; i < MAX_HPET_TBS; i++) {
                if (ir_hpet[i].id == id) {
                        sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
                        break;
                }
        }

        if (sid == 0) {
                pr_warning("Failed to set source-id of HPET block (%d)\n", id);
                return -1;
        }

        /*
         * Should really use SQ_ALL_16. Some platforms are broken.
         * While we figure out the right quirks for these broken platforms, use
         * SQ_13_IGNORE_3 for now.
         */
        set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

        return 0;
}

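/*
 * Program the source-id fields for an MSI: PCIe devices and Root
 * Complex integrated devices are validated against their own
 * requester-id, while devices behind a PCIe-to-PCI(-X) or legacy
 * bridge are validated against the bridge's id.
 */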
int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
        struct pci_dev *bridge;

        if (!irte || !dev)
                return -1;

        /* PCIe device or Root Complex integrated PCI device */
        if (pci_is_pcie(dev) || !dev->bus->parent) {
                set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
                             (dev->bus->number << 8) | dev->devfn);
                return 0;
        }

        bridge = pci_find_upstream_pcie_bridge(dev);
        if (bridge) {
                if (pci_is_pcie(bridge)) /* this is a PCIe-to-PCI/PCIX bridge */
                        set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
                                     (bridge->bus->number << 8) | dev->bus->number);
                else /* this is a legacy PCI bridge */
                        set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
                                     (bridge->bus->number << 8) | bridge->devfn);
        }

        return 0;
}

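/*
 * Program the interrupt-remapping table address, invalidate the
 * interrupt entry cache and then set the IRE bit to turn remapping on.
 */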
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
        u64 addr;
        u32 sts;
        unsigned long flags;

        addr = virt_to_phys((void *)iommu->ir_table->base);

        spin_lock_irqsave(&iommu->register_lock, flags);

        dmar_writeq(iommu->reg + DMAR_IRTA_REG,
                    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

        /* Set interrupt-remapping table pointer */
        iommu->gcmd |= DMA_GCMD_SIRTP;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRTPS), sts);
        spin_unlock_irqrestore(&iommu->register_lock, flags);

        /*
         * global invalidation of interrupt entry cache before enabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        spin_lock_irqsave(&iommu->register_lock, flags);

        /* Enable interrupt-remapping */
        iommu->gcmd |= DMA_GCMD_IRE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRES), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

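/*
 * Allocate the interrupt-remapping table for this IOMMU and enable
 * remapping on it.
 */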
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
        struct ir_table *ir_table;
        struct page *pages;

        ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
                                             GFP_ATOMIC);

        if (!iommu->ir_table)
                return -ENOMEM;

        pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
                                 INTR_REMAP_PAGE_ORDER);

        if (!pages) {
                printk(KERN_ERR "failed to allocate pages of order %d\n",
                       INTR_REMAP_PAGE_ORDER);
                kfree(iommu->ir_table);
                return -ENOMEM;
        }

        ir_table->base = page_address(pages);

        iommu_set_intr_remapping(iommu, mode);
        return 0;
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
{
        unsigned long flags;
        u32 sts;

        if (!ecap_ir_support(iommu->ecap))
                return;

        /*
         * global invalidation of interrupt entry cache before disabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        spin_lock_irqsave(&iommu->register_lock, flags);

        sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_IRES))
                goto end;

        iommu->gcmd &= ~DMA_GCMD_IRE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, !(sts & DMA_GSTS_IRES), sts);

end:
        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

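/*
 * Interrupt remapping is supported only if it wasn't disabled on the
 * command line, the DMAR table advertises it, and every DRHD's
 * extended capability register has the IR bit set.
 */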
int __init intr_remapping_supported(void)
{
        struct dmar_drhd_unit *drhd;

        if (disable_intremap)
                return 0;

        if (!dmar_ir_support())
                return 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        return 0;
        }

        return 1;
}

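/*
 * Enable interrupt remapping on all DRHDs: quiesce whatever the BIOS
 * left enabled, check EIM support if x2apic mode (eim) is requested,
 * enable queued invalidation everywhere, then set up each remapping
 * table.
 */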
int __init enable_intr_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;

        if (parse_ioapics_under_ir() != 1) {
                printk(KERN_INFO "Not enabling interrupt remapping\n");
                return -1;
        }

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                /*
                 * If the queued invalidation is already initialized,
                 * shouldn't disable it.
                 */
                if (iommu->qi)
                        continue;

                /*
                 * Clear previous faults.
                 */
                dmar_fault(-1, iommu);

                /*
                 * Disable intr remapping and queued invalidation, if already
                 * enabled prior to OS handover.
                 */
                iommu_disable_intr_remapping(iommu);

                dmar_disable_qi(iommu);
        }

        /*
         * check for the Interrupt-remapping support
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (eim && !ecap_eim_support(iommu->ecap)) {
                        printk(KERN_INFO "DRHD %Lx: EIM not supported, "
                               "ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
                        return -1;
                }
        }

        /*
         * Enable queued invalidation for all the DRHD's.
         */
        for_each_drhd_unit(drhd) {
                int ret;
                struct intel_iommu *iommu = drhd->iommu;
                ret = dmar_enable_qi(iommu);

                if (ret) {
                        printk(KERN_ERR "DRHD %Lx: failed to enable queued "
                               "invalidation, ecap %Lx, ret %d\n",
                               drhd->reg_base_addr, iommu->ecap, ret);
                        return -1;
                }
        }

        /*
         * Setup Interrupt-remapping for all the DRHD's now.
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (setup_intr_remapping(iommu, eim))
                        goto error;

                setup = 1;
        }

        if (!setup)
                goto error;

        intr_remapping_enabled = 1;

        return 0;

error:
        /*
         * handle error condition gracefully here!
         */
        return -1;
}

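/*
 * The DMAR device-scope entries below carry the PCI path of each
 * IO-APIC/HPET under a DRHD; walk the path to compute the bus number
 * and record the source-id used later for interrupt-request
 * validation.
 */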
static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
                                    struct intel_iommu *iommu)
{
        struct acpi_dmar_pci_path *path;
        int count;
        u8 bus;

        bus = scope->bus;
        path = (struct acpi_dmar_pci_path *)(scope + 1);
        count = (scope->length - sizeof(struct acpi_dmar_device_scope))
                / sizeof(struct acpi_dmar_pci_path);

        while (--count > 0) {
                /*
                 * Access PCI config space directly because the PCI
                 * subsystem isn't initialized yet.
                 */
                bus = read_pci_config_byte(bus, path->dev, path->fn,
                                           PCI_SECONDARY_BUS);
                path++;
        }

        ir_hpet[ir_hpet_num].bus = bus;
        ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
        ir_hpet[ir_hpet_num].iommu = iommu;
        ir_hpet[ir_hpet_num].id = scope->enumeration_id;
        ir_hpet_num++;
}

static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
                                      struct intel_iommu *iommu)
{
        struct acpi_dmar_pci_path *path;
        int count;
        u8 bus;

        bus = scope->bus;
        path = (struct acpi_dmar_pci_path *)(scope + 1);
        count = (scope->length - sizeof(struct acpi_dmar_device_scope))
                / sizeof(struct acpi_dmar_pci_path);

        while (--count > 0) {
                /*
                 * Access PCI config space directly because the PCI
                 * subsystem isn't initialized yet.
                 */
                bus = read_pci_config_byte(bus, path->dev, path->fn,
                                           PCI_SECONDARY_BUS);
                path++;
        }

        ir_ioapic[ir_ioapic_num].bus = bus;
        ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
        ir_ioapic[ir_ioapic_num].iommu = iommu;
        ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
        ir_ioapic_num++;
}

static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
                                      struct intel_iommu *iommu)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_device_scope *scope;
        void *start, *end;

        drhd = (struct acpi_dmar_hardware_unit *)header;

        start = (void *)(drhd + 1);
        end = ((void *)drhd) + header->length;

        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
                        if (ir_ioapic_num == MAX_IO_APICS) {
                                printk(KERN_WARNING "Exceeded Max IO APICS\n");
                                return -1;
                        }

                        printk(KERN_INFO "IOAPIC id %d under DRHD base "
                               "0x%Lx IOMMU %d\n", scope->enumeration_id,
                               drhd->address, iommu->seq_id);

                        ir_parse_one_ioapic_scope(scope, iommu);
                } else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
                        if (ir_hpet_num == MAX_HPET_TBS) {
                                printk(KERN_WARNING "Exceeded Max HPET blocks\n");
                                return -1;
                        }

                        printk(KERN_INFO "HPET id %d under DRHD base "
                               "0x%Lx\n", scope->enumeration_id,
                               drhd->address);

                        ir_parse_one_hpet_scope(scope, iommu);
                }
                start += scope->length;
        }

        return 0;
}

/*
 * Finds the association between IOAPICs and their interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
        struct dmar_drhd_unit *drhd;
        int ir_supported = 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (ecap_ir_support(iommu->ecap)) {
                        if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
                                return -1;

                        ir_supported = 1;
                }
        }

        if (ir_supported && ir_ioapic_num != nr_ioapics) {
                printk(KERN_WARNING
                       "Not all IO-APICs listed under remapping hardware\n");
                return -1;
        }

        return ir_supported;
}

void disable_intr_remapping(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu = NULL;

        /*
         * Disable Interrupt-remapping for all the DRHD's now.
         */
        for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;

                iommu_disable_intr_remapping(iommu);
        }
}

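/*
 * Re-enable queued invalidation where it had been initialized and
 * reprogram interrupt remapping on every capable IOMMU (typically the
 * resume-time counterpart of disable_intr_remapping()).
 */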
int reenable_intr_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;
        struct intel_iommu *iommu = NULL;

        for_each_iommu(iommu, drhd)
                if (iommu->qi)
                        dmar_reenable_qi(iommu);

        /*
         * Setup Interrupt-remapping for all the DRHD's now.
         */
        for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;

                /* Set up interrupt remapping for this iommu. */
                iommu_set_intr_remapping(iommu, eim);
                setup = 1;
        }

        if (!setup)
                goto error;

        return 0;

error:
        /*
         * handle error condition gracefully here!
         */
        return -1;
}