/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA Remapping Reporting
 * (DMAR) ACPI table.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>	/* kzalloc()/kcalloc() are used throughout */

#define PREFIX "DMAR: "
/*
 * No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);
static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add INCLUDE_ALL at the tail, so a scan of the list will
	 * find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);
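
	/*
	 * Each acpi_dmar_pci_path entry is one (device, function) hop;
	 * the walk below starts at the scope's bus and descends through
	 * each bridge's subordinate bus until the target is reached.
	 */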
	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list nonexistent devices in the DMAR
		 * table; just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING
				PREFIX "Device scope bus [%d] not found\n",
				scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
				"Device scope device [%04x:%02x:%02x.%02x] not found\n",
				segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
			"Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		printk(KERN_WARNING PREFIX
			"Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);	/* put only after the name is printed */
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}
static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index = 0;
	int ret;

	/* First pass: count the scope entries we can handle. */
	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC)
			printk(KERN_WARNING PREFIX
				"Unsupported device scope\n");
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	/* Second pass: resolve each entry to a pci_dev. */
	start = tmp;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}
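
/*
 * Illustrative sketch (not code from this file): the ACPI encoding the
 * two-pass parser above consumes. An endpoint behind one bridge is one
 * scope entry followed by two path hops:
 *
 *	entry_type = ACPI_DMAR_SCOPE_TYPE_ENDPOINT
 *	length     = sizeof(struct acpi_dmar_device_scope)
 *		     + 2 * sizeof(struct acpi_dmar_pci_path)
 *	bus        = starting bus number
 *	path[0]    = { bridge_dev, bridge_fn }   (hop onto the bridge)
 *	path[1]    = { device_dev, device_fn }   (hop to the endpoint)
 */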
/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)dmaru->hdr;

	/* An INCLUDE_ALL unit matches every device; there is no scope to parse. */
	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}
static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}
static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *)rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
				   ((void *)rmrr) + rmrr->header.length,
				   &rmrru->devices_cnt, &rmrru->devices,
				   rmrr->segment);
	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}
static LIST_HEAD(dmar_atsr_units);

static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1; /* BIT0: ALL_PORTS */

	list_add(&atsru->list, &dmar_atsr_units);
	return 0;
}
static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	int rc;
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	rc = dmar_parse_dev_scope((void *)(atsr + 1),
				  (void *)atsr + atsr->header.length,
				  &atsru->devices_cnt, &atsru->devices,
				  atsr->segment);
	if (rc || !atsru->devices_cnt) {
		list_del(&atsru->list);
		kfree(atsru);
	}
	return rc;
}
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i;
	struct pci_bus *bus;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	dev = pci_physfn(dev);

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment == pci_domain_nr(dev->bus))
			goto found;
	}

	return 0;

found:
	for (bus = dev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (!bridge || !pci_is_pcie(bridge) ||
		    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;

		if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
			for (i = 0; i < atsru->devices_cnt; i++)
				if (atsru->devices[i] == bridge)
					return 1;
			break;
		}
	}

	if (atsru->include_all)
		return 1;

	return 0;
}
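
/*
 * Note: an ATSR names the PCIe root ports below which devices may use
 * Address Translation Services, so the walk above climbs the bus
 * hierarchy and decides at the first root port it finds; ALL_PORTS
 * (include_all) covers every root port in the segment.
 */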
#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN(1, "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
	     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
	     rhsa->base_address,
	     dmi_get_system_info(DMI_BIOS_VENDOR),
	     dmi_get_system_info(DMI_BIOS_VERSION),
	     dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif
static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		printk(KERN_INFO PREFIX
		       "DRHD base: %#016Lx flags: %#x\n",
		       (unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		printk(KERN_INFO PREFIX
		       "RMRR base: %#016Lx end: %#016Lx\n",
		       (unsigned long long)rmrr->base_address,
		       (unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		printk(KERN_INFO PREFIX
		       "RHSA base: %#016Lx proximity domain: %#x\n",
		       (unsigned long long)rhsa->base_address,
		       rhsa->proximity_domain);
		break;
	}
}
/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* If we can find the DMAR table, then there are DMAR devices. */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}
/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again, as the earlier dmar_tbl mapping could have been
	 * made with a fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR copy
	 * SINIT saved in SinitMleData in the TXT heap (which is DMA protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
	       dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
	       (((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
			       "Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		case ACPI_DMAR_TYPE_ATSR:
			ret = dmar_parse_one_atsr(entry_header);
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			printk(KERN_WARNING PREFIX
			       "Unknown DMAR structure type %d\n",
			       entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}
static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
				 struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}
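
/*
 * Note: this lookup relies on dmar_register_drhd_unit() keeping
 * INCLUDE_ALL units at the tail of dmar_drhd_units, so units with an
 * explicit device scope are always tried before the catch-all one.
 */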
int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd, *drhd_n;
	struct dmar_rmrr_unit *rmrr, *rmrr_n;
	struct dmar_atsr_unit *atsr, *atsr_n;
	int ret = -ENODEV;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

	list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
		ret = rmrr_parse_dev(rmrr);
		if (ret)
			return ret;
	}

	list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
		ret = atsr_parse_dev(atsr);
		if (ret)
			return ret;
	}

	return ret;
}
int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO PREFIX "No ATSR found\n");

	return 0;
}
static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_ONCE(1, "Your BIOS is broken; DMAR reported at address %llx%s!\n"
		  "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		  addr, message,
		  dmi_get_system_info(DMI_BIOS_VENDOR),
		  dmi_get_system_info(DMI_BIOS_VERSION),
		  dmi_get_system_info(DMI_PRODUCT_VERSION));
}
int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
	       (((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
			       "Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				warn_invalid_dmar(0, "");
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				printk("IOMMU: can't validate: %llx\n",
				       drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				warn_invalid_dmar(drhd->address,
						  " returns all ones");
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
	return 0;
}
void __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
	{
#ifdef CONFIG_INTR_REMAP
		struct acpi_table_dmar *dmar;
		/*
		 * For now we disable DMA-remapping when interrupt
		 * remapping is enabled.
		 * Once support for queued invalidation for IOTLB
		 * invalidation is added, we will not need this any more.
		 */
		dmar = (struct acpi_table_dmar *) dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
#endif
#ifdef CONFIG_DMAR
		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
			iommu_detected = 1;
			/* Make sure ACS will be enabled */
			pci_request_acs();
		}
#endif
#ifdef CONFIG_X86
		if (ret)
			x86_init.iommu.iommu_init = intel_iommu_init;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
}
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		warn_invalid_dmar(drhd->reg_base_addr, " returns all ones");
		goto err_unmap;
	}

	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

err_unmap:
	iounmap(iommu->reg);
error:
	kfree(iommu);
	return -1;
}
void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

	free_dmar_iommu(iommu);

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}
/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
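
/*
 * The invalidation queue is a fixed ring of QI_LENGTH descriptors:
 * free_head is where new descriptors are queued and free_tail trails
 * it; a slot only returns to QI_FREE here, once hardware has marked it
 * QI_DONE (or fault handling marked it QI_ABORT).
 */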
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			printk(KERN_ERR "VT-d detected invalid descriptor: "
			       "low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
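
		/*
		 * Descriptors are submitted in pairs (command + wait
		 * descriptor), so wait descriptors occupy the odd ring
		 * slots; the walk below steps back two slots at a time,
		 * aborting each pending wait descriptor it passes.
		 */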
		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * Update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We leave the interrupts disabled, to prevent the interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is
		 * to avoid a deadlock where the interrupt context can wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}
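
/*
 * Illustrative use (a sketch; qi_global_iec() below does exactly this):
 * callers build one descriptor and block until its paired wait
 * descriptor retires:
 *
 *	struct qi_desc desc;
 *
 *	desc.low = QI_IEC_TYPE;		(global interrupt-entry-cache flush)
 *	desc.high = 0;
 *	qi_submit_sync(&desc, iommu);
 */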
/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}
void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}
void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;
	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}
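
/*
 * Worked example: size_order is the address-mask (AM) field, so the
 * flush covers 2^size_order pages. size_order == 2 with 4KiB pages
 * invalidates one naturally aligned 16KiB range starting at addr.
 */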
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DEV_IOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}
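
/*
 * The range is encoded PCIe-ATS style in the address itself: addr must
 * be aligned to 2^mask pages (the BUG_ON above), and the OR sets the
 * low-order bits to 0111...1 so the first zero bit above the trailing
 * ones tells the device how large the invalidated range is.
 */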
/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give the HW a chance to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
	       (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * Queued invalidation is already set up and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	/* statuses must start out as QI_FREE (zero), hence kzalloc */
	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}
/* iommu interrupt handling. Most of this is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(dma_remap_fault_reasons) - 1)
const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
				     ARRAY_SIZE(intr_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return intr_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}
void dmar_msi_unmask(unsigned int irq)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	/* unmask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_mask(unsigned int irq)
{
	unsigned long flag;
	struct intel_iommu *iommu = get_irq_data(irq);

	/* mask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
			     u8 fault_reason, u16 source_id,
			     unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		printk(KERN_ERR
		       "DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}
#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
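
	/*
	 * Each primary fault record is 16 bytes: a 64-bit faulting
	 * address, then a dword carrying the source-id, then a dword
	 * whose top bit (DMA_FRCD_F) flags the record as valid and
	 * which also holds the fault reason and request type.
	 */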
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
					fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
		       fault_index * PRIMARY_FAULT_REG_LEN + 12);

		spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	set_irq_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		set_irq_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}
int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_set_interrupt(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
			       "interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}
	}

	return 0;
}
/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}
/*
 * Check interrupt remapping support in the DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;	/* BIT0: INTR_REMAP supported */
}