/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>

#define PREFIX "DMAR: "

/* No locks are needed as DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
        /*
         * Add INCLUDE_ALL at the tail, so a scan of the list will find
         * it at the very end.
         */
        if (drhd->include_all)
                list_add_tail(&drhd->list, &dmar_drhd_units);
        else
                list_add(&drhd->list, &dmar_drhd_units);
}

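/*
 * Resolve one ACPI device-scope entry to a pci_dev.  Starting from the
 * bus number in the scope header, walk the (device, function) path
 * entries down through subordinate buses until the target device is
 * reached, then check that the entry type (endpoint vs. bridge) matches
 * what was actually found.
 */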
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
                                           struct pci_dev **dev, u16 segment)
{
        struct pci_bus *bus;
        struct pci_dev *pdev = NULL;
        struct acpi_dmar_pci_path *path;
        int count;

        bus = pci_find_bus(segment, scope->bus);
        path = (struct acpi_dmar_pci_path *)(scope + 1);
        count = (scope->length - sizeof(struct acpi_dmar_device_scope))
                / sizeof(struct acpi_dmar_pci_path);

        while (count) {
                if (pdev)
                        pci_dev_put(pdev);
                /*
                 * Some BIOSes list non-existent devices in the DMAR
                 * table; just ignore them.
                 */
                if (!bus) {
                        printk(KERN_WARNING
                        PREFIX "Device scope bus [%d] not found\n",
                        scope->bus);
                        break;
                }
                pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
                if (!pdev) {
                        printk(KERN_WARNING PREFIX
                        "Device scope device [%04x:%02x:%02x.%02x] not found\n",
                                segment, bus->number, path->dev, path->fn);
                        break;
                }
                path++;
                count--;
                bus = pdev->subordinate;
        }
        if (!pdev) {
                printk(KERN_WARNING PREFIX
                "Device scope device [%04x:%02x:%02x.%02x] not found\n",
                segment, scope->bus, path->dev, path->fn);
                *dev = NULL;
                return 0;
        }
        if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
             pdev->subordinate) || (scope->entry_type ==
             ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
                printk(KERN_WARNING PREFIX
                        "Device scope type does not match for %s\n",
                         pci_name(pdev));
                pci_dev_put(pdev);
                return -EINVAL;
        }
        *dev = pdev;
        return 0;
}

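/*
 * Parse all device-scope entries in [start, end).  Two passes: first
 * count the endpoint/bridge entries so a pci_dev pointer array of the
 * right size can be allocated, then resolve each entry into that array
 * via dmar_parse_one_dev_scope().
 */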
static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
                                       struct pci_dev ***devices, u16 segment)
{
        struct acpi_dmar_device_scope *scope;
        void *tmp = start;
        int index;
        int ret;

        *cnt = 0;
        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
                    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
                        (*cnt)++;
                else
                        printk(KERN_WARNING PREFIX
                                "Unsupported device scope\n");
                start += scope->length;
        }
        if (*cnt == 0)
                return 0;

        *devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
        if (!*devices)
                return -ENOMEM;

        start = tmp;
        index = 0;
        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
                    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
                        ret = dmar_parse_one_dev_scope(scope,
                                &(*devices)[index], segment);
                        if (ret) {
                                kfree(*devices);
                                return ret;
                        }
                        index++;
                }
                start += scope->length;
        }

        return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct dmar_drhd_unit *dmaru;
        int ret = 0;

        drhd = (struct acpi_dmar_hardware_unit *)header;
        if (!drhd->address) {
                /* Promote an attitude of violence to a BIOS engineer today */
                WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
                     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                     dmi_get_system_info(DMI_BIOS_VENDOR),
                     dmi_get_system_info(DMI_BIOS_VERSION),
                     dmi_get_system_info(DMI_PRODUCT_VERSION));
                return -ENODEV;
        }
        dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
        if (!dmaru)
                return -ENOMEM;

        dmaru->hdr = header;
        dmaru->reg_base_addr = drhd->address;
        dmaru->segment = drhd->segment;
        dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

        ret = alloc_iommu(dmaru);
        if (ret) {
                kfree(dmaru);
                return ret;
        }
        dmar_register_drhd_unit(dmaru);
        return 0;
}

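/*
 * Second-stage parsing of a DRHD: resolve its device scope now that the
 * PCI subsystem is available.  INCLUDE_ALL units carry no explicit
 * scope, so there is nothing to do for them.
 */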
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
        struct acpi_dmar_hardware_unit *drhd;
        int ret = 0;

        drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

        if (dmaru->include_all)
                return 0;

        ret = dmar_parse_dev_scope((void *)(drhd + 1),
                                ((void *)drhd) + drhd->header.length,
                                &dmaru->devices_cnt, &dmaru->devices,
                                drhd->segment);
        if (ret) {
                list_del(&dmaru->list);
                kfree(dmaru);
        }
        return ret;
}

#ifdef CONFIG_DMAR
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
        list_add(&rmrr->list, &dmar_rmrr_units);
}

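/*
 * Parse one RMRR (Reserved Memory Region Reporting) structure and
 * record the reserved range; its device scope is resolved later by
 * rmrr_parse_dev().
 */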
static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
        struct acpi_dmar_reserved_memory *rmrr;
        struct dmar_rmrr_unit *rmrru;

        rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
        if (!rmrru)
                return -ENOMEM;

        rmrru->hdr = header;
        rmrr = (struct acpi_dmar_reserved_memory *)header;
        rmrru->base_address = rmrr->base_address;
        rmrru->end_address = rmrr->end_address;

        dmar_register_rmrr_unit(rmrru);
        return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
        struct acpi_dmar_reserved_memory *rmrr;
        int ret;

        rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
        ret = dmar_parse_dev_scope((void *)(rmrr + 1),
                ((void *)rmrr) + rmrr->header.length,
                &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);

        if (ret || (rmrru->devices_cnt == 0)) {
                list_del(&rmrru->list);
                kfree(rmrru);
        }
        return ret;
}

static LIST_HEAD(dmar_atsr_units);

static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
        struct acpi_dmar_atsr *atsr;
        struct dmar_atsr_unit *atsru;

        atsr = container_of(hdr, struct acpi_dmar_atsr, header);
        atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
        if (!atsru)
                return -ENOMEM;

        atsru->hdr = hdr;
        atsru->include_all = atsr->flags & 0x1;

        list_add(&atsru->list, &dmar_atsr_units);

        return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
        int rc;
        struct acpi_dmar_atsr *atsr;

        if (atsru->include_all)
                return 0;

        atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
        rc = dmar_parse_dev_scope((void *)(atsr + 1),
                                (void *)atsr + atsr->header.length,
                                &atsru->devices_cnt, &atsru->devices,
                                atsr->segment);
        if (rc || !atsru->devices_cnt) {
                list_del(&atsru->list);
                kfree(atsru);
        }

        return rc;
}

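/*
 * Check whether @dev is covered by an ATSR unit: find the ATSR for the
 * device's PCI segment, then walk up the bus hierarchy to the root
 * port; the device matches if that root port is listed in the ATSR
 * device scope, or if the ATSR is INCLUDE_ALL.
 */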
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
        int i;
        struct pci_bus *bus;
        struct acpi_dmar_atsr *atsr;
        struct dmar_atsr_unit *atsru;

        list_for_each_entry(atsru, &dmar_atsr_units, list) {
                atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
                if (atsr->segment == pci_domain_nr(dev->bus))
                        goto found;
        }

        return 0;

found:
        for (bus = dev->bus; bus; bus = bus->parent) {
                struct pci_dev *bridge = bus->self;

                if (!bridge || !bridge->is_pcie ||
                    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
                        return 0;

                if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
                        for (i = 0; i < atsru->devices_cnt; i++)
                                if (atsru->devices[i] == bridge)
                                        return 1;
                        break;
                }
        }

        if (atsru->include_all)
                return 1;

        return 0;
}
#endif

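/*
 * Parse one RHSA (Remapping Hardware Static Affinity) structure: find
 * the DRHD with the matching register base address and record the NUMA
 * node of its IOMMU, so later allocations can be made node-local.
 */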
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
        struct acpi_dmar_rhsa *rhsa;
        struct dmar_drhd_unit *drhd;

        rhsa = (struct acpi_dmar_rhsa *)header;
        for_each_drhd_unit(drhd) {
                if (drhd->reg_base_addr == rhsa->base_address) {
                        int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

                        if (!node_online(node))
                                node = -1;
                        drhd->iommu->node = node;
                        return 0;
                }
        }

        return -ENODEV;
}

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_reserved_memory *rmrr;
        struct acpi_dmar_atsr *atsr;
        struct acpi_dmar_rhsa *rhsa;

        switch (header->type) {
        case ACPI_DMAR_TYPE_HARDWARE_UNIT:
                drhd = container_of(header, struct acpi_dmar_hardware_unit,
                                    header);
                printk(KERN_INFO PREFIX
                        "DRHD base: %#016Lx flags: %#x\n",
                        (unsigned long long)drhd->address, drhd->flags);
                break;
        case ACPI_DMAR_TYPE_RESERVED_MEMORY:
                rmrr = container_of(header, struct acpi_dmar_reserved_memory,
                                    header);
                printk(KERN_INFO PREFIX
                        "RMRR base: %#016Lx end: %#016Lx\n",
                        (unsigned long long)rmrr->base_address,
                        (unsigned long long)rmrr->end_address);
                break;
        case ACPI_DMAR_TYPE_ATSR:
                atsr = container_of(header, struct acpi_dmar_atsr, header);
                printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
                break;
        case ACPI_DMAR_HARDWARE_AFFINITY:
                rhsa = container_of(header, struct acpi_dmar_rhsa, header);
                printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
                       (unsigned long long)rhsa->base_address,
                       rhsa->proximity_domain);
                break;
        }
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
        acpi_status status = AE_OK;

        /* If we can find the DMAR table, then there are DMAR devices */
        status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
                                (struct acpi_table_header **)&dmar_tbl,
                                &dmar_tbl_size);

        if (ACPI_SUCCESS(status) && !dmar_tbl) {
                printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
                status = AE_NOT_FOUND;
        }

        return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
        struct acpi_table_dmar *dmar;
        struct acpi_dmar_header *entry_header;
        int ret = 0;

        /*
         * Do it again; the earlier dmar_tbl mapping could have been done
         * via the fixed map.
         */
        dmar_table_detect();

        /*
         * ACPI tables may not be DMA protected by tboot, so use the DMAR
         * copy saved by SINIT in SinitMleData in the TXT heap (which is
         * DMA protected).
         */
        dmar_tbl = tboot_get_dmar_table(dmar_tbl);

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        if (!dmar)
                return -ENODEV;

        if (dmar->width < PAGE_SHIFT - 1) {
                printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
                return -EINVAL;
        }

        printk(KERN_INFO PREFIX "Host address width %d\n",
                dmar->width + 1);

        entry_header = (struct acpi_dmar_header *)(dmar + 1);
        while (((unsigned long)entry_header) <
                        (((unsigned long)dmar) + dmar_tbl->length)) {
                /* Avoid looping forever on bad ACPI tables */
                if (entry_header->length == 0) {
                        printk(KERN_WARNING PREFIX
                                "Invalid 0-length structure\n");
                        ret = -EINVAL;
                        break;
                }

                dmar_table_print_dmar_entry(entry_header);

                switch (entry_header->type) {
                case ACPI_DMAR_TYPE_HARDWARE_UNIT:
                        ret = dmar_parse_one_drhd(entry_header);
                        break;
                case ACPI_DMAR_TYPE_RESERVED_MEMORY:
#ifdef CONFIG_DMAR
                        ret = dmar_parse_one_rmrr(entry_header);
#endif
                        break;
                case ACPI_DMAR_TYPE_ATSR:
#ifdef CONFIG_DMAR
                        ret = dmar_parse_one_atsr(entry_header);
#endif
                        break;
                case ACPI_DMAR_HARDWARE_AFFINITY:
                        ret = dmar_parse_one_rhsa(entry_header);
                        break;
                default:
                        printk(KERN_WARNING PREFIX
                                "Unknown DMAR structure type %d\n",
                                entry_header->type);
                        ret = 0; /* for forward compatibility */
                        break;
                }
                if (ret)
                        break;

                entry_header = ((void *)entry_header + entry_header->length);
        }
        return ret;
}

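/*
 * Check whether @dev or any of its parent bridges appears in the given
 * device array.  Walking up the hierarchy catches devices that are
 * covered by a bridge entry in a device scope.
 */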
int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
                          struct pci_dev *dev)
{
        int index;

        while (dev) {
                for (index = 0; index < cnt; index++)
                        if (dev == devices[index])
                                return 1;

                /* Check our parent */
                dev = dev->bus->self;
        }

        return 0;
}

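/*
 * Find the DRHD unit responsible for @dev: either a unit that lists the
 * device (or one of its parent bridges) in its scope, or the catch-all
 * INCLUDE_ALL unit for the device's segment.  INCLUDE_ALL units sit at
 * the tail of the list, so explicit matches are found first.
 */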
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
        struct dmar_drhd_unit *dmaru = NULL;
        struct acpi_dmar_hardware_unit *drhd;

        list_for_each_entry(dmaru, &dmar_drhd_units, list) {
                drhd = container_of(dmaru->hdr,
                                    struct acpi_dmar_hardware_unit,
                                    header);

                if (dmaru->include_all &&
                    drhd->segment == pci_domain_nr(dev->bus))
                        return dmaru;

                if (dmar_pci_device_match(dmaru->devices,
                                          dmaru->devices_cnt, dev))
                        return dmaru;
        }

        return NULL;
}

int __init dmar_dev_scope_init(void)
{
        struct dmar_drhd_unit *drhd, *drhd_n;
        int ret = -ENODEV;

        list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
                ret = dmar_parse_dev(drhd);
                if (ret)
                        return ret;
        }

#ifdef CONFIG_DMAR
        {
                struct dmar_rmrr_unit *rmrr, *rmrr_n;
                struct dmar_atsr_unit *atsr, *atsr_n;

                list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
                        ret = rmrr_parse_dev(rmrr);
                        if (ret)
                                return ret;
                }

                list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
                        ret = atsr_parse_dev(atsr);
                        if (ret)
                                return ret;
                }
        }
#endif

        return ret;
}

int __init dmar_table_init(void)
{
        static int dmar_table_initialized;
        int ret;

        if (dmar_table_initialized)
                return 0;

        dmar_table_initialized = 1;

        ret = parse_dmar_table();
        if (ret) {
                if (ret != -ENODEV)
                        printk(KERN_INFO PREFIX "Failed to parse DMAR table\n");
                return ret;
        }

        if (list_empty(&dmar_drhd_units)) {
                printk(KERN_INFO PREFIX "No DMAR devices found\n");
                return -ENODEV;
        }

#ifdef CONFIG_DMAR
        if (list_empty(&dmar_rmrr_units))
                printk(KERN_INFO PREFIX "No RMRR found\n");

        if (list_empty(&dmar_atsr_units))
                printk(KERN_INFO PREFIX "No ATSR found\n");
#endif

        return 0;
}

void __init detect_intel_iommu(void)
{
        int ret;

        ret = dmar_table_detect();

        {
#ifdef CONFIG_INTR_REMAP
                struct acpi_table_dmar *dmar;
                /*
                 * For now we will disable DMA remapping when interrupt
                 * remapping is enabled.  Once support for queued
                 * invalidation of the IOTLB is added, this will no
                 * longer be needed.
                 */
                dmar = (struct acpi_table_dmar *) dmar_tbl;
                if (ret && cpu_has_x2apic && dmar->flags & 0x1)
                        printk(KERN_INFO
                               "Queued invalidation will be enabled to support "
                               "x2apic and Intr-remapping.\n");
#endif
#ifdef CONFIG_DMAR
                if (ret && !no_iommu && !iommu_detected && !swiotlb &&
                    !dmar_disabled)
                        iommu_detected = 1;
#endif
        }
        early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
        dmar_tbl = NULL;
}

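/*
 * Map an IOMMU's registers and read its capabilities.  The initial
 * mapping covers one VT-d page; if the capability registers report that
 * the fault recording or IOTLB registers extend further, the region is
 * remapped at the larger size.
 */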
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
        struct intel_iommu *iommu;
        int map_size;
        u32 ver;
        static int iommu_allocated = 0;
        int agaw = 0;
        int msagaw = 0;

        iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENOMEM;

        iommu->seq_id = iommu_allocated++;
        sprintf(iommu->name, "dmar%d", iommu->seq_id);

        iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
        if (!iommu->reg) {
                printk(KERN_ERR "IOMMU: can't map the region\n");
                goto error;
        }
        iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
        iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

        if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
                /* Promote an attitude of violence to a BIOS engineer today */
                WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
                     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                     drhd->reg_base_addr,
                     dmi_get_system_info(DMI_BIOS_VENDOR),
                     dmi_get_system_info(DMI_BIOS_VERSION),
                     dmi_get_system_info(DMI_PRODUCT_VERSION));
                goto err_unmap;
        }

#ifdef CONFIG_DMAR
        agaw = iommu_calculate_agaw(iommu);
        if (agaw < 0) {
                printk(KERN_ERR
                       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
                       iommu->seq_id);
                goto err_unmap;
        }
        msagaw = iommu_calculate_max_sagaw(iommu);
        if (msagaw < 0) {
                printk(KERN_ERR
                        "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
                        iommu->seq_id);
                goto err_unmap;
        }
#endif
        iommu->agaw = agaw;
        iommu->msagaw = msagaw;

        iommu->node = -1;

        /* the registers might be more than one page */
        map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
                cap_max_fault_reg_offset(iommu->cap));
        map_size = VTD_PAGE_ALIGN(map_size);
        if (map_size > VTD_PAGE_SIZE) {
                iounmap(iommu->reg);
                iommu->reg = ioremap(drhd->reg_base_addr, map_size);
                if (!iommu->reg) {
                        printk(KERN_ERR "IOMMU: can't map the region\n");
                        goto error;
                }
        }

        ver = readl(iommu->reg + DMAR_VER_REG);
        pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
                (unsigned long long)drhd->reg_base_addr,
                DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
                (unsigned long long)iommu->cap,
                (unsigned long long)iommu->ecap);

        spin_lock_init(&iommu->register_lock);

        drhd->iommu = iommu;
        return 0;

 err_unmap:
        iounmap(iommu->reg);
 error:
        kfree(iommu);
        return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
        if (!iommu)
                return;

#ifdef CONFIG_DMAR
        free_dmar_iommu(iommu);
#endif

        if (iommu->reg)
                iounmap(iommu->reg);
        kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
        while (qi->desc_status[qi->free_tail] == QI_DONE ||
               qi->desc_status[qi->free_tail] == QI_ABORT) {
                qi->desc_status[qi->free_tail] = QI_FREE;
                qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
                qi->free_cnt++;
        }
}

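/*
 * Check the fault status register for queued-invalidation errors that
 * affect the descriptor at @index.  An Invalidation Queue Error (IQE)
 * on our descriptor is fatal for this submission; an Invalidation
 * Time-out Error (ITE) aborts the pending waits and asks the caller to
 * retry.
 */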
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
        u32 fault;
        int head, tail;
        struct q_inval *qi = iommu->qi;
        int wait_index = (index + 1) % QI_LENGTH;

        if (qi->desc_status[wait_index] == QI_ABORT)
                return -EAGAIN;

        fault = readl(iommu->reg + DMAR_FSTS_REG);

        /*
         * If IQE happens, the head points to the descriptor associated
         * with the error. No new descriptors are fetched until the IQE
         * is cleared.
         */
        if (fault & DMA_FSTS_IQE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
                if ((head >> DMAR_IQ_SHIFT) == index) {
                        printk(KERN_ERR "VT-d detected invalid descriptor: "
                                "low=%llx, high=%llx\n",
                                (unsigned long long)qi->desc[index].low,
                                (unsigned long long)qi->desc[index].high);
                        memcpy(&qi->desc[index], &qi->desc[wait_index],
                                        sizeof(struct qi_desc));
                        __iommu_flush_cache(iommu, &qi->desc[index],
                                        sizeof(struct qi_desc));
                        writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
                        return -EINVAL;
                }
        }

        /*
         * If ITE happens, all pending wait_desc commands are aborted.
         * No new descriptors are fetched until the ITE is cleared.
         */
        if (fault & DMA_FSTS_ITE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
                head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
                head |= 1;
                tail = readl(iommu->reg + DMAR_IQT_REG);
                tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

                writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

                do {
                        if (qi->desc_status[head] == QI_IN_USE)
                                qi->desc_status[head] = QI_ABORT;
                        head = (head - 2 + QI_LENGTH) % QI_LENGTH;
                } while (head != tail);

                if (qi->desc_status[wait_index] == QI_ABORT)
                        return -EAGAIN;
        }

        if (fault & DMA_FSTS_ICE)
                writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

        return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
        int rc;
        struct q_inval *qi = iommu->qi;
        struct qi_desc *hw, wait_desc;
        int wait_index, index;
        unsigned long flags;

        if (!qi)
                return 0;

        hw = qi->desc;

restart:
        rc = 0;

        spin_lock_irqsave(&qi->q_lock, flags);
        while (qi->free_cnt < 3) {
                spin_unlock_irqrestore(&qi->q_lock, flags);
                cpu_relax();
                spin_lock_irqsave(&qi->q_lock, flags);
        }

        index = qi->free_head;
        wait_index = (index + 1) % QI_LENGTH;

        qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

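        /*
         * Each submission consumes two slots: the caller's descriptor at
         * index and a wait descriptor right after it.  Hardware writes
         * QI_DONE into the wait descriptor's status word once everything
         * ahead of it, including our descriptor, has completed.
         */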
        hw[index] = *desc;

        wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
                        QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
        wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

        hw[wait_index] = wait_desc;

        __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
        __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

        qi->free_head = (qi->free_head + 2) % QI_LENGTH;
        qi->free_cnt -= 2;

        /*
         * Update the HW tail register indicating the presence of
         * new descriptors.
         */
        writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

        while (qi->desc_status[wait_index] != QI_DONE) {
                /*
                 * We will leave the interrupts disabled, to prevent
                 * interrupt context from queueing another cmd while a cmd
                 * is already submitted and waiting for completion on this
                 * cpu. This is to avoid a deadlock where the interrupt
                 * context can wait indefinitely for free slots in the
                 * queue.
                 */
                rc = qi_check_fault(iommu, index);
                if (rc)
                        break;

                spin_unlock(&qi->q_lock);
                cpu_relax();
                spin_lock(&qi->q_lock);
        }

        qi->desc_status[index] = QI_DONE;

        reclaim_free_desc(qi);
        spin_unlock_irqrestore(&qi->q_lock, flags);

        if (rc == -EAGAIN)
                goto restart;

        return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
        struct qi_desc desc;

        desc.low = QI_IEC_TYPE;
        desc.high = 0;

        /* should never fail */
        qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
                      u64 type)
{
        struct qi_desc desc;

        desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
                        | QI_CC_GRAN(type) | QI_CC_TYPE;
        desc.high = 0;

        qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
                    unsigned int size_order, u64 type)
{
        u8 dw = 0, dr = 0;
        struct qi_desc desc;
        int ih = 0;

        if (cap_write_drain(iommu->cap))
                dw = 1;

        if (cap_read_drain(iommu->cap))
                dr = 1;

        desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
                | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
        desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
                | QI_IOTLB_AM(size_order);

        qi_submit_sync(&desc, iommu);
}

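/*
 * Issue a device-IOTLB (ATS) invalidation.  A non-zero mask encodes a
 * size-aligned range: the address bits below the mask boundary are set
 * to ones, per the VT-d address-mask format.
 */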
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
                        u64 addr, unsigned mask)
{
        struct qi_desc desc;

        if (mask) {
                BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
                addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
                desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
        } else
                desc.high = QI_DEV_IOTLB_ADDR(addr);

        if (qdep >= QI_DEV_IOTLB_MAX_INVS)
                qdep = 0;

        desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
                   QI_DIOTLB_TYPE;

        qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
        unsigned long flags;
        u32 sts;
        cycles_t start_time = get_cycles();

        if (!ecap_qis(iommu->ecap))
                return;

        spin_lock_irqsave(&iommu->register_lock, flags);

        sts = readl(iommu->reg + DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_QIES))
                goto end;

        /*
         * Give a chance to HW to complete the pending invalidation requests.
         */
        while ((readl(iommu->reg + DMAR_IQT_REG) !=
                readl(iommu->reg + DMAR_IQH_REG)) &&
                (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
                cpu_relax();

        iommu->gcmd &= ~DMA_GCMD_QIE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
                      !(sts & DMA_GSTS_QIES), sts);
end:
        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
        u32 sts;
        unsigned long flags;
        struct q_inval *qi = iommu->qi;

        qi->free_head = qi->free_tail = 0;
        qi->free_cnt = QI_LENGTH;

        spin_lock_irqsave(&iommu->register_lock, flags);

        /* write zero to the tail reg */
        writel(0, iommu->reg + DMAR_IQT_REG);

        dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

        iommu->gcmd |= DMA_GCMD_QIE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
        struct q_inval *qi;
        struct page *desc_page;

        if (!ecap_qis(iommu->ecap))
                return -ENOENT;

        /*
         * Queued invalidation is already set up and enabled.
         */
        if (iommu->qi)
                return 0;

        iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
        if (!iommu->qi)
                return -ENOMEM;

        qi = iommu->qi;

        desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
        if (!desc_page) {
                kfree(qi);
                iommu->qi = NULL;
                return -ENOMEM;
        }

        qi->desc = page_address(desc_page);

        qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
        if (!qi->desc_status) {
                free_page((unsigned long) qi->desc);
                kfree(qi);
                iommu->qi = NULL;
                return -ENOMEM;
        }

        qi->free_head = qi->free_tail = 0;
        qi->free_cnt = QI_LENGTH;

        spin_lock_init(&qi->q_lock);

        __dmar_enable_qi(iommu);

        return 0;
}

/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
        DMA_REMAP,
        INTR_REMAP,
        UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
        "Software",
        "Present bit in root entry is clear",
        "Present bit in context entry is clear",
        "Invalid context entry",
        "Access beyond MGAW",
        "PTE Write access is not set",
        "PTE Read access is not set",
        "Next page table ptr is invalid",
        "Root table address invalid",
        "Context table ptr is invalid",
        "non-zero reserved fields in RTP",
        "non-zero reserved fields in CTP",
        "non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
        "Detected reserved fields in the decoded interrupt-remapped request",
        "Interrupt index exceeded the interrupt-remapping table size",
        "Present field in the IRTE entry is clear",
        "Error accessing interrupt-remapping table pointed by IRTA_REG",
        "Detected reserved fields in the IRTE entry",
        "Blocked a compatibility format interrupt request",
        "Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX    (ARRAY_SIZE(dma_remap_fault_reasons) - 1)

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
        if (fault_reason >= 0x20 && (fault_reason - 0x20 <
                                     ARRAY_SIZE(intr_remap_fault_reasons))) {
                *fault_type = INTR_REMAP;
                return intr_remap_fault_reasons[fault_reason - 0x20];
        } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
                *fault_type = DMA_REMAP;
                return dma_remap_fault_reasons[fault_reason];
        } else {
                *fault_type = UNKNOWN;
                return "Unknown";
        }
}

void dmar_msi_unmask(unsigned int irq)
{
        struct intel_iommu *iommu = get_irq_data(irq);
        unsigned long flag;

        /* unmask it */
        spin_lock_irqsave(&iommu->register_lock, flag);
        writel(0, iommu->reg + DMAR_FECTL_REG);
        /* Read a reg to force flush the post write */
        readl(iommu->reg + DMAR_FECTL_REG);
        spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(unsigned int irq)
{
        unsigned long flag;
        struct intel_iommu *iommu = get_irq_data(irq);

        /* mask it */
        spin_lock_irqsave(&iommu->register_lock, flag);
        writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
        /* Read a reg to force flush the post write */
        readl(iommu->reg + DMAR_FECTL_REG);
        spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
        struct intel_iommu *iommu = get_irq_data(irq);
        unsigned long flag;

        spin_lock_irqsave(&iommu->register_lock, flag);
        writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
        writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
        writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
        spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
        struct intel_iommu *iommu = get_irq_data(irq);
        unsigned long flag;

        spin_lock_irqsave(&iommu->register_lock, flag);
        msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
        msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
        msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
        spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
                u8 fault_reason, u16 source_id, unsigned long long addr)
{
        const char *reason;
        int fault_type;

        reason = dmar_get_fault_reason(fault_reason, &fault_type);

        if (fault_type == INTR_REMAP)
                printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
                       "fault index %llx\n"
                        "INTR-REMAP:[fault reason %02d] %s\n",
                        (source_id >> 8), PCI_SLOT(source_id & 0xFF),
                        PCI_FUNC(source_id & 0xFF), addr >> 48,
                        fault_reason, reason);
        else
                printk(KERN_ERR
                       "DMAR:[%s] Request device [%02x:%02x.%d] "
                       "fault addr %llx\n"
                       "DMAR:[fault reason %02d] %s\n",
                       (type ? "DMA Read" : "DMA Write"),
                       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
                       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
        return 0;
}

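/*
 * Fault interrupt handler: walk the primary fault recording registers
 * starting at the index reported in the fault status register, log and
 * clear each recorded fault, then clear the remaining status bits.
 */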
#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
        struct intel_iommu *iommu = dev_id;
        int reg, fault_index;
        u32 fault_status;
        unsigned long flag;

        spin_lock_irqsave(&iommu->register_lock, flag);
        fault_status = readl(iommu->reg + DMAR_FSTS_REG);
        if (fault_status)
                printk(KERN_ERR "DRHD: handling fault status reg %x\n",
                       fault_status);

        /* TBD: ignore advanced fault log currently */
        if (!(fault_status & DMA_FSTS_PPF))
                goto clear_rest;

        fault_index = dma_fsts_fault_record_index(fault_status);
        reg = cap_fault_reg_offset(iommu->cap);
        while (1) {
                u8 fault_reason;
                u16 source_id;
                u64 guest_addr;
                int type;
                u32 data;

                /* highest 32 bits */
                data = readl(iommu->reg + reg +
                                fault_index * PRIMARY_FAULT_REG_LEN + 12);
                if (!(data & DMA_FRCD_F))
                        break;

                fault_reason = dma_frcd_fault_reason(data);
                type = dma_frcd_type(data);

                data = readl(iommu->reg + reg +
                                fault_index * PRIMARY_FAULT_REG_LEN + 8);
                source_id = dma_frcd_source_id(data);

                guest_addr = dmar_readq(iommu->reg + reg +
                                fault_index * PRIMARY_FAULT_REG_LEN);
                guest_addr = dma_frcd_page_addr(guest_addr);
                /* clear the fault */
                writel(DMA_FRCD_F, iommu->reg + reg +
                        fault_index * PRIMARY_FAULT_REG_LEN + 12);

                spin_unlock_irqrestore(&iommu->register_lock, flag);

                dmar_fault_do_one(iommu, type, fault_reason,
                                source_id, guest_addr);

                fault_index++;
                if (fault_index >= cap_num_fault_regs(iommu->cap))
                        fault_index = 0;
                spin_lock_irqsave(&iommu->register_lock, flag);
        }
clear_rest:
        /* clear all the other faults */
        fault_status = readl(iommu->reg + DMAR_FSTS_REG);
        writel(fault_status, iommu->reg + DMAR_FSTS_REG);

        spin_unlock_irqrestore(&iommu->register_lock, flag);
        return IRQ_HANDLED;
}

int dmar_set_interrupt(struct intel_iommu *iommu)
{
        int irq, ret;

        /*
         * Check if the fault interrupt is already initialized.
         */
        if (iommu->irq)
                return 0;

        irq = create_irq();
        if (!irq) {
                printk(KERN_ERR "IOMMU: no free vectors\n");
                return -EINVAL;
        }

        set_irq_data(irq, iommu);
        iommu->irq = irq;

        ret = arch_setup_dmar_msi(irq);
        if (ret) {
                set_irq_data(irq, NULL);
                iommu->irq = 0;
                destroy_irq(irq);
                return ret;
        }

        ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
        if (ret)
                printk(KERN_ERR "IOMMU: can't request irq\n");
        return ret;
}

int __init enable_drhd_fault_handling(void)
{
        struct dmar_drhd_unit *drhd;

        /*
         * Enable fault control interrupt.
         */
        for_each_drhd_unit(drhd) {
                int ret;
                struct intel_iommu *iommu = drhd->iommu;
                ret = dmar_set_interrupt(iommu);

                if (ret) {
                        printk(KERN_ERR "DRHD %Lx: failed to enable fault "
                               "interrupt, ret %d\n",
                               (unsigned long long)drhd->reg_base_addr, ret);
                        return -1;
                }
        }

        return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
        if (!ecap_qis(iommu->ecap))
                return -ENOENT;

        if (!iommu->qi)
                return -ENOENT;

        /*
         * First disable queued invalidation.
         */
        dmar_disable_qi(iommu);
        /*
         * Then enable queued invalidation again. Since there are no
         * pending invalidation requests now, it's safe to re-enable
         * queued invalidation.
         */
        __dmar_enable_qi(iommu);

        return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int dmar_ir_support(void)
{
        struct acpi_table_dmar *dmar;

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        if (!dmar)
                return 0;
        return dmar->flags & 0x1;
}