drivers/pci/dmar.c [net-next-2.6.git]
intel-iommu: Combine the BIOS DMAR table warning messages
1/*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
98bcef56 17 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
10e5247f 21 *
e61d98d8 22 * This file implements early detection/parsing of Remapping Devices
23 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
24 * tables.
25 *
26 * These routines are used by both DMA-remapping and Interrupt-remapping
27 */
28
29#include <linux/pci.h>
30#include <linux/dmar.h>
31#include <linux/iova.h>
32#include <linux/intel-iommu.h>
fe962e90 33#include <linux/timer.h>
34#include <linux/irq.h>
35#include <linux/interrupt.h>
69575d38 36#include <linux/tboot.h>
eb27cae8 37#include <linux/dmi.h>
10e5247f 38
a192a958 39#define PREFIX "DMAR: "
40
41/* No locks are needed as DMA remapping hardware unit
42 * list is constructed at boot time and hotplug of
43 * these units are not supported by the architecture.
44 */
45LIST_HEAD(dmar_drhd_units);
46
47static struct acpi_table_header * __initdata dmar_tbl;
8e1568f3 48static acpi_size dmar_tbl_size;
49
50static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
51{
52 /*
 53 * add INCLUDE_ALL at the tail, so a scan of the list will find it
 54 * at the very end.
55 */
56 if (drhd->include_all)
57 list_add_tail(&drhd->list, &dmar_drhd_units);
58 else
59 list_add(&drhd->list, &dmar_drhd_units);
60}
61
62static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
63 struct pci_dev **dev, u16 segment)
64{
65 struct pci_bus *bus;
66 struct pci_dev *pdev = NULL;
67 struct acpi_dmar_pci_path *path;
68 int count;
69
70 bus = pci_find_bus(segment, scope->bus);
71 path = (struct acpi_dmar_pci_path *)(scope + 1);
72 count = (scope->length - sizeof(struct acpi_dmar_device_scope))
73 / sizeof(struct acpi_dmar_pci_path);
74
75 while (count) {
76 if (pdev)
77 pci_dev_put(pdev);
78 /*
 79 * Some BIOSes list non-existent devices in the DMAR table; just
 80 * ignore them
81 */
82 if (!bus) {
83 printk(KERN_WARNING
84 PREFIX "Device scope bus [%d] not found\n",
85 scope->bus);
86 break;
87 }
88 pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
89 if (!pdev) {
90 printk(KERN_WARNING PREFIX
91 "Device scope device [%04x:%02x:%02x.%02x] not found\n",
92 segment, bus->number, path->dev, path->fn);
93 break;
94 }
95 path ++;
96 count --;
97 bus = pdev->subordinate;
98 }
99 if (!pdev) {
100 printk(KERN_WARNING PREFIX
101 "Device scope device [%04x:%02x:%02x.%02x] not found\n",
102 segment, scope->bus, path->dev, path->fn);
103 *dev = NULL;
104 return 0;
105 }
106 if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \
107 pdev->subordinate) || (scope->entry_type == \
108 ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
109 pci_dev_put(pdev);
110 printk(KERN_WARNING PREFIX
111 "Device scope type does not match for %s\n",
112 pci_name(pdev));
113 return -EINVAL;
114 }
115 *dev = pdev;
116 return 0;
117}
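
/*
 * Illustrative sketch, not part of the original source: the scope walk
 * above relies on the ACPI layout of a device-scope entry, a fixed
 * header immediately followed by a variable number of PCI path entries:
 *
 *	struct acpi_dmar_device_scope {		(6 bytes)
 *		u8  entry_type;
 *		u8  length;	   header plus all path entries
 *		u16 reserved;
 *		u8  enumeration_id;
 *		u8  bus;
 *	};
 *	struct acpi_dmar_pci_path {		(2 bytes each)
 *		u8  dev;
 *		u8  fn;
 *	};
 *
 * A scope entry of length 10 therefore carries (10 - 6) / 2 == 2 path
 * entries, e.g. a bridge and the endpoint behind it, which is exactly
 * what the count computation at the top of the function evaluates.
 */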
118
119static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
120 struct pci_dev ***devices, u16 segment)
121{
122 struct acpi_dmar_device_scope *scope;
123 void * tmp = start;
124 int index;
125 int ret;
126
127 *cnt = 0;
128 while (start < end) {
129 scope = start;
130 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
131 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
132 (*cnt)++;
5715f0f9 133 else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
10e5247f 134 printk(KERN_WARNING PREFIX
135 "Unsupported device scope\n");
136 }
137 start += scope->length;
138 }
139 if (*cnt == 0)
140 return 0;
141
142 *devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
143 if (!*devices)
144 return -ENOMEM;
145
146 start = tmp;
147 index = 0;
148 while (start < end) {
149 scope = start;
150 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
151 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
152 ret = dmar_parse_one_dev_scope(scope,
153 &(*devices)[index], segment);
154 if (ret) {
155 kfree(*devices);
156 return ret;
157 }
158 index ++;
159 }
160 start += scope->length;
161 }
162
163 return 0;
164}
165
166/**
167 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 168 * structure which uniquely represents one DMA remapping hardware unit
169 * present in the platform
170 */
171static int __init
172dmar_parse_one_drhd(struct acpi_dmar_header *header)
173{
174 struct acpi_dmar_hardware_unit *drhd;
175 struct dmar_drhd_unit *dmaru;
176 int ret = 0;
10e5247f 177
e523b38e 178 drhd = (struct acpi_dmar_hardware_unit *)header;
179 dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
180 if (!dmaru)
181 return -ENOMEM;
182
1886e8a9 183 dmaru->hdr = header;
10e5247f 184 dmaru->reg_base_addr = drhd->address;
276dbf99 185 dmaru->segment = drhd->segment;
186 dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
187
188 ret = alloc_iommu(dmaru);
189 if (ret) {
190 kfree(dmaru);
191 return ret;
192 }
193 dmar_register_drhd_unit(dmaru);
194 return 0;
195}
196
f82851a8 197static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
198{
199 struct acpi_dmar_hardware_unit *drhd;
f82851a8 200 int ret = 0;
201
202 drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
203
204 if (dmaru->include_all)
205 return 0;
206
207 ret = dmar_parse_dev_scope((void *)(drhd + 1),
1886e8a9 208 ((void *)drhd) + drhd->header.length,
209 &dmaru->devices_cnt, &dmaru->devices,
210 drhd->segment);
1c7d1bca 211 if (ret) {
1886e8a9 212 list_del(&dmaru->list);
10e5247f 213 kfree(dmaru);
1886e8a9 214 }
215 return ret;
216}
217
218#ifdef CONFIG_DMAR
219LIST_HEAD(dmar_rmrr_units);
220
221static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
222{
223 list_add(&rmrr->list, &dmar_rmrr_units);
224}
225
226
227static int __init
228dmar_parse_one_rmrr(struct acpi_dmar_header *header)
229{
230 struct acpi_dmar_reserved_memory *rmrr;
231 struct dmar_rmrr_unit *rmrru;
232
233 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
234 if (!rmrru)
235 return -ENOMEM;
236
1886e8a9 237 rmrru->hdr = header;
238 rmrr = (struct acpi_dmar_reserved_memory *)header;
239 rmrru->base_address = rmrr->base_address;
240 rmrru->end_address = rmrr->end_address;
241
242 dmar_register_rmrr_unit(rmrru);
243 return 0;
244}
245
246static int __init
247rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
248{
249 struct acpi_dmar_reserved_memory *rmrr;
250 int ret;
251
252 rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
10e5247f 253 ret = dmar_parse_dev_scope((void *)(rmrr + 1),
1886e8a9 254 ((void *)rmrr) + rmrr->header.length,
255 &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
256
257 if (ret || (rmrru->devices_cnt == 0)) {
258 list_del(&rmrru->list);
10e5247f 259 kfree(rmrru);
1886e8a9 260 }
261 return ret;
262}
263
264static LIST_HEAD(dmar_atsr_units);
265
266static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
267{
268 struct acpi_dmar_atsr *atsr;
269 struct dmar_atsr_unit *atsru;
270
271 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
272 atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
273 if (!atsru)
274 return -ENOMEM;
275
276 atsru->hdr = hdr;
277 atsru->include_all = atsr->flags & 0x1;
278
279 list_add(&atsru->list, &dmar_atsr_units);
280
281 return 0;
282}
283
284static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
285{
286 int rc;
287 struct acpi_dmar_atsr *atsr;
288
289 if (atsru->include_all)
290 return 0;
291
292 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
293 rc = dmar_parse_dev_scope((void *)(atsr + 1),
294 (void *)atsr + atsr->header.length,
295 &atsru->devices_cnt, &atsru->devices,
296 atsr->segment);
297 if (rc || !atsru->devices_cnt) {
298 list_del(&atsru->list);
299 kfree(atsru);
300 }
301
302 return rc;
303}
304
305int dmar_find_matched_atsr_unit(struct pci_dev *dev)
306{
307 int i;
308 struct pci_bus *bus;
309 struct acpi_dmar_atsr *atsr;
310 struct dmar_atsr_unit *atsru;
311
312 dev = pci_physfn(dev);
313
314 list_for_each_entry(atsru, &dmar_atsr_units, list) {
315 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
316 if (atsr->segment == pci_domain_nr(dev->bus))
317 goto found;
318 }
319
320 return 0;
321
322found:
323 for (bus = dev->bus; bus; bus = bus->parent) {
324 struct pci_dev *bridge = bus->self;
325
5f4d91a1 326 if (!bridge || !pci_is_pcie(bridge) ||
327 bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
328 return 0;
329
330 if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
331 for (i = 0; i < atsru->devices_cnt; i++)
332 if (atsru->devices[i] == bridge)
333 return 1;
334 break;
335 }
336 }
337
338 if (atsru->include_all)
339 return 1;
340
341 return 0;
342}
aaa9d1dd 343#endif
10e5247f 344
aa697079 345#ifdef CONFIG_ACPI_NUMA
346static int __init
347dmar_parse_one_rhsa(struct acpi_dmar_header *header)
348{
349 struct acpi_dmar_rhsa *rhsa;
350 struct dmar_drhd_unit *drhd;
351
352 rhsa = (struct acpi_dmar_rhsa *)header;
aa697079 353 for_each_drhd_unit(drhd) {
354 if (drhd->reg_base_addr == rhsa->base_address) {
355 int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
356
357 if (!node_online(node))
358 node = -1;
359 drhd->iommu->node = node;
360 return 0;
361 }
ee34b32d 362 }
363 WARN(1, "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
364 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
365 drhd->reg_base_addr,
366 dmi_get_system_info(DMI_BIOS_VENDOR),
367 dmi_get_system_info(DMI_BIOS_VERSION),
368 dmi_get_system_info(DMI_PRODUCT_VERSION));
ee34b32d 369
aa697079 370 return 0;
ee34b32d 371}
aa697079 372#endif
ee34b32d 373
374static void __init
375dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
376{
377 struct acpi_dmar_hardware_unit *drhd;
378 struct acpi_dmar_reserved_memory *rmrr;
aa5d2b51 379 struct acpi_dmar_atsr *atsr;
17b60977 380 struct acpi_dmar_rhsa *rhsa;
381
382 switch (header->type) {
383 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
384 drhd = container_of(header, struct acpi_dmar_hardware_unit,
385 header);
10e5247f 386 printk (KERN_INFO PREFIX
387 "DRHD base: %#016Lx flags: %#x\n",
388 (unsigned long long)drhd->address, drhd->flags);
389 break;
390 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
391 rmrr = container_of(header, struct acpi_dmar_reserved_memory,
392 header);
10e5247f 393 printk (KERN_INFO PREFIX
aa5d2b51 394 "RMRR base: %#016Lx end: %#016Lx\n",
395 (unsigned long long)rmrr->base_address,
396 (unsigned long long)rmrr->end_address);
10e5247f 397 break;
398 case ACPI_DMAR_TYPE_ATSR:
399 atsr = container_of(header, struct acpi_dmar_atsr, header);
400 printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
401 break;
402 case ACPI_DMAR_HARDWARE_AFFINITY:
403 rhsa = container_of(header, struct acpi_dmar_rhsa, header);
404 printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
405 (unsigned long long)rhsa->base_address,
406 rhsa->proximity_domain);
407 break;
408 }
409}
410
411/**
412 * dmar_table_detect - checks to see if the platform supports DMAR devices
413 */
414static int __init dmar_table_detect(void)
415{
416 acpi_status status = AE_OK;
417
 418 /* if we can find the DMAR table, then there are DMAR devices */
419 status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
420 (struct acpi_table_header **)&dmar_tbl,
421 &dmar_tbl_size);
422
423 if (ACPI_SUCCESS(status) && !dmar_tbl) {
424 printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
425 status = AE_NOT_FOUND;
426 }
427
428 return (ACPI_SUCCESS(status) ? 1 : 0);
429}
aaa9d1dd 430
431/**
432 * parse_dmar_table - parses the DMA reporting table
433 */
434static int __init
435parse_dmar_table(void)
436{
437 struct acpi_table_dmar *dmar;
438 struct acpi_dmar_header *entry_header;
439 int ret = 0;
440
441 /*
 442 * Do it again; the earlier dmar_tbl mapping may have been set up
 443 * through the fixed map.
444 */
445 dmar_table_detect();
446
447 /*
 448 * ACPI tables may not be DMA protected by tboot, so use the DMAR copy
 449 * that SINIT saved in SinitMleData in the TXT heap (which is DMA protected)
450 */
451 dmar_tbl = tboot_get_dmar_table(dmar_tbl);
452
453 dmar = (struct acpi_table_dmar *)dmar_tbl;
454 if (!dmar)
455 return -ENODEV;
456
5b6985ce 457 if (dmar->width < PAGE_SHIFT - 1) {
093f87d2 458 printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
459 return -EINVAL;
460 }
461
462 printk (KERN_INFO PREFIX "Host address width %d\n",
463 dmar->width + 1);
464
465 entry_header = (struct acpi_dmar_header *)(dmar + 1);
466 while (((unsigned long)entry_header) <
467 (((unsigned long)dmar) + dmar_tbl->length)) {
468 /* Avoid looping forever on bad ACPI tables */
469 if (entry_header->length == 0) {
470 printk(KERN_WARNING PREFIX
471 "Invalid 0-length structure\n");
472 ret = -EINVAL;
473 break;
474 }
475
476 dmar_table_print_dmar_entry(entry_header);
477
478 switch (entry_header->type) {
479 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
480 ret = dmar_parse_one_drhd(entry_header);
481 break;
482 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
aaa9d1dd 483#ifdef CONFIG_DMAR
10e5247f 484 ret = dmar_parse_one_rmrr(entry_header);
485#endif
486 break;
487 case ACPI_DMAR_TYPE_ATSR:
488#ifdef CONFIG_DMAR
489 ret = dmar_parse_one_atsr(entry_header);
aaa9d1dd 490#endif
10e5247f 491 break;
17b60977 492 case ACPI_DMAR_HARDWARE_AFFINITY:
aa697079 493#ifdef CONFIG_ACPI_NUMA
ee34b32d 494 ret = dmar_parse_one_rhsa(entry_header);
aa697079 495#endif
17b60977 496 break;
497 default:
498 printk(KERN_WARNING PREFIX
499 "Unknown DMAR structure type %d\n",
500 entry_header->type);
501 ret = 0; /* for forward compatibility */
502 break;
503 }
504 if (ret)
505 break;
506
507 entry_header = ((void *)entry_header + entry_header->length);
508 }
509 return ret;
510}
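
/*
 * Illustrative layout, not part of the original source, of the table
 * that parse_dmar_table() walks: the fixed ACPI DMAR header is followed
 * by a chain of variable-length remapping structures, each introduced
 * by struct acpi_dmar_header { u16 type; u16 length; }:
 *
 *	+---------------------------+  <- dmar_tbl
 *	| struct acpi_table_dmar    |  (ACPI header, haw, flags)
 *	+---------------------------+  <- entry_header = (void *)(dmar + 1)
 *	| DRHD   (type 0)           |
 *	+---------------------------+  <- advance by entry_header->length
 *	| RMRR   (type 1)           |
 *	+---------------------------+
 *	| ATSR / RHSA / ...         |
 *	+---------------------------+  <- dmar_tbl + dmar_tbl->length
 *
 * The zero-length check above is what keeps a malformed table from
 * turning this walk into an infinite loop.
 */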
511
dda56549 512static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
513 struct pci_dev *dev)
514{
515 int index;
516
517 while (dev) {
518 for (index = 0; index < cnt; index++)
519 if (dev == devices[index])
520 return 1;
521
522 /* Check our parent */
523 dev = dev->bus->self;
524 }
525
526 return 0;
527}
528
529struct dmar_drhd_unit *
530dmar_find_matched_drhd_unit(struct pci_dev *dev)
531{
532 struct dmar_drhd_unit *dmaru = NULL;
533 struct acpi_dmar_hardware_unit *drhd;
534
535 dev = pci_physfn(dev);
536
537 list_for_each_entry(dmaru, &dmar_drhd_units, list) {
538 drhd = container_of(dmaru->hdr,
539 struct acpi_dmar_hardware_unit,
540 header);
541
542 if (dmaru->include_all &&
543 drhd->segment == pci_domain_nr(dev->bus))
544 return dmaru;
e61d98d8 545
546 if (dmar_pci_device_match(dmaru->devices,
547 dmaru->devices_cnt, dev))
548 return dmaru;
549 }
550
551 return NULL;
552}
553
554int __init dmar_dev_scope_init(void)
555{
04e2ea67 556 struct dmar_drhd_unit *drhd, *drhd_n;
557 int ret = -ENODEV;
558
04e2ea67 559 list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
560 ret = dmar_parse_dev(drhd);
561 if (ret)
562 return ret;
563 }
564
565#ifdef CONFIG_DMAR
566 {
04e2ea67 567 struct dmar_rmrr_unit *rmrr, *rmrr_n;
568 struct dmar_atsr_unit *atsr, *atsr_n;
569
04e2ea67 570 list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
571 ret = rmrr_parse_dev(rmrr);
572 if (ret)
573 return ret;
574 }
575
576 list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
577 ret = atsr_parse_dev(atsr);
578 if (ret)
579 return ret;
580 }
1886e8a9 581 }
aaa9d1dd 582#endif
583
584 return ret;
585}
586
587
588int __init dmar_table_init(void)
589{
1886e8a9 590 static int dmar_table_initialized;
591 int ret;
592
593 if (dmar_table_initialized)
594 return 0;
595
596 dmar_table_initialized = 1;
597
598 ret = parse_dmar_table();
599 if (ret) {
600 if (ret != -ENODEV)
601 printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
602 return ret;
603 }
604
605 if (list_empty(&dmar_drhd_units)) {
606 printk(KERN_INFO PREFIX "No DMAR devices found\n");
607 return -ENODEV;
608 }
093f87d2 609
aaa9d1dd 610#ifdef CONFIG_DMAR
2d6b5f85 611 if (list_empty(&dmar_rmrr_units))
093f87d2 612 printk(KERN_INFO PREFIX "No RMRR found\n");
613
614 if (list_empty(&dmar_atsr_units))
615 printk(KERN_INFO PREFIX "No ATSR found\n");
aaa9d1dd 616#endif
093f87d2 617
618 return 0;
619}
620
621static void warn_invalid_dmar(u64 addr, const char *message)
622{
623 WARN_ONCE(1, "Your BIOS is broken; DMAR reported at address %llx%s!\n"
624 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
625 addr, message,
626 dmi_get_system_info(DMI_BIOS_VENDOR),
627 dmi_get_system_info(DMI_BIOS_VERSION),
628 dmi_get_system_info(DMI_PRODUCT_VERSION));
629}
6ecbf01c 630
631int __init check_zero_address(void)
632{
633 struct acpi_table_dmar *dmar;
634 struct acpi_dmar_header *entry_header;
635 struct acpi_dmar_hardware_unit *drhd;
636
637 dmar = (struct acpi_table_dmar *)dmar_tbl;
638 entry_header = (struct acpi_dmar_header *)(dmar + 1);
639
640 while (((unsigned long)entry_header) <
641 (((unsigned long)dmar) + dmar_tbl->length)) {
642 /* Avoid looping forever on bad ACPI tables */
643 if (entry_header->length == 0) {
644 printk(KERN_WARNING PREFIX
645 "Invalid 0-length structure\n");
646 return 0;
647 }
648
649 if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
650 void __iomem *addr;
651 u64 cap, ecap;
652
653 drhd = (void *)entry_header;
654 if (!drhd->address) {
3a8663ee 655 warn_invalid_dmar(0, "");
656 goto failed;
657 }
658
659 addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
660 if (!addr ) {
661 printk("IOMMU: can't validate: %llx\n", drhd->address);
662 goto failed;
663 }
664 cap = dmar_readq(addr + DMAR_CAP_REG);
665 ecap = dmar_readq(addr + DMAR_ECAP_REG);
666 early_iounmap(addr, VTD_PAGE_SIZE);
667 if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
668 warn_invalid_dmar(drhd->address,
669 " returns all ones");
2c992208 670 goto failed;
86cf898e 671 }
672 }
673
674 entry_header = ((void *)entry_header + entry_header->length);
675 }
676 return 1;
677
678failed:
679#ifdef CONFIG_DMAR
680 dmar_disabled = 1;
681#endif
682 return 0;
683}
684
685void __init detect_intel_iommu(void)
686{
687 int ret;
688
f6dd5c31 689 ret = dmar_table_detect();
690 if (ret)
691 ret = check_zero_address();
2ae21010 692 {
cacd4213 693#ifdef CONFIG_INTR_REMAP
694 struct acpi_table_dmar *dmar;
695 /*
696 * for now we will disable dma-remapping when interrupt
697 * remapping is enabled.
698 * When support for queued invalidation for IOTLB invalidation
699 * is added, we will not need this any more.
700 */
701 dmar = (struct acpi_table_dmar *) dmar_tbl;
cacd4213 702 if (ret && cpu_has_x2apic && dmar->flags & 0x1)
703 printk(KERN_INFO
704 "Queued invalidation will be enabled to support "
705 "x2apic and Intr-remapping.\n");
cacd4213 706#endif
cacd4213 707#ifdef CONFIG_DMAR
11bd04f6 708 if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
2ae21010 709 iommu_detected = 1;
710 /* Make sure ACS will be enabled */
711 pci_request_acs();
712 }
713#endif
714#ifdef CONFIG_X86
715 if (ret)
716 x86_init.iommu.iommu_init = intel_iommu_init;
2ae21010 717#endif
cacd4213 718 }
8e1568f3 719 early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
f6dd5c31 720 dmar_tbl = NULL;
721}
722
723
1886e8a9 724int alloc_iommu(struct dmar_drhd_unit *drhd)
e61d98d8 725{
c42d9f32 726 struct intel_iommu *iommu;
727 int map_size;
728 u32 ver;
c42d9f32 729 static int iommu_allocated = 0;
43f7392b 730 int agaw = 0;
4ed0d3e6 731 int msagaw = 0;
c42d9f32 732
6ecbf01c 733 if (!drhd->reg_base_addr) {
3a8663ee 734 warn_invalid_dmar(0, "");
735 return -EINVAL;
736 }
737
738 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
739 if (!iommu)
1886e8a9 740 return -ENOMEM;
741
742 iommu->seq_id = iommu_allocated++;
9d783ba0 743 sprintf (iommu->name, "dmar%d", iommu->seq_id);
e61d98d8 744
5b6985ce 745 iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
746 if (!iommu->reg) {
747 printk(KERN_ERR "IOMMU: can't map the region\n");
748 goto error;
749 }
750 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
751 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
752
0815565a 753 if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
3a8663ee 754 warn_invalid_dmar(drhd->reg_base_addr, " returns all ones");
755 goto err_unmap;
756 }
757
43f7392b 758#ifdef CONFIG_DMAR
759 agaw = iommu_calculate_agaw(iommu);
760 if (agaw < 0) {
761 printk(KERN_ERR
762 "Cannot get a valid agaw for iommu (seq_id = %d)\n",
763 iommu->seq_id);
0815565a 764 goto err_unmap;
765 }
766 msagaw = iommu_calculate_max_sagaw(iommu);
767 if (msagaw < 0) {
768 printk(KERN_ERR
769 "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
1b573683 770 iommu->seq_id);
0815565a 771 goto err_unmap;
1b573683 772 }
43f7392b 773#endif
1b573683 774 iommu->agaw = agaw;
4ed0d3e6 775 iommu->msagaw = msagaw;
1b573683 776
777 iommu->node = -1;
778
779 /* the registers might be more than one page */
780 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
781 cap_max_fault_reg_offset(iommu->cap));
782 map_size = VTD_PAGE_ALIGN(map_size);
783 if (map_size > VTD_PAGE_SIZE) {
784 iounmap(iommu->reg);
785 iommu->reg = ioremap(drhd->reg_base_addr, map_size);
786 if (!iommu->reg) {
787 printk(KERN_ERR "IOMMU: can't map the region\n");
788 goto error;
789 }
790 }
791
792 ver = readl(iommu->reg + DMAR_VER_REG);
793 pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
794 iommu->seq_id,
795 (unsigned long long)drhd->reg_base_addr,
796 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
797 (unsigned long long)iommu->cap,
798 (unsigned long long)iommu->ecap);
799
800 spin_lock_init(&iommu->register_lock);
801
802 drhd->iommu = iommu;
1886e8a9 803 return 0;
804
805 err_unmap:
806 iounmap(iommu->reg);
807 error:
e61d98d8 808 kfree(iommu);
1886e8a9 809 return -1;
810}
811
812void free_iommu(struct intel_iommu *iommu)
813{
814 if (!iommu)
815 return;
816
817#ifdef CONFIG_DMAR
818 free_dmar_iommu(iommu);
819#endif
820
821 if (iommu->reg)
822 iounmap(iommu->reg);
823 kfree(iommu);
824}
825
826/*
 827 * Reclaim all the submitted descriptors which have completed their work.
828 */
829static inline void reclaim_free_desc(struct q_inval *qi)
830{
831 while (qi->desc_status[qi->free_tail] == QI_DONE ||
832 qi->desc_status[qi->free_tail] == QI_ABORT) {
833 qi->desc_status[qi->free_tail] = QI_FREE;
834 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
835 qi->free_cnt++;
836 }
837}
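
/*
 * Illustrative example, not in the original source, of the bookkeeping
 * above: qi->desc is a circular array of QI_LENGTH slots, free_head is
 * where the next descriptor will be written and free_tail is the oldest
 * slot not yet reclaimed.  qi_submit_sync() consumes two slots per
 * request (the descriptor plus its wait descriptor), so with
 * free_head == 10 and free_tail == 6 four slots are in flight; once
 * their status reads QI_DONE (or QI_ABORT) this loop walks free_tail
 * forward to 10 and returns the four slots to free_cnt.
 */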
838
839static int qi_check_fault(struct intel_iommu *iommu, int index)
840{
841 u32 fault;
6ba6c3a4 842 int head, tail;
843 struct q_inval *qi = iommu->qi;
844 int wait_index = (index + 1) % QI_LENGTH;
845
846 if (qi->desc_status[wait_index] == QI_ABORT)
847 return -EAGAIN;
848
849 fault = readl(iommu->reg + DMAR_FSTS_REG);
850
851 /*
852 * If IQE happens, the head points to the descriptor associated
853 * with the error. No new descriptors are fetched until the IQE
854 * is cleared.
855 */
856 if (fault & DMA_FSTS_IQE) {
857 head = readl(iommu->reg + DMAR_IQH_REG);
858 if ((head >> DMAR_IQ_SHIFT) == index) {
859 printk(KERN_ERR "VT-d detected invalid descriptor: "
860 "low=%llx, high=%llx\n",
861 (unsigned long long)qi->desc[index].low,
862 (unsigned long long)qi->desc[index].high);
863 memcpy(&qi->desc[index], &qi->desc[wait_index],
864 sizeof(struct qi_desc));
865 __iommu_flush_cache(iommu, &qi->desc[index],
866 sizeof(struct qi_desc));
867 writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
868 return -EINVAL;
869 }
870 }
871
872 /*
873 * If ITE happens, all pending wait_desc commands are aborted.
874 * No new descriptors are fetched until the ITE is cleared.
875 */
876 if (fault & DMA_FSTS_ITE) {
877 head = readl(iommu->reg + DMAR_IQH_REG);
878 head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
879 head |= 1;
880 tail = readl(iommu->reg + DMAR_IQT_REG);
881 tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
882
883 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
884
885 do {
886 if (qi->desc_status[head] == QI_IN_USE)
887 qi->desc_status[head] = QI_ABORT;
888 head = (head - 2 + QI_LENGTH) % QI_LENGTH;
889 } while (head != tail);
890
891 if (qi->desc_status[wait_index] == QI_ABORT)
892 return -EAGAIN;
893 }
894
895 if (fault & DMA_FSTS_ICE)
896 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
897
898 return 0;
899}
900
901/*
902 * Submit the queued invalidation descriptor to the remapping
903 * hardware unit and wait for its completion.
904 */
704126ad 905int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
fe962e90 906{
6ba6c3a4 907 int rc;
908 struct q_inval *qi = iommu->qi;
909 struct qi_desc *hw, wait_desc;
910 int wait_index, index;
911 unsigned long flags;
912
913 if (!qi)
704126ad 914 return 0;
915
916 hw = qi->desc;
917
918restart:
919 rc = 0;
920
f05810c9 921 spin_lock_irqsave(&qi->q_lock, flags);
fe962e90 922 while (qi->free_cnt < 3) {
f05810c9 923 spin_unlock_irqrestore(&qi->q_lock, flags);
fe962e90 924 cpu_relax();
f05810c9 925 spin_lock_irqsave(&qi->q_lock, flags);
926 }
927
928 index = qi->free_head;
929 wait_index = (index + 1) % QI_LENGTH;
930
931 qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
932
933 hw[index] = *desc;
934
935 wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
936 QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
937 wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
938
939 hw[wait_index] = wait_desc;
940
941 __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
942 __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));
943
944 qi->free_head = (qi->free_head + 2) % QI_LENGTH;
945 qi->free_cnt -= 2;
946
947 /*
948 * update the HW tail register indicating the presence of
949 * new descriptors.
950 */
6ba6c3a4 951 writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
952
953 while (qi->desc_status[wait_index] != QI_DONE) {
954 /*
955 * We will leave the interrupts disabled, to prevent interrupt
 956 * context from queueing another cmd while a cmd is already submitted
957 * and waiting for completion on this cpu. This is to avoid
958 * a deadlock where the interrupt context can wait indefinitely
959 * for free slots in the queue.
960 */
961 rc = qi_check_fault(iommu, index);
962 if (rc)
6ba6c3a4 963 break;
704126ad 964
965 spin_unlock(&qi->q_lock);
966 cpu_relax();
967 spin_lock(&qi->q_lock);
968 }
969
970 qi->desc_status[index] = QI_DONE;
971
972 reclaim_free_desc(qi);
f05810c9 973 spin_unlock_irqrestore(&qi->q_lock, flags);
704126ad 974
975 if (rc == -EAGAIN)
976 goto restart;
977
704126ad 978 return rc;
979}
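
/*
 * Illustrative summary, not in the original source, of the protocol
 * implemented above: every submission occupies two consecutive slots,
 * the caller's descriptor plus a wait descriptor whose status word
 * (qi->desc_status[wait_index]) the hardware overwrites with QI_DONE
 * once the queue has drained past it; qi_check_fault() turns IQE/ITE
 * conditions into -EINVAL or an -EAGAIN retry.  qi_global_iec() below
 * is the simplest caller: it builds a single descriptor and relies
 * entirely on this function for completion.
 */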
980
981/*
982 * Flush the global interrupt entry cache.
983 */
984void qi_global_iec(struct intel_iommu *iommu)
985{
986 struct qi_desc desc;
987
988 desc.low = QI_IEC_TYPE;
989 desc.high = 0;
990
704126ad 991 /* should never fail */
992 qi_submit_sync(&desc, iommu);
993}
994
995void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
996 u64 type)
3481f210 997{
998 struct qi_desc desc;
999
1000 desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
1001 | QI_CC_GRAN(type) | QI_CC_TYPE;
1002 desc.high = 0;
1003
4c25a2c1 1004 qi_submit_sync(&desc, iommu);
1005}
1006
1007void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
1008 unsigned int size_order, u64 type)
1009{
1010 u8 dw = 0, dr = 0;
1011
1012 struct qi_desc desc;
1013 int ih = 0;
1014
1015 if (cap_write_drain(iommu->cap))
1016 dw = 1;
1017
1018 if (cap_read_drain(iommu->cap))
1019 dr = 1;
1020
1021 desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
1022 | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
1023 desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
1024 | QI_IOTLB_AM(size_order);
1025
1f0ef2aa 1026 qi_submit_sync(&desc, iommu);
1027}
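
/*
 * Worked example, illustrative and not part of the original source: the
 * size_order argument becomes the address-mask (AM) field of the IOTLB
 * descriptor, so a page-selective invalidation covers 2^size_order
 * contiguous 4KiB pages naturally aligned at 'addr'.  size_order == 0
 * flushes a single page, size_order == 9 a 2MiB region; domain- and
 * global-granularity flushes are selected through 'type' instead and
 * ignore the address.
 */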
1028
1029void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
1030 u64 addr, unsigned mask)
1031{
1032 struct qi_desc desc;
1033
1034 if (mask) {
1035 BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
1036 addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
1037 desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
1038 } else
1039 desc.high = QI_DEV_IOTLB_ADDR(addr);
1040
1041 if (qdep >= QI_DEV_IOTLB_MAX_INVS)
1042 qdep = 0;
1043
1044 desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
1045 QI_DIOTLB_TYPE;
1046
1047 qi_submit_sync(&desc, iommu);
1048}
1049
1050/*
1051 * Disable Queued Invalidation interface.
1052 */
1053void dmar_disable_qi(struct intel_iommu *iommu)
1054{
1055 unsigned long flags;
1056 u32 sts;
1057 cycles_t start_time = get_cycles();
1058
1059 if (!ecap_qis(iommu->ecap))
1060 return;
1061
1062 spin_lock_irqsave(&iommu->register_lock, flags);
1063
1064 sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
1065 if (!(sts & DMA_GSTS_QIES))
1066 goto end;
1067
1068 /*
1069 * Give a chance to HW to complete the pending invalidation requests.
1070 */
1071 while ((readl(iommu->reg + DMAR_IQT_REG) !=
1072 readl(iommu->reg + DMAR_IQH_REG)) &&
1073 (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
1074 cpu_relax();
1075
1076 iommu->gcmd &= ~DMA_GCMD_QIE;
1077 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1078
1079 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
1080 !(sts & DMA_GSTS_QIES), sts);
1081end:
1082 spin_unlock_irqrestore(&iommu->register_lock, flags);
1083}
1084
1085/*
1086 * Enable queued invalidation.
1087 */
1088static void __dmar_enable_qi(struct intel_iommu *iommu)
1089{
c416daa9 1090 u32 sts;
1091 unsigned long flags;
1092 struct q_inval *qi = iommu->qi;
1093
1094 qi->free_head = qi->free_tail = 0;
1095 qi->free_cnt = QI_LENGTH;
1096
1097 spin_lock_irqsave(&iommu->register_lock, flags);
1098
1099 /* write zero to the tail reg */
1100 writel(0, iommu->reg + DMAR_IQT_REG);
1101
1102 dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
1103
eb4a52bc 1104 iommu->gcmd |= DMA_GCMD_QIE;
c416daa9 1105 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1106
1107 /* Make sure hardware complete it */
1108 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1109
1110 spin_unlock_irqrestore(&iommu->register_lock, flags);
1111}
1112
1113/*
1114 * Enable Queued Invalidation interface. This is a must to support
1115 * interrupt-remapping. Also used by DMA-remapping, which replaces
1116 * register based IOTLB invalidation.
1117 */
1118int dmar_enable_qi(struct intel_iommu *iommu)
1119{
fe962e90 1120 struct q_inval *qi;
751cafe3 1121 struct page *desc_page;
1122
1123 if (!ecap_qis(iommu->ecap))
1124 return -ENOENT;
1125
1126 /*
1127 * queued invalidation is already setup and enabled.
1128 */
1129 if (iommu->qi)
1130 return 0;
1131
fa4b57cc 1132 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
1133 if (!iommu->qi)
1134 return -ENOMEM;
1135
1136 qi = iommu->qi;
1137
1138
1139 desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
1140 if (!desc_page) {
1141 kfree(qi);
1142 iommu->qi = 0;
1143 return -ENOMEM;
1144 }
1145
1146 qi->desc = page_address(desc_page);
1147
fa4b57cc 1148 qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
1149 if (!qi->desc_status) {
1150 free_page((unsigned long) qi->desc);
1151 kfree(qi);
1152 iommu->qi = 0;
1153 return -ENOMEM;
1154 }
1155
1156 qi->free_head = qi->free_tail = 0;
1157 qi->free_cnt = QI_LENGTH;
1158
1159 spin_lock_init(&qi->q_lock);
1160
eb4a52bc 1161 __dmar_enable_qi(iommu);
1162
1163 return 0;
1164}
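
/*
 * Rough usage sketch, illustrative only; the real call sites live in
 * intel-iommu.c and intr_remapping.c.  A caller typically brings the
 * units up in this order:
 *
 *	dmar_table_init();		parse the ACPI DMAR table
 *	dmar_dev_scope_init();		resolve device scopes to pci_devs
 *	for_each_drhd_unit(drhd) {
 *		struct intel_iommu *iommu = drhd->iommu;
 *		dmar_enable_qi(iommu);		optional for DMA remapping,
 *						mandatory for intr remapping
 *		dmar_set_interrupt(iommu);	fault reporting
 *	}
 *
 * A dmar_enable_qi() failure is not fatal for DMA remapping, which can
 * fall back to register-based IOTLB invalidation (see the comment
 * above dmar_enable_qi()).
 */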
1165
1166/* iommu interrupt handling. Most stuff are MSI-like. */
1167
1168enum faulttype {
1169 DMA_REMAP,
1170 INTR_REMAP,
1171 UNKNOWN,
1172};
1173
1174static const char *dma_remap_fault_reasons[] =
1175{
1176 "Software",
1177 "Present bit in root entry is clear",
1178 "Present bit in context entry is clear",
1179 "Invalid context entry",
1180 "Access beyond MGAW",
1181 "PTE Write access is not set",
1182 "PTE Read access is not set",
1183 "Next page table ptr is invalid",
1184 "Root table address invalid",
1185 "Context table ptr is invalid",
1186 "non-zero reserved fields in RTP",
1187 "non-zero reserved fields in CTP",
1188 "non-zero reserved fields in PTE",
1189};
1190
1191static const char *intr_remap_fault_reasons[] =
1192{
1193 "Detected reserved fields in the decoded interrupt-remapped request",
1194 "Interrupt index exceeded the interrupt-remapping table size",
1195 "Present field in the IRTE entry is clear",
1196 "Error accessing interrupt-remapping table pointed by IRTA_REG",
1197 "Detected reserved fields in the IRTE entry",
1198 "Blocked a compatibility format interrupt request",
1199 "Blocked an interrupt request due to source-id verification failure",
1200};
1201
1202#define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1)
1203
9d783ba0 1204const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
0ac2491f 1205{
1206 if (fault_reason >= 0x20 && (fault_reason <= 0x20 +
1207 ARRAY_SIZE(intr_remap_fault_reasons))) {
1208 *fault_type = INTR_REMAP;
1209 return intr_remap_fault_reasons[fault_reason - 0x20];
1210 } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1211 *fault_type = DMA_REMAP;
1212 return dma_remap_fault_reasons[fault_reason];
1213 } else {
1214 *fault_type = UNKNOWN;
0ac2491f 1215 return "Unknown";
9d783ba0 1216 }
1217}
1218
1219void dmar_msi_unmask(unsigned int irq)
1220{
1221 struct intel_iommu *iommu = get_irq_data(irq);
1222 unsigned long flag;
1223
1224 /* unmask it */
1225 spin_lock_irqsave(&iommu->register_lock, flag);
1226 writel(0, iommu->reg + DMAR_FECTL_REG);
1227 /* Read a reg to force flush the post write */
1228 readl(iommu->reg + DMAR_FECTL_REG);
1229 spin_unlock_irqrestore(&iommu->register_lock, flag);
1230}
1231
1232void dmar_msi_mask(unsigned int irq)
1233{
1234 unsigned long flag;
1235 struct intel_iommu *iommu = get_irq_data(irq);
1236
1237 /* mask it */
1238 spin_lock_irqsave(&iommu->register_lock, flag);
1239 writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
1240 /* Read a reg to force flush the post write */
1241 readl(iommu->reg + DMAR_FECTL_REG);
1242 spin_unlock_irqrestore(&iommu->register_lock, flag);
1243}
1244
1245void dmar_msi_write(int irq, struct msi_msg *msg)
1246{
1247 struct intel_iommu *iommu = get_irq_data(irq);
1248 unsigned long flag;
1249
1250 spin_lock_irqsave(&iommu->register_lock, flag);
1251 writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
1252 writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
1253 writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
1254 spin_unlock_irqrestore(&iommu->register_lock, flag);
1255}
1256
1257void dmar_msi_read(int irq, struct msi_msg *msg)
1258{
1259 struct intel_iommu *iommu = get_irq_data(irq);
1260 unsigned long flag;
1261
1262 spin_lock_irqsave(&iommu->register_lock, flag);
1263 msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
1264 msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
1265 msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
1266 spin_unlock_irqrestore(&iommu->register_lock, flag);
1267}
1268
1269static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1270 u8 fault_reason, u16 source_id, unsigned long long addr)
1271{
1272 const char *reason;
9d783ba0 1273 int fault_type;
0ac2491f 1274
9d783ba0 1275 reason = dmar_get_fault_reason(fault_reason, &fault_type);
0ac2491f 1276
1277 if (fault_type == INTR_REMAP)
1278 printk(KERN_ERR "INTR-REMAP: Request device [[%02x:%02x.%d] "
1279 "fault index %llx\n"
1280 "INTR-REMAP:[fault reason %02d] %s\n",
1281 (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1282 PCI_FUNC(source_id & 0xFF), addr >> 48,
1283 fault_reason, reason);
1284 else
1285 printk(KERN_ERR
1286 "DMAR:[%s] Request device [%02x:%02x.%d] "
1287 "fault addr %llx \n"
1288 "DMAR:[fault reason %02d] %s\n",
1289 (type ? "DMA Read" : "DMA Write"),
1290 (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1291 PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
1292 return 0;
1293}
1294
1295#define PRIMARY_FAULT_REG_LEN (16)
1531a6a6 1296irqreturn_t dmar_fault(int irq, void *dev_id)
1297{
1298 struct intel_iommu *iommu = dev_id;
1299 int reg, fault_index;
1300 u32 fault_status;
1301 unsigned long flag;
1302
1303 spin_lock_irqsave(&iommu->register_lock, flag);
1304 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1305 if (fault_status)
1306 printk(KERN_ERR "DRHD: handling fault status reg %x\n",
1307 fault_status);
1308
1309 /* TBD: ignore advanced fault log currently */
1310 if (!(fault_status & DMA_FSTS_PPF))
9d783ba0 1311 goto clear_rest;
1312
1313 fault_index = dma_fsts_fault_record_index(fault_status);
1314 reg = cap_fault_reg_offset(iommu->cap);
1315 while (1) {
1316 u8 fault_reason;
1317 u16 source_id;
1318 u64 guest_addr;
1319 int type;
1320 u32 data;
1321
1322 /* highest 32 bits */
1323 data = readl(iommu->reg + reg +
1324 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1325 if (!(data & DMA_FRCD_F))
1326 break;
1327
1328 fault_reason = dma_frcd_fault_reason(data);
1329 type = dma_frcd_type(data);
1330
1331 data = readl(iommu->reg + reg +
1332 fault_index * PRIMARY_FAULT_REG_LEN + 8);
1333 source_id = dma_frcd_source_id(data);
1334
1335 guest_addr = dmar_readq(iommu->reg + reg +
1336 fault_index * PRIMARY_FAULT_REG_LEN);
1337 guest_addr = dma_frcd_page_addr(guest_addr);
1338 /* clear the fault */
1339 writel(DMA_FRCD_F, iommu->reg + reg +
1340 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1341
1342 spin_unlock_irqrestore(&iommu->register_lock, flag);
1343
1344 dmar_fault_do_one(iommu, type, fault_reason,
1345 source_id, guest_addr);
1346
1347 fault_index++;
8211a7b5 1348 if (fault_index >= cap_num_fault_regs(iommu->cap))
1349 fault_index = 0;
1350 spin_lock_irqsave(&iommu->register_lock, flag);
1351 }
1352clear_rest:
1353 /* clear all the other faults */
0ac2491f 1354 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
9d783ba0 1355 writel(fault_status, iommu->reg + DMAR_FSTS_REG);
1356
1357 spin_unlock_irqrestore(&iommu->register_lock, flag);
1358 return IRQ_HANDLED;
1359}
1360
1361int dmar_set_interrupt(struct intel_iommu *iommu)
1362{
1363 int irq, ret;
1364
1365 /*
1366 * Check if the fault interrupt is already initialized.
1367 */
1368 if (iommu->irq)
1369 return 0;
1370
1371 irq = create_irq();
1372 if (!irq) {
1373 printk(KERN_ERR "IOMMU: no free vectors\n");
1374 return -EINVAL;
1375 }
1376
1377 set_irq_data(irq, iommu);
1378 iommu->irq = irq;
1379
1380 ret = arch_setup_dmar_msi(irq);
1381 if (ret) {
1382 set_irq_data(irq, NULL);
1383 iommu->irq = 0;
1384 destroy_irq(irq);
dd726435 1385 return ret;
1386 }
1387
1388 ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
1389 if (ret)
1390 printk(KERN_ERR "IOMMU: can't request irq\n");
1391 return ret;
1392}
1393
1394int __init enable_drhd_fault_handling(void)
1395{
1396 struct dmar_drhd_unit *drhd;
1397
1398 /*
1399 * Enable fault control interrupt.
1400 */
1401 for_each_drhd_unit(drhd) {
1402 int ret;
1403 struct intel_iommu *iommu = drhd->iommu;
1404 ret = dmar_set_interrupt(iommu);
1405
1406 if (ret) {
1407 printk(KERN_ERR "DRHD %Lx: failed to enable fault, "
1408 " interrupt, ret %d\n",
1409 (unsigned long long)drhd->reg_base_addr, ret);
1410 return -1;
1411 }
1412 }
1413
1414 return 0;
1415}
1416
1417/*
1418 * Re-enable Queued Invalidation interface.
1419 */
1420int dmar_reenable_qi(struct intel_iommu *iommu)
1421{
1422 if (!ecap_qis(iommu->ecap))
1423 return -ENOENT;
1424
1425 if (!iommu->qi)
1426 return -ENOENT;
1427
1428 /*
1429 * First disable queued invalidation.
1430 */
1431 dmar_disable_qi(iommu);
1432 /*
1433 * Then enable queued invalidation again. Since there is no pending
1434 * invalidation requests now, it's safe to re-enable queued
1435 * invalidation.
1436 */
1437 __dmar_enable_qi(iommu);
1438
1439 return 0;
1440}
1441
1442/*
1443 * Check interrupt remapping support in DMAR table description.
1444 */
0b8973a8 1445int __init dmar_ir_support(void)
1446{
1447 struct acpi_table_dmar *dmar;
1448 dmar = (struct acpi_table_dmar *)dmar_tbl;
1449 if (!dmar)
1450 return 0;
1451 return dmar->flags & 0x1;
1452}