net-next-2.6.git blame: drivers/pci/dmar.c
Most recent commit: "Fix handling of the HP/Acer 'DMAR at zero' BIOS error for machines with <4GiB RAM."

/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of remapping devices
 * reported to the OS through the BIOS via the DMA Remapping Reporting
 * (DMAR) ACPI table.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>

#define PREFIX "DMAR: "

/*
 * No locks are needed, as the DMA remapping hardware unit list is
 * constructed at boot time and hotplug of these units is not supported
 * by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add INCLUDE_ALL units at the tail, so that a scan of the list
	 * finds them at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}
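
/*
 * Illustrative sketch (not part of the driver): because of the ordering
 * above, a lookup that walks the list and returns the first match tries
 * every device-specific DRHD before falling back to the INCLUDE_ALL
 * catch-all at the tail, e.g.:
 *
 *	list_for_each_entry(dmaru, &dmar_drhd_units, list)
 *		if (drhd_scope_matches(dmaru, dev))	// hypothetical helper
 *			return dmaru;
 */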

static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list nonexistent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING
			       PREFIX "Device scope bus [%d] not found\n",
			       scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
			       "Device scope device [%04x:%02x:%02x.%02x] not found\n",
			       segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
		       "Device scope device [%04x:%02x:%02x.%02x] not found\n",
		       segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		printk(KERN_WARNING PREFIX
		       "Device scope type does not match for %s\n",
		       pci_name(pdev));
		/* print the name before dropping the reference */
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}
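
/*
 * Illustrative layout (topology assumed for the example): a scope entry
 * for an endpoint at 0000:02:00.0 sitting behind bridge 0000:00:1c.0
 * carries two acpi_dmar_pci_path elements:
 *
 *	scope->bus = 0x00;
 *	path[0].dev = 0x1c; path[0].fn = 0;	// the bridge on bus 0
 *	path[1].dev = 0x00; path[1].fn = 0;	// the endpoint behind it
 *
 * The loop above resolves path[0] on bus 0, then follows
 * pdev->subordinate to the secondary bus before resolving path[1].
 */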

static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else
			printk(KERN_WARNING PREFIX
			       "Unsupported device scope\n");
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure, which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT 0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				   ((void *)drhd) + drhd->header.length,
				   &dmaru->devices_cnt, &dmaru->devices,
				   drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

#ifdef CONFIG_DMAR
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}


static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *)rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
				   ((void *)rmrr) + rmrr->header.length,
				   &rmrru->devices_cnt, &rmrru->devices,
				   rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}
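
/*
 * Background note (general VT-d behaviour, offered as context rather than
 * taken from this file): an RMRR names a memory range the BIOS expects to
 * stay DMA-accessible to the listed devices even after translation is
 * enabled, a classic example being legacy-USB keyboard buffers;
 * base_address and end_address bound that range.
 */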

static LIST_HEAD(dmar_atsr_units);

static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;

	list_add(&atsru->list, &dmar_atsr_units);

	return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	int rc;
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	rc = dmar_parse_dev_scope((void *)(atsr + 1),
				  (void *)atsr + atsr->header.length,
				  &atsru->devices_cnt, &atsru->devices,
				  atsr->segment);
	if (rc || !atsru->devices_cnt) {
		list_del(&atsru->list);
		kfree(atsru);
	}

	return rc;
}

int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i;
	struct pci_bus *bus;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment == pci_domain_nr(dev->bus))
			goto found;
	}

	return 0;

found:
	for (bus = dev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (!bridge || !bridge->is_pcie ||
		    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;

		if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
			for (i = 0; i < atsru->devices_cnt; i++)
				if (atsru->devices[i] == bridge)
					return 1;
			break;
		}
	}

	if (atsru->include_all)
		return 1;

	return 0;
}
#endif
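
/*
 * Illustrative walk (topology assumed for the example): for an
 * ATS-capable endpoint at 0000:02:00.0 behind root port 0000:00:1c.0,
 * the loop above climbs dev->bus->parent until it reaches the root port;
 * the ATSR matches if that root port appears in atsru->devices[], or
 * unconditionally if the ATSR is flagged INCLUDE_ALL.
 */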

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		printk(KERN_INFO PREFIX
		       "DRHD base: %#016Lx flags: %#x\n",
		       (unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		printk(KERN_INFO PREFIX
		       "RMRR base: %#016Lx end: %#016Lx\n",
		       (unsigned long long)rmrr->base_address,
		       (unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
		       (unsigned long long)rhsa->base_address,
		       rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* If we can find the DMAR table, then there are DMAR devices. */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA remapping reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again; the earlier dmar_tbl mapping could have come from
	 * the fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR
	 * copy SINIT saved in SinitMleData in the TXT heap (which is DMA
	 * protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
	       dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
			       "Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_rmrr(entry_header);
#endif
			break;
		case ACPI_DMAR_TYPE_ATSR:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_atsr(entry_header);
#endif
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
			/* We don't do anything with RHSA (yet?) */
			break;
		default:
			printk(KERN_WARNING PREFIX
			       "Unknown DMAR structure type %d\n",
			       entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}
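
/*
 * For reference, the layout walked above is a fixed ACPI header followed
 * by variable-length remapping structures, each led by a common
 * type/length header (a sketch, not a byte-accurate map):
 *
 *	struct acpi_table_dmar		signature "DMAR", width, flags
 *	struct acpi_dmar_header		type = HARDWARE_UNIT, length = ...
 *		...DRHD payload and device scopes...
 *	struct acpi_dmar_header		type = RESERVED_MEMORY, length = ...
 *		...
 *
 * entry_header->length is the only way to reach the next structure,
 * hence the explicit guard against zero-length entries.
 */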

int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
			  struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

#ifdef CONFIG_DMAR
	{
		struct dmar_rmrr_unit *rmrr, *rmrr_n;
		struct dmar_atsr_unit *atsr, *atsr_n;

		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
			ret = rmrr_parse_dev(rmrr);
			if (ret)
				return ret;
		}

		list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
			ret = atsr_parse_dev(atsr);
			if (ret)
				return ret;
		}
	}
#endif

	return ret;
}


int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "Failed to parse DMAR table\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

#ifdef CONFIG_DMAR
	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO PREFIX "No ATSR found\n");
#endif

	return 0;
}

/*
 * Some broken BIOSes (the HP/Acer "DMAR at zero" error noted in the
 * header above) advertise a DRHD at physical address zero.  Catch that
 * here and disable DMAR rather than map a bogus register window later.
 */
int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
			       "Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			drhd = (void *)entry_header;
			if (!drhd->address) {
				/* Promote an attitude of violence to a BIOS engineer today */
				WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
				     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
				     dmi_get_system_info(DMI_BIOS_VENDOR),
				     dmi_get_system_info(DMI_BIOS_VERSION),
				     dmi_get_system_info(DMI_PRODUCT_VERSION));
#ifdef CONFIG_DMAR
				dmar_disabled = 1;
#endif
				return 0;
			}
			break;
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;
}

void __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
	{
#ifdef CONFIG_INTR_REMAP
		struct acpi_table_dmar *dmar;
		/*
		 * For now we disable DMA-remapping when interrupt
		 * remapping is enabled.  When support for queued
		 * invalidation for IOTLB invalidation is added, we will
		 * not need this any more.
		 */
		dmar = (struct acpi_table_dmar *) dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
#endif
#ifdef CONFIG_DMAR
		if (ret && !no_iommu && !iommu_detected && !swiotlb &&
		    !dmar_disabled)
			iommu_detected = 1;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
}


int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		/* Promote an attitude of violence to a BIOS engineer today */
		WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     drhd->reg_base_addr,
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		goto err_unmap;
	}

#ifdef CONFIG_DMAR
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
#endif
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

 err_unmap:
	iounmap(iommu->reg);
 error:
	kfree(iommu);
	return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

#ifdef CONFIG_DMAR
	free_dmar_iommu(iommu);
#endif

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
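
/*
 * Worked example (numbers assumed for illustration): with free_tail == 10
 * and desc_status[10] == QI_DONE, [11] == QI_DONE, [12] == QI_IN_USE, the
 * loop above frees slots 10 and 11, advances free_tail to 12 and adds 2
 * back to free_cnt; slot 12 stays reserved until hardware completes it.
 */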

static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			printk(KERN_ERR "VT-d detected invalid descriptor: "
			       "low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}
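
/*
 * A note on the index arithmetic above: qi_submit_sync() always queues
 * descriptors in pairs starting from slot 0 (work descriptor in an even
 * slot, wait descriptor in the following odd slot), so "head |= 1" lands
 * on a wait descriptor and stepping backwards by two visits only the
 * wait slots between head and tail.
 */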

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * Update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We leave interrupts disabled, to prevent interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is
		 * to avoid a deadlock where the interrupt context can wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}
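
/*
 * Minimal usage sketch (qi_global_iec() below is a real caller of this
 * shape): fill in a descriptor and submit it; qi_submit_sync() pairs it
 * with a wait descriptor whose status word the hardware sets to QI_DONE
 * on completion:
 *
 *	struct qi_desc desc;
 *
 *	desc.low  = QI_IEC_TYPE;
 *	desc.high = 0;
 *	qi_submit_sync(&desc, iommu);
 */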

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}
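
/*
 * Worked example of the mask encoding above (VTD_PAGE_SHIFT == 12
 * assumed): for mask == 2 the BUG_ON requires addr aligned to
 * 1 << (12 + 2) == 16KiB, and addr |= (1 << 13) - 1 sets the low 13
 * bits, which tells the device the invalidation covers 2^2 == 4 pages
 * starting at that base.
 */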

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give the HW a chance to complete the pending invalidation
	 * requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * Queued invalidation is already set up and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	qi->desc = (void *)(get_zeroed_page(GFP_ATOMIC));
	if (!qi->desc) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}
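
/*
 * Typical bring-up order, as a sketch (error handling elided; the real
 * sequencing lives in this file's callers):
 *
 *	if (dmar_enable_qi(iommu))
 *		return;			// fall back to register-based
 *					// IOTLB invalidation
 *	qi_global_iec(iommu);		// invalidations now go via the queue
 */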

/* IOMMU interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(fault_reason_strings) - 1)

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	/* note: the upper bound is exclusive; 0x20 + ARRAY_SIZE() is
	 * already out of range for intr_remap_fault_reasons[] */
	if (fault_reason >= 0x20 && (fault_reason < 0x20 +
				     ARRAY_SIZE(intr_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return intr_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}
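
/*
 * Example mapping (reason codes per the table bases used above):
 * fault_reason 0x25 resolves to intr_remap_fault_reasons[5], "Blocked a
 * compatibility format interrupt request", with *fault_type = INTR_REMAP;
 * fault_reason 0x01 resolves to "Present bit in root entry is clear"
 * with *fault_type = DMA_REMAP.
 */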

void dmar_msi_unmask(unsigned int irq)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	/* unmask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(unsigned int irq)
{
	unsigned long flag;
	struct intel_iommu *iommu = get_irq_data(irq);

	/* mask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
			     u8 fault_reason, u16 source_id,
			     unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		printk(KERN_ERR
		       "DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
					fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
		       fault_index * PRIMARY_FAULT_REG_LEN + 12);

		spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}
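
/*
 * For reference, each primary fault record polled above is 16 bytes
 * (PRIMARY_FAULT_REG_LEN), laid out roughly as:
 *
 *	offset  0: faulting page address (64 bits)
 *	offset  8: dword carrying the source-id
 *	offset 12: dword carrying fault reason, type and the F (fault) bit
 *
 * so the handler tests DMA_FRCD_F at +12 first, then reads +8 and +0,
 * and finally writes DMA_FRCD_F back to +12 to clear the slot.
 */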

int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	set_irq_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		set_irq_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_set_interrupt(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
			       "interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in the DMAR table description.
 */
int dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	return dmar->flags & 0x1;
}