/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 */

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/sysdev.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#include "pci.h"

#define ROOT_SIZE               VTD_PAGE_SIZE
#define CONTEXT_SIZE            VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START      (0xfee00000)
#define IOAPIC_RANGE_END        (0xfeefffff)
#define IOVA_START_ADDR         (0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)     ((unsigned long) min_t(uint64_t, \
                                __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)    (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

#define IOVA_PFN(addr)          ((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN           IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN           IOVA_PFN(DMA_BIT_MASK(64))

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
        return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
        return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
        return mm_to_dma_pfn(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
        return page_to_dma_pfn(virt_to_page(p));
}
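
/*
 * Worked example (illustrative, not from the original source): with 4KiB
 * MM pages, PAGE_SHIFT == VTD_PAGE_SHIFT == 12, so both conversions are
 * the identity. With 16KiB MM pages they would differ by a factor of 4,
 * e.g. mm_to_dma_pfn(1) == 4, since one MM page spans four VT-d pages.
 */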

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
        u64     val;
        u64     rsvd1;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
static inline bool root_present(struct root_entry *root)
{
        return (root->val & 1);
}
static inline void set_root_present(struct root_entry *root)
{
        root->val |= 1;
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
        root->val |= value & VTD_PAGE_MASK;
}

static inline struct context_entry *
get_context_addr_from_root(struct root_entry *root)
{
        return (struct context_entry *)
                (root_present(root) ? phys_to_virt(root->val & VTD_PAGE_MASK)
                                    : NULL);
}
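
/*
 * Illustrative note (added annotation): the root table holds one
 * root_entry per PCI bus number (ROOT_ENTRY_NR == 4096/16 == 256), and
 * each present root entry points to a page of 256 context entries
 * indexed by devfn -- device_to_context_entry() below performs exactly
 * this root_entry[bus] -> context[devfn] walk.
 */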

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: avail
 * 8-23: domain id
 */
struct context_entry {
        u64 lo;
        u64 hi;
};

static inline bool context_present(struct context_entry *context)
{
        return (context->lo & 1);
}
static inline void context_set_present(struct context_entry *context)
{
        context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
        context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
                                                unsigned long value)
{
        context->lo &= (((u64)-1) << 4) | 3;
        context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
                                            unsigned long value)
{
        context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
                                             unsigned long value)
{
        context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
                                         unsigned long value)
{
        context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline void context_clear_entry(struct context_entry *context)
{
        context->lo = 0;
        context->hi = 0;
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
        u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
        pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
        pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
        pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
        pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
        pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
        return pte->val & VTD_PAGE_MASK;
#else
        /* Must have a full atomic 64-bit read */
        return __cmpxchg64(pte, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
        pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
        return (pte->val & 3) != 0;
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
        return !((unsigned long)pte & ~VTD_PAGE_MASK);
}
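
/*
 * Illustrative note (added annotation): a page-table page holds
 * VTD_PAGE_SIZE / sizeof(struct dma_pte) == 4096 / 8 == 512 PTEs, so a
 * pte pointer with no bits set below VTD_PAGE_MASK is the first entry
 * of its page. The range loops further down use this to batch one
 * cache flush per PTE page rather than one per entry.
 */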

/*
 * This domain is a statically identity mapped domain.
 *      1. This domain creates a static 1:1 mapping to all usable memory.
 *      2. It maps to each iommu if successful.
 *      3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE     (1 << 1)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY     (1 << 2)

struct dmar_domain {
        int     id;                     /* domain id */
        unsigned long iommu_bmp;        /* bitmap of iommus this domain uses */

        struct list_head devices;       /* all devices' list */
        struct iova_domain iovad;       /* iova's that belong to this domain */

        struct dma_pte  *pgd;           /* virtual address */
        int             gaw;            /* max guest address width */

        /* adjusted guest address width, 0 is level 2 30-bit */
        int             agaw;

        int             flags;          /* flags to find out type of domain */

        int             iommu_coherency;/* indicate coherency of iommu access */
        int             iommu_snooping; /* indicate snooping control feature */
        int             iommu_count;    /* reference count of iommu */
        spinlock_t      iommu_lock;     /* protect iommu set in domain */
        u64             max_addr;       /* maximum mapped address */
};

/* PCI domain-device relationship */
struct device_domain_info {
        struct list_head link;  /* link to domain siblings */
        struct list_head global; /* link to global list */
        int segment;            /* PCI domain */
        u8 bus;                 /* PCI bus number */
        u8 devfn;               /* PCI devfn number */
        struct pci_dev *dev;    /* it's NULL for PCIe-to-PCI bridge */
        struct intel_iommu *iommu; /* IOMMU used by this device */
        struct dmar_domain *domain; /* pointer to domain */
};

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {
        int next;
        struct iova *iova[HIGH_WATER_MARK];
        struct dmar_domain *domain[HIGH_WATER_MARK];
};

static struct deferred_flush_tables *deferred_flush;

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static DEFINE_SPINLOCK(async_umap_flush_lock);
static LIST_HEAD(unmaps_to_do);

static int timer_on;
static long list_size;

static void domain_remove_dev_info(struct dmar_domain *domain);

#ifdef CONFIG_DMAR_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /* CONFIG_DMAR_DEFAULT_ON */

static int __initdata dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static struct iommu_ops intel_iommu_ops;

static int __init intel_iommu_setup(char *str)
{
        if (!str)
                return -EINVAL;
        while (*str) {
                if (!strncmp(str, "on", 2)) {
                        dmar_disabled = 0;
                        printk(KERN_INFO "Intel-IOMMU: enabled\n");
                } else if (!strncmp(str, "off", 3)) {
                        dmar_disabled = 1;
                        printk(KERN_INFO "Intel-IOMMU: disabled\n");
                } else if (!strncmp(str, "igfx_off", 8)) {
                        dmar_map_gfx = 0;
                        printk(KERN_INFO
                                "Intel-IOMMU: disable GFX device mapping\n");
                } else if (!strncmp(str, "forcedac", 8)) {
                        printk(KERN_INFO
                                "Intel-IOMMU: Forcing DAC for PCI devices\n");
                        dmar_forcedac = 1;
                } else if (!strncmp(str, "strict", 6)) {
                        printk(KERN_INFO
                                "Intel-IOMMU: disable batched IOTLB flush\n");
                        intel_iommu_strict = 1;
                }

                str += strcspn(str, ",");
                while (*str == ',')
                        str++;
        }
        return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
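
/*
 * Usage sketch (added annotation): the parser above splits on commas,
 * so boot options combine, e.g. "intel_iommu=on,strict" enables the
 * IOMMU and disables batched IOTLB flushing, while
 * "intel_iommu=igfx_off" leaves the graphics device unmapped.
 */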

static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;

static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
{
        unsigned int flags;
        void *vaddr;

        /* trying to avoid low memory issues */
        flags = current->flags & PF_MEMALLOC;
        current->flags |= PF_MEMALLOC;
        vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
        current->flags &= (~PF_MEMALLOC | flags);
        return vaddr;
}

static inline void *alloc_pgtable_page(void)
{
        unsigned int flags;
        void *vaddr;

        /* trying to avoid low memory issues */
        flags = current->flags & PF_MEMALLOC;
        current->flags |= PF_MEMALLOC;
        vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
        current->flags &= (~PF_MEMALLOC | flags);
        return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
        free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
        return iommu_kmem_cache_alloc(iommu_domain_cache);
}

static void free_domain_mem(void *vaddr)
{
        kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
        return iommu_kmem_cache_alloc(iommu_devinfo_cache);
}

static inline void free_devinfo_mem(void *vaddr)
{
        kmem_cache_free(iommu_devinfo_cache, vaddr);
}

struct iova *alloc_iova_mem(void)
{
        return iommu_kmem_cache_alloc(iommu_iova_cache);
}

void free_iova_mem(struct iova *iova)
{
        kmem_cache_free(iommu_iova_cache, iova);
}

static inline int width_to_agaw(int width);

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
        unsigned long sagaw;
        int agaw = -1;

        sagaw = cap_sagaw(iommu->cap);
        for (agaw = width_to_agaw(max_gaw);
             agaw >= 0; agaw--) {
                if (test_bit(agaw, &sagaw))
                        break;
        }

        return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
        return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * Calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
        return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}
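
/*
 * Worked example (illustrative, added annotation): with
 * DEFAULT_DOMAIN_ADDRESS_WIDTH == 48, width_to_agaw() gives
 * (48 - 30) / 9 == 2 (a 4-level table). If bit 2 of the SAGAW
 * capability field is clear, the loop above steps down to agaw 1
 * (39-bit, 3-level), and so on until a supported width is found.
 */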

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
        int iommu_id;

        /* si_domain and vm domain should not get here. */
        BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
        BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

        iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
        if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
                return NULL;

        return g_iommus[iommu_id];
}

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
        int i;

        domain->iommu_coherency = 1;

        i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
        for (; i < g_num_of_iommus; ) {
                if (!ecap_coherent(g_iommus[i]->ecap)) {
                        domain->iommu_coherency = 0;
                        break;
                }
                i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
        }
}

static void domain_update_iommu_snooping(struct dmar_domain *domain)
{
        int i;

        domain->iommu_snooping = 1;

        i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
        for (; i < g_num_of_iommus; ) {
                if (!ecap_sc_support(g_iommus[i]->ecap)) {
                        domain->iommu_snooping = 0;
                        break;
                }
                i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
        }
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
        domain_update_iommu_coherency(domain);
        domain_update_iommu_snooping(domain);
}

static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
{
        struct dmar_drhd_unit *drhd = NULL;
        int i;

        for_each_drhd_unit(drhd) {
                if (drhd->ignored)
                        continue;
                if (segment != drhd->segment)
                        continue;

                for (i = 0; i < drhd->devices_cnt; i++) {
                        if (drhd->devices[i] &&
                            drhd->devices[i]->bus->number == bus &&
                            drhd->devices[i]->devfn == devfn)
                                return drhd->iommu;
                        if (drhd->devices[i] &&
                            drhd->devices[i]->subordinate &&
                            drhd->devices[i]->subordinate->number <= bus &&
                            drhd->devices[i]->subordinate->subordinate >= bus)
                                return drhd->iommu;
                }

                if (drhd->include_all)
                        return drhd->iommu;
        }

        return NULL;
}

static void domain_flush_cache(struct dmar_domain *domain,
                               void *addr, int size)
{
        if (!domain->iommu_coherency)
                clflush_cache_range(addr, size);
}

/* Gets context entry for a given bus and devfn */
static struct context_entry *device_to_context_entry(struct intel_iommu *iommu,
                u8 bus, u8 devfn)
{
        struct root_entry *root;
        struct context_entry *context;
        unsigned long phy_addr;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);
        root = &iommu->root_entry[bus];
        context = get_context_addr_from_root(root);
        if (!context) {
                context = (struct context_entry *)alloc_pgtable_page();
                if (!context) {
                        spin_unlock_irqrestore(&iommu->lock, flags);
                        return NULL;
                }
                __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
                phy_addr = virt_to_phys((void *)context);
                set_root_value(root, phy_addr);
                set_root_present(root);
                __iommu_flush_cache(iommu, root, sizeof(*root));
        }
        spin_unlock_irqrestore(&iommu->lock, flags);
        return &context[devfn];
}

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
        struct root_entry *root;
        struct context_entry *context;
        int ret;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);
        root = &iommu->root_entry[bus];
        context = get_context_addr_from_root(root);
        if (!context) {
                ret = 0;
                goto out;
        }
        ret = context_present(&context[devfn]);
out:
        spin_unlock_irqrestore(&iommu->lock, flags);
        return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
        struct root_entry *root;
        struct context_entry *context;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);
        root = &iommu->root_entry[bus];
        context = get_context_addr_from_root(root);
        if (context) {
                context_clear_entry(&context[devfn]);
                __iommu_flush_cache(iommu, &context[devfn],
                                    sizeof(*context));
        }
        spin_unlock_irqrestore(&iommu->lock, flags);
}

static void free_context_table(struct intel_iommu *iommu)
{
        struct root_entry *root;
        int i;
        unsigned long flags;
        struct context_entry *context;

        spin_lock_irqsave(&iommu->lock, flags);
        if (!iommu->root_entry) {
                goto out;
        }
        for (i = 0; i < ROOT_ENTRY_NR; i++) {
                root = &iommu->root_entry[i];
                context = get_context_addr_from_root(root);
                if (context)
                        free_pgtable_page(context);
        }
        free_pgtable_page(iommu->root_entry);
        iommu->root_entry = NULL;
out:
        spin_unlock_irqrestore(&iommu->lock, flags);
}

/* page table handling */
#define LEVEL_STRIDE            (9)
#define LEVEL_MASK              (((u64)1 << LEVEL_STRIDE) - 1)

static inline int agaw_to_level(int agaw)
{
        return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
        return 30 + agaw * LEVEL_STRIDE;
}

static inline int width_to_agaw(int width)
{
        return (width - 30) / LEVEL_STRIDE;
}

static inline unsigned int level_to_offset_bits(int level)
{
        return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
        return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
        return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
        return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
        return (pfn + level_size(level) - 1) & level_mask(level);
}
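
/*
 * Worked example (illustrative, added annotation): agaw 2 means a
 * 4-level table (agaw_to_level() == 4) covering agaw_to_width() == 48
 * bits. For a walk, pfn_level_offset(pfn, 1) takes pfn bits 0-8,
 * level 2 takes bits 9-17, and so on -- 9 bits (LEVEL_STRIDE) per
 * level.
 */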

static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
                                      unsigned long pfn)
{
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
        struct dma_pte *parent, *pte = NULL;
        int level = agaw_to_level(domain->agaw);
        int offset;

        BUG_ON(!domain->pgd);
        BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
        parent = domain->pgd;

        while (level > 0) {
                void *tmp_page;

                offset = pfn_level_offset(pfn, level);
                pte = &parent[offset];
                if (level == 1)
                        break;

                if (!dma_pte_present(pte)) {
                        uint64_t pteval;

                        tmp_page = alloc_pgtable_page();

                        if (!tmp_page)
                                return NULL;

                        domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
                        pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
                        if (cmpxchg64(&pte->val, 0ULL, pteval)) {
                                /* Someone else set it while we were thinking; use theirs. */
                                free_pgtable_page(tmp_page);
                        } else {
                                dma_pte_addr(pte);
                                domain_flush_cache(domain, pte, sizeof(*pte));
                        }
                }
                parent = phys_to_virt(dma_pte_addr(pte));
                level--;
        }

        return pte;
}

/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
                                         unsigned long pfn,
                                         int level)
{
        struct dma_pte *parent, *pte = NULL;
        int total = agaw_to_level(domain->agaw);
        int offset;

        parent = domain->pgd;
        while (level <= total) {
                offset = pfn_level_offset(pfn, total);
                pte = &parent[offset];
                if (level == total)
                        return pte;

                if (!dma_pte_present(pte))
                        break;
                parent = phys_to_virt(dma_pte_addr(pte));
                total--;
        }
        return NULL;
}

/* clear last-level ptes; a TLB flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain,
                                unsigned long start_pfn,
                                unsigned long last_pfn)
{
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
        struct dma_pte *first_pte, *pte;

        BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
        BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
        BUG_ON(start_pfn > last_pfn);

        /* we don't need lock here; nobody else touches the iova range */
        do {
                first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
                if (!pte) {
                        start_pfn = align_to_level(start_pfn + 1, 2);
                        continue;
                }
                do {
                        dma_clear_pte(pte);
                        start_pfn++;
                        pte++;
                } while (start_pfn <= last_pfn && !first_pte_in_page(pte));

                domain_flush_cache(domain, first_pte,
                                   (void *)pte - (void *)first_pte);

        } while (start_pfn && start_pfn <= last_pfn);
}

/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
                                   unsigned long start_pfn,
                                   unsigned long last_pfn)
{
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
        struct dma_pte *first_pte, *pte;
        int total = agaw_to_level(domain->agaw);
        int level;
        unsigned long tmp;

        BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
        BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
        BUG_ON(start_pfn > last_pfn);

        /* We don't need lock here; nobody else touches the iova range */
        level = 2;
        while (level <= total) {
                tmp = align_to_level(start_pfn, level);

                /* If we can't even clear one PTE at this level, we're done */
                if (tmp + level_size(level) - 1 > last_pfn)
                        return;

                do {
                        first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
                        if (!pte) {
                                tmp = align_to_level(tmp + 1, level + 1);
                                continue;
                        }
                        do {
                                if (dma_pte_present(pte)) {
                                        free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
                                        dma_clear_pte(pte);
                                }
                                pte++;
                                tmp += level_size(level);
                        } while (!first_pte_in_page(pte) &&
                                 tmp + level_size(level) - 1 <= last_pfn);

                        domain_flush_cache(domain, first_pte,
                                           (void *)pte - (void *)first_pte);

                } while (tmp && tmp + level_size(level) - 1 <= last_pfn);
                level++;
        }
        /* free pgd */
        if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
                free_pgtable_page(domain->pgd);
                domain->pgd = NULL;
        }
}

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
        struct root_entry *root;
        unsigned long flags;

        root = (struct root_entry *)alloc_pgtable_page();
        if (!root)
                return -ENOMEM;

        __iommu_flush_cache(iommu, root, ROOT_SIZE);

        spin_lock_irqsave(&iommu->lock, flags);
        iommu->root_entry = root;
        spin_unlock_irqrestore(&iommu->lock, flags);

        return 0;
}

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
        void *addr;
        u32 sts;
        unsigned long flag;

        addr = iommu->root_entry;

        spin_lock_irqsave(&iommu->register_lock, flag);
        dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));

        writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_RTPS), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
        u32 val;
        unsigned long flag;

        if (!rwbf_quirk && !cap_rwbf(iommu->cap))
                return;

        spin_lock_irqsave(&iommu->register_lock, flag);
        writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (!(val & DMA_GSTS_WBFS)), val);

        spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
                                  u16 did, u16 source_id, u8 function_mask,
                                  u64 type)
{
        u64 val = 0;
        unsigned long flag;

        switch (type) {
        case DMA_CCMD_GLOBAL_INVL:
                val = DMA_CCMD_GLOBAL_INVL;
                break;
        case DMA_CCMD_DOMAIN_INVL:
                val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
                break;
        case DMA_CCMD_DEVICE_INVL:
                val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
                        | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
                break;
        default:
                BUG();
        }
        val |= DMA_CCMD_ICC;

        spin_lock_irqsave(&iommu->register_lock, flag);
        dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
                      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

        spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
                                u64 addr, unsigned int size_order, u64 type)
{
        int tlb_offset = ecap_iotlb_offset(iommu->ecap);
        u64 val = 0, val_iva = 0;
        unsigned long flag;

        switch (type) {
        case DMA_TLB_GLOBAL_FLUSH:
                /* global flush doesn't need set IVA_REG */
                val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
                break;
        case DMA_TLB_DSI_FLUSH:
                val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
                break;
        case DMA_TLB_PSI_FLUSH:
                val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
                /* Note: always flush non-leaf currently */
                val_iva = size_order | addr;
                break;
        default:
                BUG();
        }
        /* Note: set drain read/write */
#if 0
        /*
         * This is probably to be super secure.. Looks like we can
         * ignore it without any impact.
         */
        if (cap_read_drain(iommu->cap))
                val |= DMA_TLB_READ_DRAIN;
#endif
        if (cap_write_drain(iommu->cap))
                val |= DMA_TLB_WRITE_DRAIN;

        spin_lock_irqsave(&iommu->register_lock, flag);
        /* Note: Only uses first TLB reg currently */
        if (val_iva)
                dmar_writeq(iommu->reg + tlb_offset, val_iva);
        dmar_writeq(iommu->reg + tlb_offset + 8, val);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, tlb_offset + 8,
                      dmar_readq, (!(val & DMA_TLB_IVT)), val);

        spin_unlock_irqrestore(&iommu->register_lock, flag);

        /* check IOTLB invalidation granularity */
        if (DMA_TLB_IAIG(val) == 0)
                printk(KERN_ERR "IOMMU: flush IOTLB failed\n");
        if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
                pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
                         (unsigned long long)DMA_TLB_IIRG(type),
                         (unsigned long long)DMA_TLB_IAIG(val));
}
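
/*
 * Illustrative note (added annotation): the three IOTLB granularities
 * above are global (everything), DSI (one domain id), and PSI (one
 * address range within a domain, with size_order encoding log2 of the
 * page count). DMA_TLB_IAIG() is checked afterwards because hardware
 * is permitted to perform a coarser invalidation than requested.
 */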

static struct device_domain_info *iommu_support_dev_iotlb(
        struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
        int found = 0;
        unsigned long flags;
        struct device_domain_info *info;
        struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);

        if (!ecap_dev_iotlb_support(iommu->ecap))
                return NULL;

        if (!iommu->qi)
                return NULL;

        spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_entry(info, &domain->devices, link)
                if (info->bus == bus && info->devfn == devfn) {
                        found = 1;
                        break;
                }
        spin_unlock_irqrestore(&device_domain_lock, flags);

        if (!found || !info->dev)
                return NULL;

        if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
                return NULL;

        if (!dmar_find_matched_atsr_unit(info->dev))
                return NULL;

        info->iommu = iommu;

        return info;
}

static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
        if (!info)
                return;

        pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
}

static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
        if (!info->dev || !pci_ats_enabled(info->dev))
                return;

        pci_disable_ats(info->dev);
}

static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
                                  u64 addr, unsigned mask)
{
        u16 sid, qdep;
        unsigned long flags;
        struct device_domain_info *info;

        spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_entry(info, &domain->devices, link) {
                if (!info->dev || !pci_ats_enabled(info->dev))
                        continue;

                sid = info->bus << 8 | info->devfn;
                qdep = pci_ats_queue_depth(info->dev);
                qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
        }
        spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
                                  unsigned long pfn, unsigned int pages)
{
        unsigned int mask = ilog2(__roundup_pow_of_two(pages));
        uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;

        BUG_ON(pages == 0);

        /*
         * Fall back to domain selective flush if no PSI support or the
         * size is too big.
         * PSI requires page size to be 2 ^ x, and the base address is
         * naturally aligned to the size.
         */
        if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
                iommu->flush.flush_iotlb(iommu, did, 0, 0,
                                         DMA_TLB_DSI_FLUSH);
        else
                iommu->flush.flush_iotlb(iommu, did, addr, mask,
                                         DMA_TLB_PSI_FLUSH);

        /*
         * In caching mode, domain ID 0 is reserved for non-present to present
         * mapping flush. Device IOTLB doesn't need to be flushed in this case.
         */
        if (!cap_caching_mode(iommu->cap) || did)
                iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
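
/*
 * Worked example (illustrative, added annotation): for pages == 4,
 * mask == ilog2(roundup_pow_of_two(4)) == 2, i.e. invalidate an
 * aligned 4-page (16KiB) range. A non-power-of-two count such as 5
 * rounds up to mask 3 (8 pages), since PSI can only describe 2^n
 * page ranges.
 */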

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
        u32 pmen;
        unsigned long flags;

        spin_lock_irqsave(&iommu->register_lock, flags);
        pmen = readl(iommu->reg + DMAR_PMEN_REG);
        pmen &= ~DMA_PMEN_EPM;
        writel(pmen, iommu->reg + DMAR_PMEN_REG);

        /* wait for the protected region status bit to clear */
        IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
                      readl, !(pmen & DMA_PMEN_PRS), pmen);

        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int iommu_enable_translation(struct intel_iommu *iommu)
{
        u32 sts;
        unsigned long flags;

        spin_lock_irqsave(&iommu->register_lock, flags);
        iommu->gcmd |= DMA_GCMD_TE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_TES), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flags);
        return 0;
}

static int iommu_disable_translation(struct intel_iommu *iommu)
{
        u32 sts;
        unsigned long flag;

        spin_lock_irqsave(&iommu->register_lock, flag);
        iommu->gcmd &= ~DMA_GCMD_TE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (!(sts & DMA_GSTS_TES)), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flag);
        return 0;
}

static int iommu_init_domains(struct intel_iommu *iommu)
{
        unsigned long ndomains;
        unsigned long nlongs;

        ndomains = cap_ndoms(iommu->cap);
        pr_debug("Number of Domains supported <%ld>\n", ndomains);
        nlongs = BITS_TO_LONGS(ndomains);

        spin_lock_init(&iommu->lock);

        /* TBD: there might be 64K domains,
         * consider other allocation for future chip
         */
        iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
        if (!iommu->domain_ids) {
                printk(KERN_ERR "Allocating domain id array failed\n");
                return -ENOMEM;
        }
        iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
                        GFP_KERNEL);
        if (!iommu->domains) {
                printk(KERN_ERR "Allocating domain array failed\n");
                return -ENOMEM;
        }

        /*
         * If Caching mode is set, then invalid translations are tagged
         * with domain id 0. Hence we need to pre-allocate it.
         */
        if (cap_caching_mode(iommu->cap))
                set_bit(0, iommu->domain_ids);
        return 0;
}


static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
{
        struct dmar_domain *domain;
        int i;
        unsigned long flags;

        if ((iommu->domains) && (iommu->domain_ids)) {
                i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
                for (; i < cap_ndoms(iommu->cap); ) {
                        domain = iommu->domains[i];
                        clear_bit(i, iommu->domain_ids);

                        spin_lock_irqsave(&domain->iommu_lock, flags);
                        if (--domain->iommu_count == 0) {
                                if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
                                        vm_domain_exit(domain);
                                else
                                        domain_exit(domain);
                        }
                        spin_unlock_irqrestore(&domain->iommu_lock, flags);

                        i = find_next_bit(iommu->domain_ids,
                                          cap_ndoms(iommu->cap), i+1);
                }
        }

        if (iommu->gcmd & DMA_GCMD_TE)
                iommu_disable_translation(iommu);

        if (iommu->irq) {
                set_irq_data(iommu->irq, NULL);
                /* This will mask the irq */
                free_irq(iommu->irq, iommu);
                destroy_irq(iommu->irq);
        }

        kfree(iommu->domains);
        kfree(iommu->domain_ids);

        g_iommus[iommu->seq_id] = NULL;

        /* if all iommus are freed, free g_iommus */
        for (i = 0; i < g_num_of_iommus; i++) {
                if (g_iommus[i])
                        break;
        }

        if (i == g_num_of_iommus)
                kfree(g_iommus);

        /* free context mapping */
        free_context_table(iommu);
}

static struct dmar_domain *alloc_domain(void)
{
        struct dmar_domain *domain;

        domain = alloc_domain_mem();
        if (!domain)
                return NULL;

        memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
        domain->flags = 0;

        return domain;
}

static int iommu_attach_domain(struct dmar_domain *domain,
                               struct intel_iommu *iommu)
{
        int num;
        unsigned long ndomains;
        unsigned long flags;

        ndomains = cap_ndoms(iommu->cap);

        spin_lock_irqsave(&iommu->lock, flags);

        num = find_first_zero_bit(iommu->domain_ids, ndomains);
        if (num >= ndomains) {
                spin_unlock_irqrestore(&iommu->lock, flags);
                printk(KERN_ERR "IOMMU: no free domain ids\n");
                return -ENOMEM;
        }

        domain->id = num;
        set_bit(num, iommu->domain_ids);
        set_bit(iommu->seq_id, &domain->iommu_bmp);
        iommu->domains[num] = domain;
        spin_unlock_irqrestore(&iommu->lock, flags);

        return 0;
}

static void iommu_detach_domain(struct dmar_domain *domain,
                                struct intel_iommu *iommu)
{
        unsigned long flags;
        int num, ndomains;
        int found = 0;

        spin_lock_irqsave(&iommu->lock, flags);
        ndomains = cap_ndoms(iommu->cap);
        num = find_first_bit(iommu->domain_ids, ndomains);
        for (; num < ndomains; ) {
                if (iommu->domains[num] == domain) {
                        found = 1;
                        break;
                }
                num = find_next_bit(iommu->domain_ids,
                                    cap_ndoms(iommu->cap), num+1);
        }

        if (found) {
                clear_bit(num, iommu->domain_ids);
                clear_bit(iommu->seq_id, &domain->iommu_bmp);
                iommu->domains[num] = NULL;
        }
        spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

static void dmar_init_reserved_ranges(void)
{
        struct pci_dev *pdev = NULL;
        struct iova *iova;
        int i;

        init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

        lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
                          &reserved_rbtree_key);

        /* IOAPIC ranges shouldn't be accessed by DMA */
        iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
                            IOVA_PFN(IOAPIC_RANGE_END));
        if (!iova)
                printk(KERN_ERR "Reserve IOAPIC range failed\n");

        /* Reserve all PCI MMIO to avoid peer-to-peer access */
        for_each_pci_dev(pdev) {
                struct resource *r;

                for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                        r = &pdev->resource[i];
                        if (!r->flags || !(r->flags & IORESOURCE_MEM))
                                continue;
                        iova = reserve_iova(&reserved_iova_list,
                                            IOVA_PFN(r->start),
                                            IOVA_PFN(r->end));
                        if (!iova)
                                printk(KERN_ERR "Reserve iova failed\n");
                }
        }
}

static void domain_reserve_special_ranges(struct dmar_domain *domain)
{
        copy_reserved_iova(&reserved_iova_list, &domain->iovad);
}

static inline int guestwidth_to_adjustwidth(int gaw)
{
        int agaw;
        int r = (gaw - 12) % 9;

        if (r == 0)
                agaw = gaw;
        else
                agaw = gaw + 9 - r;
        if (agaw > 64)
                agaw = 64;
        return agaw;
}
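
/*
 * Worked example (illustrative, added annotation): guest width 36
 * gives r == (36 - 12) % 9 == 6, so the adjusted width is
 * 36 + 9 - 6 == 39 -- the next width of the form 12 + 9*n that the
 * page-table levels can actually express. A width of 48 is already
 * aligned and is returned as-is.
 */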

static int domain_init(struct dmar_domain *domain, int guest_width)
{
        struct intel_iommu *iommu;
        int adjust_width, agaw;
        unsigned long sagaw;

        init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
        spin_lock_init(&domain->iommu_lock);

        domain_reserve_special_ranges(domain);

        /* calculate AGAW */
        iommu = domain_get_iommu(domain);
        if (guest_width > cap_mgaw(iommu->cap))
                guest_width = cap_mgaw(iommu->cap);
        domain->gaw = guest_width;
        adjust_width = guestwidth_to_adjustwidth(guest_width);
        agaw = width_to_agaw(adjust_width);
        sagaw = cap_sagaw(iommu->cap);
        if (!test_bit(agaw, &sagaw)) {
                /* hardware doesn't support it, choose a bigger one */
                pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
                agaw = find_next_bit(&sagaw, 5, agaw);
                if (agaw >= 5)
                        return -ENODEV;
        }
        domain->agaw = agaw;
        INIT_LIST_HEAD(&domain->devices);

        if (ecap_coherent(iommu->ecap))
                domain->iommu_coherency = 1;
        else
                domain->iommu_coherency = 0;

        if (ecap_sc_support(iommu->ecap))
                domain->iommu_snooping = 1;
        else
                domain->iommu_snooping = 0;

        domain->iommu_count = 1;

        /* always allocate the top pgd */
        domain->pgd = (struct dma_pte *)alloc_pgtable_page();
        if (!domain->pgd)
                return -ENOMEM;
        __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
        return 0;
}

static void domain_exit(struct dmar_domain *domain)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;

        /* Domain 0 is reserved, so don't process it */
        if (!domain)
                return;

        domain_remove_dev_info(domain);
        /* destroy iovas */
        put_iova_domain(&domain->iovad);

        /* clear ptes */
        dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

        /* free page tables */
        dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));

        for_each_active_iommu(iommu, drhd)
                if (test_bit(iommu->seq_id, &domain->iommu_bmp))
                        iommu_detach_domain(domain, iommu);

        free_domain_mem(domain);
}

static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
                                      u8 bus, u8 devfn, int translation)
{
        struct context_entry *context;
        unsigned long flags;
        struct intel_iommu *iommu;
        struct dma_pte *pgd;
        unsigned long num;
        unsigned long ndomains;
        int id;
        int agaw;
        struct device_domain_info *info = NULL;

        pr_debug("Set context mapping for %02x:%02x.%d\n",
                bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

        BUG_ON(!domain->pgd);
        BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
               translation != CONTEXT_TT_MULTI_LEVEL);

        iommu = device_to_iommu(segment, bus, devfn);
        if (!iommu)
                return -ENODEV;

        context = device_to_context_entry(iommu, bus, devfn);
        if (!context)
                return -ENOMEM;
        spin_lock_irqsave(&iommu->lock, flags);
        if (context_present(context)) {
                spin_unlock_irqrestore(&iommu->lock, flags);
                return 0;
        }

        id = domain->id;
        pgd = domain->pgd;

        if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
            domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
                int found = 0;

                /* find an available domain id for this device in iommu */
                ndomains = cap_ndoms(iommu->cap);
                num = find_first_bit(iommu->domain_ids, ndomains);
                for (; num < ndomains; ) {
                        if (iommu->domains[num] == domain) {
                                id = num;
                                found = 1;
                                break;
                        }
                        num = find_next_bit(iommu->domain_ids,
                                            cap_ndoms(iommu->cap), num+1);
                }

                if (found == 0) {
                        num = find_first_zero_bit(iommu->domain_ids, ndomains);
                        if (num >= ndomains) {
                                spin_unlock_irqrestore(&iommu->lock, flags);
                                printk(KERN_ERR "IOMMU: no free domain ids\n");
                                return -EFAULT;
                        }

                        set_bit(num, iommu->domain_ids);
                        iommu->domains[num] = domain;
                        id = num;
                }

                /* Skip top levels of page tables for
                 * iommu which has less agaw than default.
                 */
                for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
                        pgd = phys_to_virt(dma_pte_addr(pgd));
                        if (!dma_pte_present(pgd)) {
                                spin_unlock_irqrestore(&iommu->lock, flags);
                                return -ENOMEM;
                        }
                }
        }

        context_set_domain_id(context, id);

        if (translation != CONTEXT_TT_PASS_THROUGH) {
                info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
                translation = info ? CONTEXT_TT_DEV_IOTLB :
                                     CONTEXT_TT_MULTI_LEVEL;
        }
        /*
         * In pass through mode, AW must be programmed to indicate the largest
         * AGAW value supported by hardware. And ASR is ignored by hardware.
         */
        if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
                context_set_address_width(context, iommu->msagaw);
        else {
                context_set_address_root(context, virt_to_phys(pgd));
                context_set_address_width(context, iommu->agaw);
        }

        context_set_translation_type(context, translation);
        context_set_fault_enable(context);
        context_set_present(context);
        domain_flush_cache(domain, context, sizeof(*context));

        /*
         * It's a non-present to present mapping. If hardware doesn't cache
         * non-present entries we only need to flush the write-buffer. If it
         * _does_ cache non-present entries, then it does so in the special
         * domain #0, which we have to flush:
         */
        if (cap_caching_mode(iommu->cap)) {
                iommu->flush.flush_context(iommu, 0,
                                           (((u16)bus) << 8) | devfn,
                                           DMA_CCMD_MASK_NOBIT,
                                           DMA_CCMD_DEVICE_INVL);
                iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
        } else {
                iommu_flush_write_buffer(iommu);
        }
        iommu_enable_dev_iotlb(info);
        spin_unlock_irqrestore(&iommu->lock, flags);

        spin_lock_irqsave(&domain->iommu_lock, flags);
        if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
                domain->iommu_count++;
                domain_update_iommu_cap(domain);
        }
        spin_unlock_irqrestore(&domain->iommu_lock, flags);
        return 0;
}

static int
domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
                       int translation)
{
        int ret;
        struct pci_dev *tmp, *parent;

        ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
                                         pdev->bus->number, pdev->devfn,
                                         translation);
        if (ret)
                return ret;

        /* dependent device mapping */
        tmp = pci_find_upstream_pcie_bridge(pdev);
        if (!tmp)
                return 0;
        /* Secondary interface's bus number and devfn 0 */
        parent = pdev->bus->self;
        while (parent != tmp) {
                ret = domain_context_mapping_one(domain,
                                                 pci_domain_nr(parent->bus),
                                                 parent->bus->number,
                                                 parent->devfn, translation);
                if (ret)
                        return ret;
                parent = parent->bus->self;
        }
        if (tmp->is_pcie) /* this is a PCIe-to-PCI bridge */
                return domain_context_mapping_one(domain,
                                        pci_domain_nr(tmp->subordinate),
                                        tmp->subordinate->number, 0,
                                        translation);
        else /* this is a legacy PCI bridge */
                return domain_context_mapping_one(domain,
                                        pci_domain_nr(tmp->bus),
                                        tmp->bus->number,
                                        tmp->devfn,
                                        translation);
}
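
/*
 * Illustrative note (added annotation): for a device behind a
 * PCIe-to-PCI bridge, DMA can arrive at the IOMMU tagged with the
 * bridge's source-id rather than the device's own, so the walk above
 * programs a context entry for the device, for every bridge up to the
 * PCIe bridge, and for that bridge itself (secondary bus with devfn 0
 * for PCIe; its own bus/devfn for legacy PCI).
 */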

static int domain_context_mapped(struct pci_dev *pdev)
{
        int ret;
        struct pci_dev *tmp, *parent;
        struct intel_iommu *iommu;

        iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
                                pdev->devfn);
        if (!iommu)
                return -ENODEV;

        ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
        if (!ret)
                return ret;
        /* dependent device mapping */
        tmp = pci_find_upstream_pcie_bridge(pdev);
        if (!tmp)
                return ret;
        /* Secondary interface's bus number and devfn 0 */
        parent = pdev->bus->self;
        while (parent != tmp) {
                ret = device_context_mapped(iommu, parent->bus->number,
                                            parent->devfn);
                if (!ret)
                        return ret;
                parent = parent->bus->self;
        }
        if (tmp->is_pcie)
                return device_context_mapped(iommu, tmp->subordinate->number,
                                             0);
        else
                return device_context_mapped(iommu, tmp->bus->number,
                                             tmp->devfn);
}

/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
                                            size_t size)
{
        host_addr &= ~PAGE_MASK;
        return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
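
/*
 * Worked example (illustrative, added annotation): with 4KiB pages, a
 * host_addr offset of 0x800 and size 0x1000 gives
 * PAGE_ALIGN(0x1800) >> VTD_PAGE_SHIFT == 0x2000 >> 12 == 2 VT-d
 * pages, even though the data itself is only one page long.
 */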

static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                            struct scatterlist *sg, unsigned long phys_pfn,
                            unsigned long nr_pages, int prot)
{
        struct dma_pte *first_pte = NULL, *pte = NULL;
        phys_addr_t uninitialized_var(pteval);
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
        unsigned long sg_res;

        BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);

        if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
                return -EINVAL;

        prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;

        if (sg)
                sg_res = 0;
        else {
                sg_res = nr_pages + 1;
                pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
        }

        while (nr_pages--) {
                uint64_t tmp;

                if (!sg_res) {
                        sg_res = aligned_nrpages(sg->offset, sg->length);
                        sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
                        sg->dma_length = sg->length;
                        pteval = page_to_phys(sg_page(sg)) | prot;
                }
                if (!pte) {
                        first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
                        if (!pte)
                                return -ENOMEM;
                }
                /* We don't need lock here, nobody else
                 * touches the iova range
                 */
                tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
                if (tmp) {
                        static int dumps = 5;
                        printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
                               iov_pfn, tmp, (unsigned long long)pteval);
                        if (dumps) {
                                dumps--;
                                debug_dma_dump_mappings(NULL);
                        }
                        WARN_ON(1);
                }
                pte++;
                if (!nr_pages || first_pte_in_page(pte)) {
                        domain_flush_cache(domain, first_pte,
                                           (void *)pte - (void *)first_pte);
                        pte = NULL;
                }
                iov_pfn++;
                pteval += VTD_PAGE_SIZE;
                sg_res--;
                if (!sg_res)
                        sg = sg_next(sg);
        }
        return 0;
}
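
/*
 * Illustrative note (added annotation): __domain_mapping() is used two
 * ways, as the wrappers below show -- with a scatterlist (sg_res
 * tracks how many VT-d pages remain in the current sg element) or with
 * a single physical range (sg == NULL, so sg_res is primed to
 * nr_pages + 1 and never reaches zero inside the loop).
 */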

static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                                    struct scatterlist *sg, unsigned long nr_pages,
                                    int prot)
{
        return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}

static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                                     unsigned long phys_pfn, unsigned long nr_pages,
                                     int prot)
{
        return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}

static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
        if (!iommu)
                return;

        clear_context_table(iommu, bus, devfn);
        iommu->flush.flush_context(iommu, 0, 0, 0,
                                   DMA_CCMD_GLOBAL_INVL);
        iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}

static void domain_remove_dev_info(struct dmar_domain *domain)
{
        struct device_domain_info *info;
        unsigned long flags;
        struct intel_iommu *iommu;

        spin_lock_irqsave(&device_domain_lock, flags);
        while (!list_empty(&domain->devices)) {
                info = list_entry(domain->devices.next,
                                  struct device_domain_info, link);
                list_del(&info->link);
                list_del(&info->global);
                if (info->dev)
                        info->dev->dev.archdata.iommu = NULL;
                spin_unlock_irqrestore(&device_domain_lock, flags);

                iommu_disable_dev_iotlb(info);
                iommu = device_to_iommu(info->segment, info->bus, info->devfn);
                iommu_detach_dev(iommu, info->bus, info->devfn);
                free_devinfo_mem(info);

                spin_lock_irqsave(&device_domain_lock, flags);
        }
        spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: we use struct pci_dev->dev.archdata.iommu to store the info
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
        struct device_domain_info *info;

        /* No lock here, assumes no domain exit in normal case */
        info = pdev->dev.archdata.iommu;
        if (info)
                return info->domain;
        return NULL;
}
1802
ba395927
KA
1803/* domain is initialized */
1804static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1805{
1806 struct dmar_domain *domain, *found = NULL;
1807 struct intel_iommu *iommu;
1808 struct dmar_drhd_unit *drhd;
1809 struct device_domain_info *info, *tmp;
1810 struct pci_dev *dev_tmp;
1811 unsigned long flags;
1812 int bus = 0, devfn = 0;
276dbf99 1813 int segment;
2c2e2c38 1814 int ret;
ba395927
KA
1815
1816 domain = find_domain(pdev);
1817 if (domain)
1818 return domain;
1819
276dbf99
DW
1820 segment = pci_domain_nr(pdev->bus);
1821
ba395927
KA
1822 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1823 if (dev_tmp) {
1824 if (dev_tmp->is_pcie) {
1825 bus = dev_tmp->subordinate->number;
1826 devfn = 0;
1827 } else {
1828 bus = dev_tmp->bus->number;
1829 devfn = dev_tmp->devfn;
1830 }
1831 spin_lock_irqsave(&device_domain_lock, flags);
1832 list_for_each_entry(info, &device_domain_list, global) {
276dbf99
DW
1833 if (info->segment == segment &&
1834 info->bus == bus && info->devfn == devfn) {
ba395927
KA
1835 found = info->domain;
1836 break;
1837 }
1838 }
1839 spin_unlock_irqrestore(&device_domain_lock, flags);
 1840 /* pcie-pci bridge already has a domain; use it */
1841 if (found) {
1842 domain = found;
1843 goto found_domain;
1844 }
1845 }
1846
2c2e2c38
FY
1847 domain = alloc_domain();
1848 if (!domain)
1849 goto error;
1850
ba395927
KA
1851 /* Allocate new domain for the device */
1852 drhd = dmar_find_matched_drhd_unit(pdev);
1853 if (!drhd) {
1854 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
1855 pci_name(pdev));
1856 return NULL;
1857 }
1858 iommu = drhd->iommu;
1859
2c2e2c38
FY
1860 ret = iommu_attach_domain(domain, iommu);
1861 if (ret) {
1862 domain_exit(domain);
ba395927 1863 goto error;
2c2e2c38 1864 }
ba395927
KA
1865
1866 if (domain_init(domain, gaw)) {
1867 domain_exit(domain);
1868 goto error;
1869 }
1870
1871 /* register pcie-to-pci device */
1872 if (dev_tmp) {
1873 info = alloc_devinfo_mem();
1874 if (!info) {
1875 domain_exit(domain);
1876 goto error;
1877 }
276dbf99 1878 info->segment = segment;
ba395927
KA
1879 info->bus = bus;
1880 info->devfn = devfn;
1881 info->dev = NULL;
1882 info->domain = domain;
1883 /* This domain is shared by devices under p2p bridge */
3b5410e7 1884 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
ba395927
KA
1885
 1886 /* pcie-to-pci bridge already has a domain; use it */
1887 found = NULL;
1888 spin_lock_irqsave(&device_domain_lock, flags);
1889 list_for_each_entry(tmp, &device_domain_list, global) {
276dbf99
DW
1890 if (tmp->segment == segment &&
1891 tmp->bus == bus && tmp->devfn == devfn) {
ba395927
KA
1892 found = tmp->domain;
1893 break;
1894 }
1895 }
1896 if (found) {
1897 free_devinfo_mem(info);
1898 domain_exit(domain);
1899 domain = found;
1900 } else {
1901 list_add(&info->link, &domain->devices);
1902 list_add(&info->global, &device_domain_list);
1903 }
1904 spin_unlock_irqrestore(&device_domain_lock, flags);
1905 }
1906
1907found_domain:
1908 info = alloc_devinfo_mem();
1909 if (!info)
1910 goto error;
276dbf99 1911 info->segment = segment;
ba395927
KA
1912 info->bus = pdev->bus->number;
1913 info->devfn = pdev->devfn;
1914 info->dev = pdev;
1915 info->domain = domain;
1916 spin_lock_irqsave(&device_domain_lock, flags);
 1917 /* somebody else raced us and already attached a domain */
1918 found = find_domain(pdev);
1919 if (found != NULL) {
1920 spin_unlock_irqrestore(&device_domain_lock, flags);
1921 if (found != domain) {
1922 domain_exit(domain);
1923 domain = found;
1924 }
1925 free_devinfo_mem(info);
1926 return domain;
1927 }
1928 list_add(&info->link, &domain->devices);
1929 list_add(&info->global, &device_domain_list);
358dd8ac 1930 pdev->dev.archdata.iommu = info;
ba395927
KA
1931 spin_unlock_irqrestore(&device_domain_lock, flags);
1932 return domain;
1933error:
1934 /* recheck it here, maybe others set it */
1935 return find_domain(pdev);
1936}
1937
2c2e2c38 1938static int iommu_identity_mapping;
e0fc7e0b
DW
1939#define IDENTMAP_ALL 1
1940#define IDENTMAP_GFX 2
1941#define IDENTMAP_AZALIA 4
2c2e2c38 1942
b213203e
DW
1943static int iommu_domain_identity_map(struct dmar_domain *domain,
1944 unsigned long long start,
1945 unsigned long long end)
ba395927 1946{
c5395d5c
DW
1947 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
1948 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
1949
1950 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
1951 dma_to_mm_pfn(last_vpfn))) {
ba395927 1952 printk(KERN_ERR "IOMMU: reserve iova failed\n");
b213203e 1953 return -ENOMEM;
ba395927
KA
1954 }
1955
c5395d5c
DW
1956 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
1957 start, end, domain->id);
ba395927
KA
1958 /*
1959 * RMRR range might have overlap with physical memory range,
1960 * clear it first
1961 */
c5395d5c 1962 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
ba395927 1963
c5395d5c
DW
1964 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
1965 last_vpfn - first_vpfn + 1,
61df7443 1966 DMA_PTE_READ|DMA_PTE_WRITE);
b213203e
DW
1967}
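/*
 * Worked example (assuming 4KiB VT-d pages, i.e. VTD_PAGE_SHIFT == 12):
 * start = 0x100000, end = 0x1fffff yields first_vpfn = 0x100 and
 * last_vpfn = 0x1ff, so the IOVA range is reserved, any stale PTEs are
 * cleared, and 256 PTEs are installed identity-mapping the 1MiB region
 * starting at 1MiB.
 */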
1968
1969static int iommu_prepare_identity_map(struct pci_dev *pdev,
1970 unsigned long long start,
1971 unsigned long long end)
1972{
1973 struct dmar_domain *domain;
1974 int ret;
1975
c7ab48d2 1976 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
b213203e
DW
1977 if (!domain)
1978 return -ENOMEM;
1979
19943b0e
DW
1980 /* For _hardware_ passthrough, don't bother. But for software
1981 passthrough, we do it anyway -- it may indicate a memory
 1982 range which is reserved in E820 and so didn't get set
1983 up to start with in si_domain */
1984 if (domain == si_domain && hw_pass_through) {
 1985 printk(KERN_INFO "Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
1986 pci_name(pdev), start, end);
1987 return 0;
1988 }
1989
1990 printk(KERN_INFO
1991 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1992 pci_name(pdev), start, end);
2ff729f5
DW
1993
1994 if (end >> agaw_to_width(domain->agaw)) {
1995 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
1996 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
1997 agaw_to_width(domain->agaw),
1998 dmi_get_system_info(DMI_BIOS_VENDOR),
1999 dmi_get_system_info(DMI_BIOS_VERSION),
2000 dmi_get_system_info(DMI_PRODUCT_VERSION));
2001 ret = -EIO;
2002 goto error;
2003 }
19943b0e 2004
b213203e 2005 ret = iommu_domain_identity_map(domain, start, end);
ba395927
KA
2006 if (ret)
2007 goto error;
2008
2009 /* context entry init */
4ed0d3e6 2010 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
b213203e
DW
2011 if (ret)
2012 goto error;
2013
2014 return 0;
2015
2016 error:
ba395927
KA
2017 domain_exit(domain);
2018 return ret;
ba395927
KA
2019}
2020
2021static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2022 struct pci_dev *pdev)
2023{
358dd8ac 2024 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
ba395927
KA
2025 return 0;
2026 return iommu_prepare_identity_map(pdev, rmrr->base_address,
2027 rmrr->end_address + 1);
2028}
2029
49a0429e
KA
2030#ifdef CONFIG_DMAR_FLOPPY_WA
2031static inline void iommu_prepare_isa(void)
2032{
2033 struct pci_dev *pdev;
2034 int ret;
2035
2036 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2037 if (!pdev)
2038 return;
2039
c7ab48d2 2040 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
49a0429e
KA
2041 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
2042
2043 if (ret)
c7ab48d2
DW
2044 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2045 "floppy might not work\n");
49a0429e
KA
2046
2047}
2048#else
2049static inline void iommu_prepare_isa(void)
2050{
2051 return;
2052}
 2053#endif /* !CONFIG_DMAR_FLOPPY_WA */
2054
2c2e2c38 2055static int md_domain_init(struct dmar_domain *domain, int guest_width);
c7ab48d2
DW
2056
2057static int __init si_domain_work_fn(unsigned long start_pfn,
2058 unsigned long end_pfn, void *datax)
2059{
2060 int *ret = datax;
2061
2062 *ret = iommu_domain_identity_map(si_domain,
2063 (uint64_t)start_pfn << PAGE_SHIFT,
2064 (uint64_t)end_pfn << PAGE_SHIFT);
2065 return *ret;
2066
2067}
2068
071e1374 2069static int __init si_domain_init(int hw)
2c2e2c38
FY
2070{
2071 struct dmar_drhd_unit *drhd;
2072 struct intel_iommu *iommu;
c7ab48d2 2073 int nid, ret = 0;
2c2e2c38
FY
2074
2075 si_domain = alloc_domain();
2076 if (!si_domain)
2077 return -EFAULT;
2078
c7ab48d2 2079 pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
2c2e2c38
FY
2080
2081 for_each_active_iommu(iommu, drhd) {
2082 ret = iommu_attach_domain(si_domain, iommu);
2083 if (ret) {
2084 domain_exit(si_domain);
2085 return -EFAULT;
2086 }
2087 }
2088
2089 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2090 domain_exit(si_domain);
2091 return -EFAULT;
2092 }
2093
2094 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2095
19943b0e
DW
2096 if (hw)
2097 return 0;
2098
c7ab48d2
DW
2099 for_each_online_node(nid) {
2100 work_with_active_regions(nid, si_domain_work_fn, &ret);
2101 if (ret)
2102 return ret;
2103 }
2104
2c2e2c38
FY
2105 return 0;
2106}
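/*
 * For software passthrough, si_domain ends up with a 1:1 map of every
 * active memory region: si_domain_work_fn() above feeds each
 * [start_pfn, end_pfn) range to iommu_domain_identity_map(). With
 * hardware passthrough the page tables are never consulted, so the
 * region walk is skipped entirely.
 */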
2107
2108static void domain_remove_one_dev_info(struct dmar_domain *domain,
2109 struct pci_dev *pdev);
2110static int identity_mapping(struct pci_dev *pdev)
2111{
2112 struct device_domain_info *info;
2113
2114 if (likely(!iommu_identity_mapping))
2115 return 0;
2116
2118 list_for_each_entry(info, &si_domain->devices, link)
2119 if (info->dev == pdev)
2120 return 1;
2121 return 0;
2122}
2123
2124static int domain_add_dev_info(struct dmar_domain *domain,
5fe60f4e
DW
2125 struct pci_dev *pdev,
2126 int translation)
2c2e2c38
FY
2127{
2128 struct device_domain_info *info;
2129 unsigned long flags;
5fe60f4e 2130 int ret;
2c2e2c38
FY
2131
2132 info = alloc_devinfo_mem();
2133 if (!info)
2134 return -ENOMEM;
2135
5fe60f4e
DW
2136 ret = domain_context_mapping(domain, pdev, translation);
2137 if (ret) {
2138 free_devinfo_mem(info);
2139 return ret;
2140 }
2141
2c2e2c38
FY
2142 info->segment = pci_domain_nr(pdev->bus);
2143 info->bus = pdev->bus->number;
2144 info->devfn = pdev->devfn;
2145 info->dev = pdev;
2146 info->domain = domain;
2147
2148 spin_lock_irqsave(&device_domain_lock, flags);
2149 list_add(&info->link, &domain->devices);
2150 list_add(&info->global, &device_domain_list);
2151 pdev->dev.archdata.iommu = info;
2152 spin_unlock_irqrestore(&device_domain_lock, flags);
2153
2154 return 0;
2155}
2156
6941af28
DW
2157static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2158{
e0fc7e0b
DW
2159 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2160 return 1;
2161
2162 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2163 return 1;
2164
2165 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2166 return 0;
6941af28 2167
3dfc813d
DW
2168 /*
2169 * We want to start off with all devices in the 1:1 domain, and
2170 * take them out later if we find they can't access all of memory.
2171 *
2172 * However, we can't do this for PCI devices behind bridges,
2173 * because all PCI devices behind the same bridge will end up
2174 * with the same source-id on their transactions.
2175 *
2176 * Practically speaking, we can't change things around for these
2177 * devices at run-time, because we can't be sure there'll be no
2178 * DMA transactions in flight for any of their siblings.
2179 *
2180 * So PCI devices (unless they're on the root bus) as well as
2181 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2182 * the 1:1 domain, just in _case_ one of their siblings turns out
2183 * not to be able to map all of memory.
2184 */
2185 if (!pdev->is_pcie) {
2186 if (!pci_is_root_bus(pdev->bus))
2187 return 0;
2188 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2189 return 0;
2190 } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
2191 return 0;
2192
2193 /*
2194 * At boot time, we don't yet know if devices will be 64-bit capable.
2195 * Assume that they will -- if they turn out not to be, then we can
2196 * take them out of the 1:1 domain later.
2197 */
6941af28
DW
2198 if (!startup)
2199 return pdev->dma_mask > DMA_BIT_MASK(32);
2200
2201 return 1;
2202}
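/*
 * Decision summary: quirked Azalia/GFX devices are identity-mapped
 * whenever their IDENTMAP_* bit is set; beyond that, IDENTMAP_ALL must
 * be on. Conventional PCI devices below a bridge, and the bridges
 * themselves, are excluded because siblings share one source-id; and
 * once past startup only devices whose dma_mask exceeds 32 bits stay
 * in the 1:1 domain.
 */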
2203
071e1374 2204static int __init iommu_prepare_static_identity_mapping(int hw)
2c2e2c38 2205{
2c2e2c38
FY
2206 struct pci_dev *pdev = NULL;
2207 int ret;
2208
19943b0e 2209 ret = si_domain_init(hw);
2c2e2c38
FY
2210 if (ret)
2211 return -EFAULT;
2212
2c2e2c38 2213 for_each_pci_dev(pdev) {
6941af28 2214 if (iommu_should_identity_map(pdev, 1)) {
19943b0e
DW
2215 printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
2216 hw ? "hardware" : "software", pci_name(pdev));
62edf5dc 2217
5fe60f4e 2218 ret = domain_add_dev_info(si_domain, pdev,
19943b0e 2219 hw ? CONTEXT_TT_PASS_THROUGH :
62edf5dc
DW
2220 CONTEXT_TT_MULTI_LEVEL);
2221 if (ret)
2222 return ret;
62edf5dc 2223 }
2c2e2c38
FY
2224 }
2225
2226 return 0;
2227}
2228
2229int __init init_dmars(void)
ba395927
KA
2230{
2231 struct dmar_drhd_unit *drhd;
2232 struct dmar_rmrr_unit *rmrr;
2233 struct pci_dev *pdev;
2234 struct intel_iommu *iommu;
9d783ba0 2235 int i, ret;
2c2e2c38 2236
ba395927
KA
2237 /*
2238 * for each drhd
2239 * allocate root
2240 * initialize and program root entry to not present
2241 * endfor
2242 */
2243 for_each_drhd_unit(drhd) {
5e0d2a6f 2244 g_num_of_iommus++;
2245 /*
2246 * lock not needed as this is only incremented in the single
 2247 * threaded kernel __init code path; all other accesses are
 2248 * read-only
2249 */
2250 }
2251
d9630fe9
WH
2252 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2253 GFP_KERNEL);
2254 if (!g_iommus) {
2255 printk(KERN_ERR "Allocating global iommu array failed\n");
2256 ret = -ENOMEM;
2257 goto error;
2258 }
2259
80b20dd8 2260 deferred_flush = kzalloc(g_num_of_iommus *
2261 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2262 if (!deferred_flush) {
5e0d2a6f 2263 ret = -ENOMEM;
2264 goto error;
2265 }
2266
5e0d2a6f 2267 for_each_drhd_unit(drhd) {
2268 if (drhd->ignored)
2269 continue;
1886e8a9
SS
2270
2271 iommu = drhd->iommu;
d9630fe9 2272 g_iommus[iommu->seq_id] = iommu;
ba395927 2273
e61d98d8
SS
2274 ret = iommu_init_domains(iommu);
2275 if (ret)
2276 goto error;
2277
ba395927
KA
2278 /*
2279 * TBD:
2280 * we could share the same root & context tables
 2281 * among all IOMMUs. Need to split it later.
2282 */
2283 ret = iommu_alloc_root_entry(iommu);
2284 if (ret) {
2285 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2286 goto error;
2287 }
4ed0d3e6 2288 if (!ecap_pass_through(iommu->ecap))
19943b0e 2289 hw_pass_through = 0;
ba395927
KA
2290 }
2291
1531a6a6
SS
2292 /*
2293 * Start from the sane iommu hardware state.
2294 */
a77b67d4
YS
2295 for_each_drhd_unit(drhd) {
2296 if (drhd->ignored)
2297 continue;
2298
2299 iommu = drhd->iommu;
1531a6a6
SS
2300
2301 /*
2302 * If the queued invalidation is already initialized by us
2303 * (for example, while enabling interrupt-remapping) then
2304 * we got the things already rolling from a sane state.
2305 */
2306 if (iommu->qi)
2307 continue;
2308
2309 /*
2310 * Clear any previous faults.
2311 */
2312 dmar_fault(-1, iommu);
2313 /*
2314 * Disable queued invalidation if supported and already enabled
2315 * before OS handover.
2316 */
2317 dmar_disable_qi(iommu);
2318 }
2319
2320 for_each_drhd_unit(drhd) {
2321 if (drhd->ignored)
2322 continue;
2323
2324 iommu = drhd->iommu;
2325
a77b67d4
YS
2326 if (dmar_enable_qi(iommu)) {
2327 /*
2328 * Queued Invalidate not enabled, use Register Based
2329 * Invalidate
2330 */
2331 iommu->flush.flush_context = __iommu_flush_context;
2332 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2333 printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
b4e0f9eb
FT
2334 "invalidation\n",
2335 (unsigned long long)drhd->reg_base_addr);
a77b67d4
YS
2336 } else {
2337 iommu->flush.flush_context = qi_flush_context;
2338 iommu->flush.flush_iotlb = qi_flush_iotlb;
2339 printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
b4e0f9eb
FT
2340 "invalidation\n",
2341 (unsigned long long)drhd->reg_base_addr);
a77b67d4
YS
2342 }
2343 }
2344
19943b0e 2345 if (iommu_pass_through)
e0fc7e0b
DW
2346 iommu_identity_mapping |= IDENTMAP_ALL;
2347
19943b0e 2348#ifdef CONFIG_DMAR_BROKEN_GFX_WA
e0fc7e0b 2349 iommu_identity_mapping |= IDENTMAP_GFX;
19943b0e 2350#endif
e0fc7e0b
DW
2351
2352 check_tylersburg_isoch();
2353
ba395927 2354 /*
19943b0e
DW
 2355 * If pass through is not set or not enabled, set up context entries
 2356 * for identity mappings for rmrr, gfx, and isa; this may fall back to
 2357 * the static identity mapping if iommu_identity_mapping is set.
ba395927 2358 */
19943b0e
DW
2359 if (iommu_identity_mapping) {
2360 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
4ed0d3e6 2361 if (ret) {
19943b0e
DW
2362 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2363 goto error;
ba395927
KA
2364 }
2365 }
ba395927 2366 /*
19943b0e
DW
2367 * For each rmrr
2368 * for each dev attached to rmrr
2369 * do
2370 * locate drhd for dev, alloc domain for dev
2371 * allocate free domain
2372 * allocate page table entries for rmrr
2373 * if context not allocated for bus
2374 * allocate and init context
2375 * set present in root table for this bus
2376 * init context with domain, translation etc
2377 * endfor
2378 * endfor
ba395927 2379 */
19943b0e
DW
2380 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2381 for_each_rmrr_units(rmrr) {
2382 for (i = 0; i < rmrr->devices_cnt; i++) {
2383 pdev = rmrr->devices[i];
2384 /*
 2385 * some BIOSes list non-existent devices in
 2386 * the DMAR table.
2387 */
2388 if (!pdev)
2389 continue;
2390 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2391 if (ret)
2392 printk(KERN_ERR
2393 "IOMMU: mapping reserved region failed\n");
ba395927 2394 }
4ed0d3e6 2395 }
49a0429e 2396
19943b0e
DW
2397 iommu_prepare_isa();
2398
ba395927
KA
2399 /*
2400 * for each drhd
2401 * enable fault log
2402 * global invalidate context cache
2403 * global invalidate iotlb
2404 * enable translation
2405 */
2406 for_each_drhd_unit(drhd) {
2407 if (drhd->ignored)
2408 continue;
2409 iommu = drhd->iommu;
ba395927
KA
2410
2411 iommu_flush_write_buffer(iommu);
2412
3460a6d9
KA
2413 ret = dmar_set_interrupt(iommu);
2414 if (ret)
2415 goto error;
2416
ba395927
KA
2417 iommu_set_root_entry(iommu);
2418
4c25a2c1 2419 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
1f0ef2aa 2420 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
f8bab735 2421
ba395927
KA
2422 ret = iommu_enable_translation(iommu);
2423 if (ret)
2424 goto error;
b94996c9
DW
2425
2426 iommu_disable_protect_mem_regions(iommu);
ba395927
KA
2427 }
2428
2429 return 0;
2430error:
2431 for_each_drhd_unit(drhd) {
2432 if (drhd->ignored)
2433 continue;
2434 iommu = drhd->iommu;
2435 free_iommu(iommu);
2436 }
d9630fe9 2437 kfree(g_iommus);
ba395927
KA
2438 return ret;
2439}
2440
5a5e02a6 2441/* This takes a number of _MM_ pages, not VTD pages */
875764de
DW
2442static struct iova *intel_alloc_iova(struct device *dev,
2443 struct dmar_domain *domain,
2444 unsigned long nrpages, uint64_t dma_mask)
ba395927 2445{
ba395927 2446 struct pci_dev *pdev = to_pci_dev(dev);
ba395927 2447 struct iova *iova = NULL;
ba395927 2448
875764de
DW
2449 /* Restrict dma_mask to the width that the iommu can handle */
2450 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2451
2452 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
ba395927
KA
2453 /*
2454 * First try to allocate an io virtual address in
284901a9 2455 * DMA_BIT_MASK(32) and if that fails then try allocating
3609801e 2456 * from higher range
ba395927 2457 */
875764de
DW
2458 iova = alloc_iova(&domain->iovad, nrpages,
2459 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2460 if (iova)
2461 return iova;
2462 }
2463 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2464 if (unlikely(!iova)) {
 2465 printk(KERN_ERR "Allocating %ld-page iova for %s failed\n",
2466 nrpages, pci_name(pdev));
f76aec76
KA
2467 return NULL;
2468 }
2469
2470 return iova;
2471}
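/*
 * Illustrative call, matching the map paths below: unless dmar_forcedac
 * is set, a device with a 64-bit dma_mask first competes for IOVA space
 * below 4GiB and only spills into the high range when that fails:
 *
 *	iova = intel_alloc_iova(hwdev, domain,
 *				dma_to_mm_pfn(size), pdev->dma_mask);
 *
 * where 'size' is a VT-d page count, converted to MM pages as the
 * comment above this function requires.
 */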
2472
147202aa 2473static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
f76aec76
KA
2474{
2475 struct dmar_domain *domain;
2476 int ret;
2477
2478 domain = get_domain_for_dev(pdev,
2479 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2480 if (!domain) {
2481 printk(KERN_ERR
2482 "Allocating domain for %s failed", pci_name(pdev));
4fe05bbc 2483 return NULL;
ba395927
KA
2484 }
2485
2486 /* make sure context mapping is ok */
5331fe6f 2487 if (unlikely(!domain_context_mapped(pdev))) {
4ed0d3e6
FY
2488 ret = domain_context_mapping(domain, pdev,
2489 CONTEXT_TT_MULTI_LEVEL);
f76aec76
KA
2490 if (ret) {
2491 printk(KERN_ERR
2492 "Domain context map for %s failed",
2493 pci_name(pdev));
4fe05bbc 2494 return NULL;
f76aec76 2495 }
ba395927
KA
2496 }
2497
f76aec76
KA
2498 return domain;
2499}
2500
147202aa
DW
2501static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
2502{
2503 struct device_domain_info *info;
2504
2505 /* No lock here, assumes no domain exit in normal case */
2506 info = dev->dev.archdata.iommu;
2507 if (likely(info))
2508 return info->domain;
2509
2510 return __get_valid_domain_for_dev(dev);
2511}
2512
2c2e2c38
FY
2513static int iommu_dummy(struct pci_dev *pdev)
2514{
2515 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2516}
2517
2518/* Check if the pdev needs to go through non-identity map and unmap process.*/
73676832 2519static int iommu_no_mapping(struct device *dev)
2c2e2c38 2520{
73676832 2521 struct pci_dev *pdev;
2c2e2c38
FY
2522 int found;
2523
73676832
DW
2524 if (unlikely(dev->bus != &pci_bus_type))
2525 return 1;
2526
2527 pdev = to_pci_dev(dev);
1e4c64c4
DW
2528 if (iommu_dummy(pdev))
2529 return 1;
2530
2c2e2c38 2531 if (!iommu_identity_mapping)
1e4c64c4 2532 return 0;
2c2e2c38
FY
2533
2534 found = identity_mapping(pdev);
2535 if (found) {
6941af28 2536 if (iommu_should_identity_map(pdev, 0))
2c2e2c38
FY
2537 return 1;
2538 else {
2539 /*
 2540 * a 32 bit DMA device is removed from si_domain and falls
 2541 * back to non-identity mapping.
2542 */
2543 domain_remove_one_dev_info(si_domain, pdev);
2544 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2545 pci_name(pdev));
2546 return 0;
2547 }
2548 } else {
2549 /*
 2550 * a 64 bit DMA device detached from a vm domain is put back
 2551 * into si_domain for identity mapping.
2552 */
6941af28 2553 if (iommu_should_identity_map(pdev, 0)) {
2c2e2c38 2554 int ret;
5fe60f4e
DW
2555 ret = domain_add_dev_info(si_domain, pdev,
2556 hw_pass_through ?
2557 CONTEXT_TT_PASS_THROUGH :
2558 CONTEXT_TT_MULTI_LEVEL);
2c2e2c38
FY
2559 if (!ret) {
2560 printk(KERN_INFO "64bit %s uses identity mapping\n",
2561 pci_name(pdev));
2562 return 1;
2563 }
2564 }
2565 }
2566
1e4c64c4 2567 return 0;
2c2e2c38
FY
2568}
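/*
 * Net effect: devices migrate between si_domain and private domains at
 * run time. A device that turns out to be 32-bit only is dropped from
 * the identity map on its first mapping request; a 64-bit device coming
 * back from a VM domain is re-inserted into si_domain and translation
 * is then bypassed for it.
 */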
2569
bb9e6d65
FT
2570static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2571 size_t size, int dir, u64 dma_mask)
f76aec76
KA
2572{
2573 struct pci_dev *pdev = to_pci_dev(hwdev);
f76aec76 2574 struct dmar_domain *domain;
5b6985ce 2575 phys_addr_t start_paddr;
f76aec76
KA
2576 struct iova *iova;
2577 int prot = 0;
6865f0d1 2578 int ret;
8c11e798 2579 struct intel_iommu *iommu;
33041ec0 2580 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
f76aec76
KA
2581
2582 BUG_ON(dir == DMA_NONE);
2c2e2c38 2583
73676832 2584 if (iommu_no_mapping(hwdev))
6865f0d1 2585 return paddr;
f76aec76
KA
2586
2587 domain = get_valid_domain_for_dev(pdev);
2588 if (!domain)
2589 return 0;
2590
8c11e798 2591 iommu = domain_get_iommu(domain);
88cb6a74 2592 size = aligned_nrpages(paddr, size);
f76aec76 2593
5a5e02a6
DW
2594 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
2595 pdev->dma_mask);
f76aec76
KA
2596 if (!iova)
2597 goto error;
2598
ba395927
KA
2599 /*
2600 * Check if DMAR supports zero-length reads on write only
2601 * mappings..
2602 */
2603 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
8c11e798 2604 !cap_zlr(iommu->cap))
ba395927
KA
2605 prot |= DMA_PTE_READ;
2606 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2607 prot |= DMA_PTE_WRITE;
2608 /*
6865f0d1 2609 * paddr .. (paddr + size) might cover partial pages; we map the whole
ba395927 2610 * page. Note: if two parts of one page are separately mapped, we
6865f0d1 2611 * might have two guest_addr mappings to the same host paddr, but this
ba395927
KA
2612 * is not a big problem
2613 */
0ab36de2 2614 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
33041ec0 2615 mm_to_dma_pfn(paddr_pfn), size, prot);
ba395927
KA
2616 if (ret)
2617 goto error;
2618
1f0ef2aa
DW
2619 /* it's a non-present to present mapping. Only flush if caching mode */
2620 if (cap_caching_mode(iommu->cap))
03d6a246 2621 iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size);
1f0ef2aa 2622 else
8c11e798 2623 iommu_flush_write_buffer(iommu);
f76aec76 2624
03d6a246
DW
2625 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2626 start_paddr += paddr & ~PAGE_MASK;
2627 return start_paddr;
ba395927 2628
ba395927 2629error:
f76aec76
KA
2630 if (iova)
2631 __free_iova(&domain->iovad, iova);
4cf2e75d 2632 printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
5b6985ce 2633 pci_name(pdev), size, (unsigned long long)paddr, dir);
ba395927
KA
2634 return 0;
2635}
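/*
 * Worked example of the partial-page handling above (illustrative
 * values, assuming 4KiB pages): paddr = 0x12345678 with size = 0x2000
 * touches three pages, so aligned_nrpages() yields 3 and three PTEs
 * are installed; the returned handle re-applies the sub-page offset,
 * i.e. ((phys_addr_t)iova->pfn_lo << PAGE_SHIFT) + 0x678.
 */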
2636
ffbbef5c
FT
2637static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2638 unsigned long offset, size_t size,
2639 enum dma_data_direction dir,
2640 struct dma_attrs *attrs)
bb9e6d65 2641{
ffbbef5c
FT
2642 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2643 dir, to_pci_dev(dev)->dma_mask);
bb9e6d65
FT
2644}
2645
5e0d2a6f 2646static void flush_unmaps(void)
2647{
80b20dd8 2648 int i, j;
5e0d2a6f 2649
5e0d2a6f 2650 timer_on = 0;
2651
2652 /* just flush them all */
2653 for (i = 0; i < g_num_of_iommus; i++) {
a2bb8459
WH
2654 struct intel_iommu *iommu = g_iommus[i];
2655 if (!iommu)
2656 continue;
c42d9f32 2657
9dd2fe89
YZ
2658 if (!deferred_flush[i].next)
2659 continue;
2660
2661 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
93a23a72 2662 DMA_TLB_GLOBAL_FLUSH);
9dd2fe89 2663 for (j = 0; j < deferred_flush[i].next; j++) {
93a23a72
YZ
2664 unsigned long mask;
2665 struct iova *iova = deferred_flush[i].iova[j];
2666
64de5af0 2667 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
93a23a72 2668 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
64de5af0 2669 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
93a23a72 2670 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
80b20dd8 2671 }
9dd2fe89 2672 deferred_flush[i].next = 0;
5e0d2a6f 2673 }
2674
5e0d2a6f 2675 list_size = 0;
5e0d2a6f 2676}
2677
2678static void flush_unmaps_timeout(unsigned long data)
2679{
80b20dd8 2680 unsigned long flags;
2681
2682 spin_lock_irqsave(&async_umap_flush_lock, flags);
5e0d2a6f 2683 flush_unmaps();
80b20dd8 2684 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
5e0d2a6f 2685}
2686
2687static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2688{
2689 unsigned long flags;
80b20dd8 2690 int next, iommu_id;
8c11e798 2691 struct intel_iommu *iommu;
5e0d2a6f 2692
2693 spin_lock_irqsave(&async_umap_flush_lock, flags);
80b20dd8 2694 if (list_size == HIGH_WATER_MARK)
2695 flush_unmaps();
2696
8c11e798
WH
2697 iommu = domain_get_iommu(dom);
2698 iommu_id = iommu->seq_id;
c42d9f32 2699
80b20dd8 2700 next = deferred_flush[iommu_id].next;
2701 deferred_flush[iommu_id].domain[next] = dom;
2702 deferred_flush[iommu_id].iova[next] = iova;
2703 deferred_flush[iommu_id].next++;
5e0d2a6f 2704
2705 if (!timer_on) {
2706 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2707 timer_on = 1;
2708 }
2709 list_size++;
2710 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2711}
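/*
 * The deferred path batches unmaps per IOMMU: entries accumulate in
 * deferred_flush[] until either HIGH_WATER_MARK is reached or the 10ms
 * timer fires, at which point flush_unmaps() issues one global IOTLB
 * flush per IOMMU and frees every queued IOVA. The trade-off is a
 * short window during which stale translations remain valid.
 */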
2712
ffbbef5c
FT
2713static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2714 size_t size, enum dma_data_direction dir,
2715 struct dma_attrs *attrs)
ba395927 2716{
ba395927 2717 struct pci_dev *pdev = to_pci_dev(dev);
f76aec76 2718 struct dmar_domain *domain;
d794dc9b 2719 unsigned long start_pfn, last_pfn;
ba395927 2720 struct iova *iova;
8c11e798 2721 struct intel_iommu *iommu;
ba395927 2722
73676832 2723 if (iommu_no_mapping(dev))
f76aec76 2724 return;
2c2e2c38 2725
ba395927
KA
2726 domain = find_domain(pdev);
2727 BUG_ON(!domain);
2728
8c11e798
WH
2729 iommu = domain_get_iommu(domain);
2730
ba395927 2731 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
85b98276
DW
2732 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
2733 (unsigned long long)dev_addr))
ba395927 2734 return;
ba395927 2735
d794dc9b
DW
2736 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2737 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
ba395927 2738
d794dc9b
DW
2739 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2740 pci_name(pdev), start_pfn, last_pfn);
ba395927 2741
f76aec76 2742 /* clear the whole page */
d794dc9b
DW
2743 dma_pte_clear_range(domain, start_pfn, last_pfn);
2744
f76aec76 2745 /* free page tables */
d794dc9b
DW
2746 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2747
5e0d2a6f 2748 if (intel_iommu_strict) {
03d6a246 2749 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
d794dc9b 2750 last_pfn - start_pfn + 1);
5e0d2a6f 2751 /* free iova */
2752 __free_iova(&domain->iovad, iova);
2753 } else {
2754 add_unmap(domain, iova);
2755 /*
 2756 * queue up the release of the unmap; batching saves the ~1/6th
 2757 * of the cpu otherwise used up by the iotlb flush operation...
2758 */
5e0d2a6f 2759 }
ba395927
KA
2760}
2761
d7ab5c46
FT
2762static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2763 dma_addr_t *dma_handle, gfp_t flags)
ba395927
KA
2764{
2765 void *vaddr;
2766 int order;
2767
5b6985ce 2768 size = PAGE_ALIGN(size);
ba395927
KA
2769 order = get_order(size);
2770 flags &= ~(GFP_DMA | GFP_DMA32);
2771
2772 vaddr = (void *)__get_free_pages(flags, order);
2773 if (!vaddr)
2774 return NULL;
2775 memset(vaddr, 0, size);
2776
bb9e6d65
FT
2777 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2778 DMA_BIDIRECTIONAL,
2779 hwdev->coherent_dma_mask);
ba395927
KA
2780 if (*dma_handle)
2781 return vaddr;
2782 free_pages((unsigned long)vaddr, order);
2783 return NULL;
2784}
2785
d7ab5c46
FT
2786static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2787 dma_addr_t dma_handle)
ba395927
KA
2788{
2789 int order;
2790
5b6985ce 2791 size = PAGE_ALIGN(size);
ba395927
KA
2792 order = get_order(size);
2793
0db9b7ae 2794 intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
ba395927
KA
2795 free_pages((unsigned long)vaddr, order);
2796}
2797
d7ab5c46
FT
2798static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2799 int nelems, enum dma_data_direction dir,
2800 struct dma_attrs *attrs)
ba395927 2801{
ba395927
KA
2802 struct pci_dev *pdev = to_pci_dev(hwdev);
2803 struct dmar_domain *domain;
d794dc9b 2804 unsigned long start_pfn, last_pfn;
f76aec76 2805 struct iova *iova;
8c11e798 2806 struct intel_iommu *iommu;
ba395927 2807
73676832 2808 if (iommu_no_mapping(hwdev))
ba395927
KA
2809 return;
2810
2811 domain = find_domain(pdev);
8c11e798
WH
2812 BUG_ON(!domain);
2813
2814 iommu = domain_get_iommu(domain);
ba395927 2815
c03ab37c 2816 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
85b98276
DW
2817 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
2818 (unsigned long long)sglist[0].dma_address))
f76aec76 2819 return;
f76aec76 2820
d794dc9b
DW
2821 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2822 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
f76aec76
KA
2823
2824 /* clear the whole page */
d794dc9b
DW
2825 dma_pte_clear_range(domain, start_pfn, last_pfn);
2826
f76aec76 2827 /* free page tables */
d794dc9b 2828 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
f76aec76 2829
acea0018
DW
2830 if (intel_iommu_strict) {
2831 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
2832 last_pfn - start_pfn + 1);
2833 /* free iova */
2834 __free_iova(&domain->iovad, iova);
2835 } else {
2836 add_unmap(domain, iova);
2837 /*
2838 * queue up the release of the unmap to save the 1/6th of the
2839 * cpu used up by the iotlb flush operation...
2840 */
2841 }
ba395927
KA
2842}
2843
ba395927 2844static int intel_nontranslate_map_sg(struct device *hddev,
c03ab37c 2845 struct scatterlist *sglist, int nelems, int dir)
ba395927
KA
2846{
2847 int i;
c03ab37c 2848 struct scatterlist *sg;
ba395927 2849
c03ab37c 2850 for_each_sg(sglist, sg, nelems, i) {
12d4d40e 2851 BUG_ON(!sg_page(sg));
4cf2e75d 2852 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
c03ab37c 2853 sg->dma_length = sg->length;
ba395927
KA
2854 }
2855 return nelems;
2856}
2857
d7ab5c46
FT
2858static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2859 enum dma_data_direction dir, struct dma_attrs *attrs)
ba395927 2860{
ba395927 2861 int i;
ba395927
KA
2862 struct pci_dev *pdev = to_pci_dev(hwdev);
2863 struct dmar_domain *domain;
f76aec76
KA
2864 size_t size = 0;
2865 int prot = 0;
f76aec76
KA
2867 struct iova *iova = NULL;
2868 int ret;
c03ab37c 2869 struct scatterlist *sg;
b536d24d 2870 unsigned long start_vpfn;
8c11e798 2871 struct intel_iommu *iommu;
ba395927
KA
2872
2873 BUG_ON(dir == DMA_NONE);
73676832 2874 if (iommu_no_mapping(hwdev))
c03ab37c 2875 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
ba395927 2876
f76aec76
KA
2877 domain = get_valid_domain_for_dev(pdev);
2878 if (!domain)
2879 return 0;
2880
8c11e798
WH
2881 iommu = domain_get_iommu(domain);
2882
b536d24d 2883 for_each_sg(sglist, sg, nelems, i)
88cb6a74 2884 size += aligned_nrpages(sg->offset, sg->length);
f76aec76 2885
5a5e02a6
DW
2886 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
2887 pdev->dma_mask);
f76aec76 2888 if (!iova) {
c03ab37c 2889 sglist->dma_length = 0;
f76aec76
KA
2890 return 0;
2891 }
2892
2893 /*
2894 * Check if DMAR supports zero-length reads on write only
2895 * mappings..
2896 */
2897 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
8c11e798 2898 !cap_zlr(iommu->cap))
f76aec76
KA
2899 prot |= DMA_PTE_READ;
2900 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2901 prot |= DMA_PTE_WRITE;
2902
b536d24d 2903 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
e1605495 2904
f532959b 2905 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
e1605495
DW
2906 if (unlikely(ret)) {
2907 /* clear the page */
2908 dma_pte_clear_range(domain, start_vpfn,
2909 start_vpfn + size - 1);
2910 /* free page tables */
2911 dma_pte_free_pagetable(domain, start_vpfn,
2912 start_vpfn + size - 1);
2913 /* free iova */
2914 __free_iova(&domain->iovad, iova);
2915 return 0;
ba395927
KA
2916 }
2917
1f0ef2aa
DW
2918 /* it's a non-present to present mapping. Only flush if caching mode */
2919 if (cap_caching_mode(iommu->cap))
03d6a246 2920 iommu_flush_iotlb_psi(iommu, 0, start_vpfn, size);
1f0ef2aa 2921 else
8c11e798 2922 iommu_flush_write_buffer(iommu);
1f0ef2aa 2923
ba395927
KA
2924 return nelems;
2925}
2926
dfb805e8
FT
2927static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
2928{
2929 return !dma_addr;
2930}
2931
160c1d8e 2932struct dma_map_ops intel_dma_ops = {
ba395927
KA
2933 .alloc_coherent = intel_alloc_coherent,
2934 .free_coherent = intel_free_coherent,
ba395927
KA
2935 .map_sg = intel_map_sg,
2936 .unmap_sg = intel_unmap_sg,
ffbbef5c
FT
2937 .map_page = intel_map_page,
2938 .unmap_page = intel_unmap_page,
dfb805e8 2939 .mapping_error = intel_mapping_error,
ba395927
KA
2940};
2941
2942static inline int iommu_domain_cache_init(void)
2943{
2944 int ret = 0;
2945
2946 iommu_domain_cache = kmem_cache_create("iommu_domain",
2947 sizeof(struct dmar_domain),
2948 0,
2949 SLAB_HWCACHE_ALIGN,
2951 NULL);
2952 if (!iommu_domain_cache) {
2953 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
2954 ret = -ENOMEM;
2955 }
2956
2957 return ret;
2958}
2959
2960static inline int iommu_devinfo_cache_init(void)
2961{
2962 int ret = 0;
2963
2964 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
2965 sizeof(struct device_domain_info),
2966 0,
2967 SLAB_HWCACHE_ALIGN,
ba395927
KA
2968 NULL);
2969 if (!iommu_devinfo_cache) {
2970 printk(KERN_ERR "Couldn't create devinfo cache\n");
2971 ret = -ENOMEM;
2972 }
2973
2974 return ret;
2975}
2976
2977static inline int iommu_iova_cache_init(void)
2978{
2979 int ret = 0;
2980
2981 iommu_iova_cache = kmem_cache_create("iommu_iova",
2982 sizeof(struct iova),
2983 0,
2984 SLAB_HWCACHE_ALIGN,
ba395927
KA
2985 NULL);
2986 if (!iommu_iova_cache) {
2987 printk(KERN_ERR "Couldn't create iova cache\n");
2988 ret = -ENOMEM;
2989 }
2990
2991 return ret;
2992}
2993
2994static int __init iommu_init_mempool(void)
2995{
2996 int ret;
2997 ret = iommu_iova_cache_init();
2998 if (ret)
2999 return ret;
3000
3001 ret = iommu_domain_cache_init();
3002 if (ret)
3003 goto domain_error;
3004
3005 ret = iommu_devinfo_cache_init();
3006 if (!ret)
3007 return ret;
3008
3009 kmem_cache_destroy(iommu_domain_cache);
3010domain_error:
3011 kmem_cache_destroy(iommu_iova_cache);
3012
3013 return -ENOMEM;
3014}
3015
3016static void __init iommu_exit_mempool(void)
3017{
3018 kmem_cache_destroy(iommu_devinfo_cache);
3019 kmem_cache_destroy(iommu_domain_cache);
3020 kmem_cache_destroy(iommu_iova_cache);
3021
3022}
3023
ba395927
KA
3024static void __init init_no_remapping_devices(void)
3025{
3026 struct dmar_drhd_unit *drhd;
3027
3028 for_each_drhd_unit(drhd) {
3029 if (!drhd->include_all) {
3030 int i;
3031 for (i = 0; i < drhd->devices_cnt; i++)
3032 if (drhd->devices[i] != NULL)
3033 break;
3034 /* ignore DMAR unit if no pci devices exist */
3035 if (i == drhd->devices_cnt)
3036 drhd->ignored = 1;
3037 }
3038 }
3039
3040 if (dmar_map_gfx)
3041 return;
3042
3043 for_each_drhd_unit(drhd) {
3044 int i;
3045 if (drhd->ignored || drhd->include_all)
3046 continue;
3047
3048 for (i = 0; i < drhd->devices_cnt; i++)
3049 if (drhd->devices[i] &&
3050 !IS_GFX_DEVICE(drhd->devices[i]))
3051 break;
3052
3053 if (i < drhd->devices_cnt)
3054 continue;
3055
3056 /* bypass IOMMU if it is just for gfx devices */
3057 drhd->ignored = 1;
3058 for (i = 0; i < drhd->devices_cnt; i++) {
3059 if (!drhd->devices[i])
3060 continue;
358dd8ac 3061 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
ba395927
KA
3062 }
3063 }
3064}
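/*
 * Two passes: a DRHD unit whose device scope matched no PCI devices is
 * ignored outright; and when dmar_map_gfx is clear, a unit covering
 * only graphics devices is also ignored and its devices are tagged
 * DUMMY_DEVICE_DOMAIN_INFO so that iommu_no_mapping() bypasses them.
 */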
3065
f59c7b69
FY
3066#ifdef CONFIG_SUSPEND
3067static int init_iommu_hw(void)
3068{
3069 struct dmar_drhd_unit *drhd;
3070 struct intel_iommu *iommu = NULL;
3071
3072 for_each_active_iommu(iommu, drhd)
3073 if (iommu->qi)
3074 dmar_reenable_qi(iommu);
3075
3076 for_each_active_iommu(iommu, drhd) {
3077 iommu_flush_write_buffer(iommu);
3078
3079 iommu_set_root_entry(iommu);
3080
3081 iommu->flush.flush_context(iommu, 0, 0, 0,
1f0ef2aa 3082 DMA_CCMD_GLOBAL_INVL);
f59c7b69 3083 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
1f0ef2aa 3084 DMA_TLB_GLOBAL_FLUSH);
f59c7b69 3085 iommu_enable_translation(iommu);
b94996c9 3086 iommu_disable_protect_mem_regions(iommu);
f59c7b69
FY
3087 }
3088
3089 return 0;
3090}
3091
3092static void iommu_flush_all(void)
3093{
3094 struct dmar_drhd_unit *drhd;
3095 struct intel_iommu *iommu;
3096
3097 for_each_active_iommu(iommu, drhd) {
3098 iommu->flush.flush_context(iommu, 0, 0, 0,
1f0ef2aa 3099 DMA_CCMD_GLOBAL_INVL);
f59c7b69 3100 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
1f0ef2aa 3101 DMA_TLB_GLOBAL_FLUSH);
f59c7b69
FY
3102 }
3103}
3104
3105static int iommu_suspend(struct sys_device *dev, pm_message_t state)
3106{
3107 struct dmar_drhd_unit *drhd;
3108 struct intel_iommu *iommu = NULL;
3109 unsigned long flag;
3110
3111 for_each_active_iommu(iommu, drhd) {
3112 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3113 GFP_ATOMIC);
3114 if (!iommu->iommu_state)
3115 goto nomem;
3116 }
3117
3118 iommu_flush_all();
3119
3120 for_each_active_iommu(iommu, drhd) {
3121 iommu_disable_translation(iommu);
3122
3123 spin_lock_irqsave(&iommu->register_lock, flag);
3124
3125 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3126 readl(iommu->reg + DMAR_FECTL_REG);
3127 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3128 readl(iommu->reg + DMAR_FEDATA_REG);
3129 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3130 readl(iommu->reg + DMAR_FEADDR_REG);
3131 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3132 readl(iommu->reg + DMAR_FEUADDR_REG);
3133
3134 spin_unlock_irqrestore(&iommu->register_lock, flag);
3135 }
3136 return 0;
3137
3138nomem:
3139 for_each_active_iommu(iommu, drhd)
3140 kfree(iommu->iommu_state);
3141
3142 return -ENOMEM;
3143}
3144
3145static int iommu_resume(struct sys_device *dev)
3146{
3147 struct dmar_drhd_unit *drhd;
3148 struct intel_iommu *iommu = NULL;
3149 unsigned long flag;
3150
3151 if (init_iommu_hw()) {
 3152 WARN(1, "IOMMU setup failed, DMAR cannot resume!\n");
3153 return -EIO;
3154 }
3155
3156 for_each_active_iommu(iommu, drhd) {
3157
3158 spin_lock_irqsave(&iommu->register_lock, flag);
3159
3160 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3161 iommu->reg + DMAR_FECTL_REG);
3162 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3163 iommu->reg + DMAR_FEDATA_REG);
3164 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3165 iommu->reg + DMAR_FEADDR_REG);
3166 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3167 iommu->reg + DMAR_FEUADDR_REG);
3168
3169 spin_unlock_irqrestore(&iommu->register_lock, flag);
3170 }
3171
3172 for_each_active_iommu(iommu, drhd)
3173 kfree(iommu->iommu_state);
3174
3175 return 0;
3176}
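/*
 * Note that only the four fault-event registers (FECTL, FEDATA, FEADDR,
 * FEUADDR) are saved across suspend; the root entry, context and IOTLB
 * state are rebuilt from scratch by init_iommu_hw() on resume.
 */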
3177
3178static struct sysdev_class iommu_sysclass = {
3179 .name = "iommu",
3180 .resume = iommu_resume,
3181 .suspend = iommu_suspend,
3182};
3183
3184static struct sys_device device_iommu = {
3185 .cls = &iommu_sysclass,
3186};
3187
3188static int __init init_iommu_sysfs(void)
3189{
3190 int error;
3191
3192 error = sysdev_class_register(&iommu_sysclass);
3193 if (error)
3194 return error;
3195
3196 error = sysdev_register(&device_iommu);
3197 if (error)
3198 sysdev_class_unregister(&iommu_sysclass);
3199
3200 return error;
3201}
3202
3203#else
3204static int __init init_iommu_sysfs(void)
3205{
3206 return 0;
3207}
 3208#endif /* CONFIG_SUSPEND */
3209
ba395927
KA
3210int __init intel_iommu_init(void)
3211{
3212 int ret = 0;
a59b50e9 3213 int force_on = 0;
ba395927 3214
a59b50e9
JC
3215 /* VT-d is required for a TXT/tboot launch, so enforce that */
3216 force_on = tboot_force_iommu();
3217
3218 if (dmar_table_init()) {
3219 if (force_on)
3220 panic("tboot: Failed to initialize DMAR table\n");
ba395927 3221 return -ENODEV;
a59b50e9 3222 }
ba395927 3223
a59b50e9
JC
3224 if (dmar_dev_scope_init()) {
3225 if (force_on)
3226 panic("tboot: Failed to initialize DMAR device scope\n");
1886e8a9 3227 return -ENODEV;
a59b50e9 3228 }
1886e8a9 3229
2ae21010
SS
3230 /*
3231 * Check the need for DMA-remapping initialization now.
3232 * Above initialization will also be used by Interrupt-remapping.
3233 */
19943b0e 3234 if (no_iommu || swiotlb || dmar_disabled)
2ae21010
SS
3235 return -ENODEV;
3236
ba395927
KA
3237 iommu_init_mempool();
3238 dmar_init_reserved_ranges();
3239
3240 init_no_remapping_devices();
3241
3242 ret = init_dmars();
3243 if (ret) {
a59b50e9
JC
3244 if (force_on)
3245 panic("tboot: Failed to initialize DMARs\n");
ba395927
KA
3246 printk(KERN_ERR "IOMMU: dmar init failed\n");
3247 put_iova_domain(&reserved_iova_list);
3248 iommu_exit_mempool();
3249 return ret;
3250 }
3251 printk(KERN_INFO
3252 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3253
5e0d2a6f 3254 init_timer(&unmap_timer);
ba395927 3255 force_iommu = 1;
19943b0e 3256 dma_ops = &intel_dma_ops;
4ed0d3e6 3257
f59c7b69 3258 init_iommu_sysfs();
a8bcbb0d
JR
3259
3260 register_iommu(&intel_iommu_ops);
3261
ba395927
KA
3262 return 0;
3263}
e820482c 3264
3199aa6b
HW
3265static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3266 struct pci_dev *pdev)
3267{
3268 struct pci_dev *tmp, *parent;
3269
3270 if (!iommu || !pdev)
3271 return;
3272
3273 /* dependent device detach */
3274 tmp = pci_find_upstream_pcie_bridge(pdev);
3275 /* Secondary interface's bus number and devfn 0 */
3276 if (tmp) {
3277 parent = pdev->bus->self;
3278 while (parent != tmp) {
3279 iommu_detach_dev(iommu, parent->bus->number,
276dbf99 3280 parent->devfn);
3199aa6b
HW
3281 parent = parent->bus->self;
3282 }
3283 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
3284 iommu_detach_dev(iommu,
3285 tmp->subordinate->number, 0);
3286 else /* this is a legacy PCI bridge */
276dbf99
DW
3287 iommu_detach_dev(iommu, tmp->bus->number,
3288 tmp->devfn);
3199aa6b
HW
3289 }
3290}
3291
2c2e2c38 3292static void domain_remove_one_dev_info(struct dmar_domain *domain,
c7151a8d
WH
3293 struct pci_dev *pdev)
3294{
3295 struct device_domain_info *info;
3296 struct intel_iommu *iommu;
3297 unsigned long flags;
3298 int found = 0;
3299 struct list_head *entry, *tmp;
3300
276dbf99
DW
3301 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3302 pdev->devfn);
c7151a8d
WH
3303 if (!iommu)
3304 return;
3305
3306 spin_lock_irqsave(&device_domain_lock, flags);
3307 list_for_each_safe(entry, tmp, &domain->devices) {
3308 info = list_entry(entry, struct device_domain_info, link);
276dbf99 3309 /* No need to compare PCI domain; it has to be the same */
c7151a8d
WH
3310 if (info->bus == pdev->bus->number &&
3311 info->devfn == pdev->devfn) {
3312 list_del(&info->link);
3313 list_del(&info->global);
3314 if (info->dev)
3315 info->dev->dev.archdata.iommu = NULL;
3316 spin_unlock_irqrestore(&device_domain_lock, flags);
3317
93a23a72 3318 iommu_disable_dev_iotlb(info);
c7151a8d 3319 iommu_detach_dev(iommu, info->bus, info->devfn);
3199aa6b 3320 iommu_detach_dependent_devices(iommu, pdev);
c7151a8d
WH
3321 free_devinfo_mem(info);
3322
3323 spin_lock_irqsave(&device_domain_lock, flags);
3324
3325 if (found)
3326 break;
3327 else
3328 continue;
3329 }
3330
 3331 /* if there are no other devices under the same iommu
 3332 * owned by this domain, clear this iommu in iommu_bmp,
 3333 * update iommu count and coherency
3334 */
276dbf99
DW
3335 if (iommu == device_to_iommu(info->segment, info->bus,
3336 info->devfn))
c7151a8d
WH
3337 found = 1;
3338 }
3339
3340 if (found == 0) {
3341 unsigned long tmp_flags;
3342 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
3343 clear_bit(iommu->seq_id, &domain->iommu_bmp);
3344 domain->iommu_count--;
58c610bd 3345 domain_update_iommu_cap(domain);
c7151a8d
WH
3346 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
3347 }
3348
3349 spin_unlock_irqrestore(&device_domain_lock, flags);
3350}
3351
3352static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3353{
3354 struct device_domain_info *info;
3355 struct intel_iommu *iommu;
3356 unsigned long flags1, flags2;
3357
3358 spin_lock_irqsave(&device_domain_lock, flags1);
3359 while (!list_empty(&domain->devices)) {
3360 info = list_entry(domain->devices.next,
3361 struct device_domain_info, link);
3362 list_del(&info->link);
3363 list_del(&info->global);
3364 if (info->dev)
3365 info->dev->dev.archdata.iommu = NULL;
3366
3367 spin_unlock_irqrestore(&device_domain_lock, flags1);
3368
93a23a72 3369 iommu_disable_dev_iotlb(info);
276dbf99 3370 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
c7151a8d 3371 iommu_detach_dev(iommu, info->bus, info->devfn);
3199aa6b 3372 iommu_detach_dependent_devices(iommu, info->dev);
c7151a8d
WH
3373
3374 /* clear this iommu in iommu_bmp, update iommu count
58c610bd 3375 * and capabilities
c7151a8d
WH
3376 */
3377 spin_lock_irqsave(&domain->iommu_lock, flags2);
3378 if (test_and_clear_bit(iommu->seq_id,
3379 &domain->iommu_bmp)) {
3380 domain->iommu_count--;
58c610bd 3381 domain_update_iommu_cap(domain);
c7151a8d
WH
3382 }
3383 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3384
3385 free_devinfo_mem(info);
3386 spin_lock_irqsave(&device_domain_lock, flags1);
3387 }
3388 spin_unlock_irqrestore(&device_domain_lock, flags1);
3389}
3390
5e98c4b1
WH
3391/* domain id for virtual machine, it won't be set in context */
3392static unsigned long vm_domid;
3393
fe40f1e0
WH
3394static int vm_domain_min_agaw(struct dmar_domain *domain)
3395{
3396 int i;
3397 int min_agaw = domain->agaw;
3398
3399 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
3400 for (; i < g_num_of_iommus; ) {
3401 if (min_agaw > g_iommus[i]->agaw)
3402 min_agaw = g_iommus[i]->agaw;
3403
3404 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
3405 }
3406
3407 return min_agaw;
3408}
3409
5e98c4b1
WH
3410static struct dmar_domain *iommu_alloc_vm_domain(void)
3411{
3412 struct dmar_domain *domain;
3413
3414 domain = alloc_domain_mem();
3415 if (!domain)
3416 return NULL;
3417
3418 domain->id = vm_domid++;
3419 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
3420 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3421
3422 return domain;
3423}
3424
2c2e2c38 3425static int md_domain_init(struct dmar_domain *domain, int guest_width)
5e98c4b1
WH
3426{
3427 int adjust_width;
3428
3429 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
5e98c4b1
WH
3430 spin_lock_init(&domain->iommu_lock);
3431
3432 domain_reserve_special_ranges(domain);
3433
3434 /* calculate AGAW */
3435 domain->gaw = guest_width;
3436 adjust_width = guestwidth_to_adjustwidth(guest_width);
3437 domain->agaw = width_to_agaw(adjust_width);
3438
3439 INIT_LIST_HEAD(&domain->devices);
3440
3441 domain->iommu_count = 0;
3442 domain->iommu_coherency = 0;
c5b15255 3443 domain->iommu_snooping = 0;
fe40f1e0 3444 domain->max_addr = 0;
5e98c4b1
WH
3445
3446 /* always allocate the top pgd */
3447 domain->pgd = (struct dma_pte *)alloc_pgtable_page();
3448 if (!domain->pgd)
3449 return -ENOMEM;
3450 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3451 return 0;
3452}
3453
3454static void iommu_free_vm_domain(struct dmar_domain *domain)
3455{
3456 unsigned long flags;
3457 struct dmar_drhd_unit *drhd;
3458 struct intel_iommu *iommu;
3459 unsigned long i;
3460 unsigned long ndomains;
3461
3462 for_each_drhd_unit(drhd) {
3463 if (drhd->ignored)
3464 continue;
3465 iommu = drhd->iommu;
3466
3467 ndomains = cap_ndoms(iommu->cap);
3468 i = find_first_bit(iommu->domain_ids, ndomains);
3469 for (; i < ndomains; ) {
3470 if (iommu->domains[i] == domain) {
3471 spin_lock_irqsave(&iommu->lock, flags);
3472 clear_bit(i, iommu->domain_ids);
3473 iommu->domains[i] = NULL;
3474 spin_unlock_irqrestore(&iommu->lock, flags);
3475 break;
3476 }
3477 i = find_next_bit(iommu->domain_ids, ndomains, i+1);
3478 }
3479 }
3480}
3481
3482static void vm_domain_exit(struct dmar_domain *domain)
3483{
5e98c4b1
WH
 3484 /* Domain 0 is reserved, so don't process it */
3485 if (!domain)
3486 return;
3487
3488 vm_domain_remove_all_dev_info(domain);
3489 /* destroy iovas */
3490 put_iova_domain(&domain->iovad);
5e98c4b1
WH
3491
3492 /* clear ptes */
595badf5 3493 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
5e98c4b1
WH
3494
3495 /* free page tables */
d794dc9b 3496 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
5e98c4b1
WH
3497
3498 iommu_free_vm_domain(domain);
3499 free_domain_mem(domain);
3500}
3501
5d450806 3502static int intel_iommu_domain_init(struct iommu_domain *domain)
38717946 3503{
5d450806 3504 struct dmar_domain *dmar_domain;
38717946 3505
5d450806
JR
3506 dmar_domain = iommu_alloc_vm_domain();
3507 if (!dmar_domain) {
38717946 3508 printk(KERN_ERR
5d450806
JR
3509 "intel_iommu_domain_init: dmar_domain == NULL\n");
3510 return -ENOMEM;
38717946 3511 }
2c2e2c38 3512 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
38717946 3513 printk(KERN_ERR
5d450806
JR
3514 "intel_iommu_domain_init() failed\n");
3515 vm_domain_exit(dmar_domain);
3516 return -ENOMEM;
38717946 3517 }
5d450806 3518 domain->priv = dmar_domain;
faa3d6f5 3519
5d450806 3520 return 0;
38717946 3521}
38717946 3522
5d450806 3523static void intel_iommu_domain_destroy(struct iommu_domain *domain)
38717946 3524{
5d450806
JR
3525 struct dmar_domain *dmar_domain = domain->priv;
3526
3527 domain->priv = NULL;
3528 vm_domain_exit(dmar_domain);
38717946 3529}
38717946 3530
4c5478c9
JR
3531static int intel_iommu_attach_device(struct iommu_domain *domain,
3532 struct device *dev)
38717946 3533{
4c5478c9
JR
3534 struct dmar_domain *dmar_domain = domain->priv;
3535 struct pci_dev *pdev = to_pci_dev(dev);
fe40f1e0
WH
3536 struct intel_iommu *iommu;
3537 int addr_width;
3538 u64 end;
faa3d6f5
WH
3539
3540 /* normally pdev is not mapped */
3541 if (unlikely(domain_context_mapped(pdev))) {
3542 struct dmar_domain *old_domain;
3543
3544 old_domain = find_domain(pdev);
3545 if (old_domain) {
2c2e2c38
FY
3546 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
3547 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
3548 domain_remove_one_dev_info(old_domain, pdev);
faa3d6f5
WH
3549 else
3550 domain_remove_dev_info(old_domain);
3551 }
3552 }
3553
276dbf99
DW
3554 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3555 pdev->devfn);
fe40f1e0
WH
3556 if (!iommu)
3557 return -ENODEV;
3558
3559 /* check if this iommu agaw is sufficient for max mapped address */
3560 addr_width = agaw_to_width(iommu->agaw);
3561 end = DOMAIN_MAX_ADDR(addr_width);
3562 end = end & VTD_PAGE_MASK;
4c5478c9 3563 if (end < dmar_domain->max_addr) {
fe40f1e0
WH
3564 printk(KERN_ERR "%s: iommu agaw (%d) is not "
3565 "sufficient for the mapped address (%llx)\n",
4c5478c9 3566 __func__, iommu->agaw, dmar_domain->max_addr);
fe40f1e0
WH
3567 return -EFAULT;
3568 }
3569
5fe60f4e 3570 return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
38717946 3571}
38717946 3572
4c5478c9
JR
3573static void intel_iommu_detach_device(struct iommu_domain *domain,
3574 struct device *dev)
38717946 3575{
4c5478c9
JR
3576 struct dmar_domain *dmar_domain = domain->priv;
3577 struct pci_dev *pdev = to_pci_dev(dev);
3578
2c2e2c38 3579 domain_remove_one_dev_info(dmar_domain, pdev);
faa3d6f5 3580}
c7151a8d 3581
dde57a21
JR
3582static int intel_iommu_map_range(struct iommu_domain *domain,
3583 unsigned long iova, phys_addr_t hpa,
3584 size_t size, int iommu_prot)
faa3d6f5 3585{
dde57a21 3586 struct dmar_domain *dmar_domain = domain->priv;
fe40f1e0
WH
3587 u64 max_addr;
3588 int addr_width;
dde57a21 3589 int prot = 0;
faa3d6f5 3590 int ret;
fe40f1e0 3591
dde57a21
JR
3592 if (iommu_prot & IOMMU_READ)
3593 prot |= DMA_PTE_READ;
3594 if (iommu_prot & IOMMU_WRITE)
3595 prot |= DMA_PTE_WRITE;
9cf06697
SY
3596 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
3597 prot |= DMA_PTE_SNP;
dde57a21 3598
163cc52c 3599 max_addr = iova + size;
dde57a21 3600 if (dmar_domain->max_addr < max_addr) {
fe40f1e0
WH
3601 int min_agaw;
3602 u64 end;
3603
3604 /* check if minimum agaw is sufficient for mapped address */
dde57a21 3605 min_agaw = vm_domain_min_agaw(dmar_domain);
fe40f1e0
WH
3606 addr_width = agaw_to_width(min_agaw);
3607 end = DOMAIN_MAX_ADDR(addr_width);
3608 end = end & VTD_PAGE_MASK;
3609 if (end < max_addr) {
3610 printk(KERN_ERR "%s: iommu agaw (%d) is not "
3611 "sufficient for the mapped address (%llx)\n",
3612 __func__, min_agaw, max_addr);
3613 return -EFAULT;
3614 }
dde57a21 3615 dmar_domain->max_addr = max_addr;
fe40f1e0 3616 }
ad051221
DW
3617 /* Round up size to next multiple of PAGE_SIZE, if it and
3618 the low bits of hpa would take us onto the next page */
88cb6a74 3619 size = aligned_nrpages(hpa, size);
ad051221
DW
3620 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
3621 hpa >> VTD_PAGE_SHIFT, size, prot);
faa3d6f5 3622 return ret;
38717946 3623}
38717946 3624
dde57a21
JR
3625static void intel_iommu_unmap_range(struct iommu_domain *domain,
3626 unsigned long iova, size_t size)
38717946 3627{
dde57a21 3628 struct dmar_domain *dmar_domain = domain->priv;
faa3d6f5 3629
4b99d352
SY
3630 if (!size)
3631 return;
3632
163cc52c
DW
3633 dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
3634 (iova + size - 1) >> VTD_PAGE_SHIFT);
fe40f1e0 3635
163cc52c
DW
3636 if (dmar_domain->max_addr == iova + size)
3637 dmar_domain->max_addr = iova;
38717946 3638}
38717946 3639
d14d6577
JR
3640static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
3641 unsigned long iova)
38717946 3642{
d14d6577 3643 struct dmar_domain *dmar_domain = domain->priv;
38717946 3644 struct dma_pte *pte;
faa3d6f5 3645 u64 phys = 0;
38717946 3646
b026fd28 3647 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
38717946 3648 if (pte)
faa3d6f5 3649 phys = dma_pte_addr(pte);
38717946 3650
faa3d6f5 3651 return phys;
38717946 3652}
a8bcbb0d 3653
dbb9fd86
SY
3654static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
3655 unsigned long cap)
3656{
3657 struct dmar_domain *dmar_domain = domain->priv;
3658
3659 if (cap == IOMMU_CAP_CACHE_COHERENCY)
3660 return dmar_domain->iommu_snooping;
3661
3662 return 0;
3663}
3664
a8bcbb0d
JR
3665static struct iommu_ops intel_iommu_ops = {
3666 .domain_init = intel_iommu_domain_init,
3667 .domain_destroy = intel_iommu_domain_destroy,
3668 .attach_dev = intel_iommu_attach_device,
3669 .detach_dev = intel_iommu_detach_device,
3670 .map = intel_iommu_map_range,
3671 .unmap = intel_iommu_unmap_range,
3672 .iova_to_phys = intel_iommu_iova_to_phys,
dbb9fd86 3673 .domain_has_cap = intel_iommu_domain_has_cap,
a8bcbb0d 3674};
9af88143
DW
3675
3676static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
3677{
3678 /*
3679 * Mobile 4 Series Chipset neglects to set RWBF capability,
3680 * but needs it:
3681 */
3682 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
3683 rwbf_quirk = 1;
3684}
3685
3686DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
e0fc7e0b
DW
3687
3688/* On Tylersburg chipsets, some BIOSes have been known to enable the
3689 ISOCH DMAR unit for the Azalia sound device, but not give it any
3690 TLB entries, which causes it to deadlock. Check for that. We do
3691 this in a function called from init_dmars(), instead of in a PCI
3692 quirk, because we don't want to print the obnoxious "BIOS broken"
3693 message if VT-d is actually disabled.
3694*/
3695static void __init check_tylersburg_isoch(void)
3696{
3697 struct pci_dev *pdev;
3698 uint32_t vtisochctrl;
3699
3700 /* If there's no Azalia in the system anyway, forget it. */
3701 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
3702 if (!pdev)
3703 return;
3704 pci_dev_put(pdev);
3705
3706 /* System Management Registers. Might be hidden, in which case
3707 we can't do the sanity check. But that's OK, because the
3708 known-broken BIOSes _don't_ actually hide it, so far. */
3709 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
3710 if (!pdev)
3711 return;
3712
3713 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
3714 pci_dev_put(pdev);
3715 return;
3716 }
3717
3718 pci_dev_put(pdev);
3719
3720 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
3721 if (vtisochctrl & 1)
3722 return;
3723
3724 /* Drop all bits other than the number of TLB entries */
3725 vtisochctrl &= 0x1c;
3726
3727 /* If we have the recommended number of TLB entries (16), fine. */
3728 if (vtisochctrl == 0x10)
3729 return;
3730
3731 /* Zero TLB entries? You get to ride the short bus to school. */
3732 if (!vtisochctrl) {
3733 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
3734 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
3735 dmi_get_system_info(DMI_BIOS_VENDOR),
3736 dmi_get_system_info(DMI_BIOS_VERSION),
3737 dmi_get_system_info(DMI_PRODUCT_VERSION));
3738 iommu_identity_mapping |= IDENTMAP_AZALIA;
3739 return;
3740 }
3741
3742 printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
3743 vtisochctrl);
3744}
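/*
 * As the code above reads VTISOCHCTRL (config offset 0x188 of the
 * 0x342e System Management device): bit 0 set means Azalia DMA is
 * routed to the non-isoch DMAR unit, and bits 4:2 (mask 0x1c) carry
 * the number of isoch TLB entries, with 0x10 (16) the recommended
 * value.
 */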