2 * Copyright (c) 2006, Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
21 * Author: Fenghua Yu <fenghua.yu@intel.com>
24 #include <linux/init.h>
25 #include <linux/bitmap.h>
26 #include <linux/debugfs.h>
27 #include <linux/slab.h>
28 #include <linux/irq.h>
29 #include <linux/interrupt.h>
30 #include <linux/spinlock.h>
31 #include <linux/pci.h>
32 #include <linux/dmar.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/mempool.h>
35 #include <linux/timer.h>
36 #include <linux/iova.h>
37 #include <linux/iommu.h>
38 #include <linux/intel-iommu.h>
39 #include <linux/sysdev.h>
40 #include <asm/cacheflush.h>
41 #include <asm/iommu.h>
44 #define ROOT_SIZE VTD_PAGE_SIZE
45 #define CONTEXT_SIZE VTD_PAGE_SIZE
47 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
48 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
50 #define IOAPIC_RANGE_START (0xfee00000)
51 #define IOAPIC_RANGE_END (0xfeefffff)
52 #define IOVA_START_ADDR (0x1000)
54 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
56 #define MAX_AGAW_WIDTH 64
58 #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
59 #define DOMAIN_MAX_PFN(gaw) ((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
61 #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
62 #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
63 #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
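/*
 * Worked example for the macros above, assuming 4KiB pages on both the
 * CPU and the VT-d side (PAGE_SHIFT == VTD_PAGE_SHIFT == 12): with the
 * default 48-bit domain width, DOMAIN_MAX_ADDR(48) is 0xffffffffffff and
 * DOMAIN_MAX_PFN(48) is 2^36 - 1 VT-d page frames.  IOVA_PFN() works in
 * CPU page units, so DMA_32BIT_PFN is 0xfffff and DMA_64BIT_PFN is 2^52 - 1.
 */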
66 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
67 are never going to work. */
68 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
70 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
73 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
75 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
77 static inline unsigned long page_to_dma_pfn(struct page *pg)
79 return mm_to_dma_pfn(page_to_pfn(pg));
81 static inline unsigned long virt_to_dma_pfn(void *p)
83 return page_to_dma_pfn(virt_to_page(p));
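/*
 * Note: dma pfn values are in VTD_PAGE_SIZE (4KiB) units, while mm pfn
 * values are in PAGE_SIZE units.  On a kernel built with 4KiB pages the
 * two shifts are equal and the conversions above are no-ops; with larger
 * kernel pages (e.g. 16KiB, PAGE_SHIFT 14) one mm page corresponds to
 * four VT-d pages.
 */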
86 /* global iommu list, set NULL for ignored DMAR units */
87 static struct intel_iommu **g_iommus;
89 static int rwbf_quirk;
94 * 12-63: Context Ptr (12 - (haw-1))
101 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
102 static inline bool root_present(struct root_entry *root)
104 return (root->val & 1);
106 static inline void set_root_present(struct root_entry *root)
110 static inline void set_root_value(struct root_entry *root, unsigned long value)
112 root->val |= value & VTD_PAGE_MASK;
115 static inline struct context_entry *
116 get_context_addr_from_root(struct root_entry *root)
118 return (struct context_entry *)
119 (root_present(root)?phys_to_virt(
120 root->val & VTD_PAGE_MASK) :
127 * 1: fault processing disable
128 * 2-3: translation type
129 * 12-63: address space root
135 struct context_entry {
140 static inline bool context_present(struct context_entry *context)
142 return (context->lo & 1);
144 static inline void context_set_present(struct context_entry *context)
149 static inline void context_set_fault_enable(struct context_entry *context)
151 context->lo &= (((u64)-1) << 2) | 1;
154 static inline void context_set_translation_type(struct context_entry *context,
157 context->lo &= (((u64)-1) << 4) | 3;
158 context->lo |= (value & 3) << 2;
161 static inline void context_set_address_root(struct context_entry *context,
164 context->lo |= value & VTD_PAGE_MASK;
167 static inline void context_set_address_width(struct context_entry *context,
170 context->hi |= value & 7;
173 static inline void context_set_domain_id(struct context_entry *context,
176 context->hi |= (value & ((1 << 16) - 1)) << 8;
179 static inline void context_clear_entry(struct context_entry *context)
192 * 12-63: Host physical address
198 static inline void dma_clear_pte(struct dma_pte *pte)
203 static inline void dma_set_pte_readable(struct dma_pte *pte)
205 pte->val |= DMA_PTE_READ;
208 static inline void dma_set_pte_writable(struct dma_pte *pte)
210 pte->val |= DMA_PTE_WRITE;
213 static inline void dma_set_pte_snp(struct dma_pte *pte)
215 pte->val |= DMA_PTE_SNP;
218 static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
220 pte->val = (pte->val & ~3) | (prot & 3);
223 static inline u64 dma_pte_addr(struct dma_pte *pte)
226 return pte->val & VTD_PAGE_MASK;
228 /* Must have a full atomic 64-bit read */
229 return __cmpxchg64(pte, 0ULL, 0ULL) & VTD_PAGE_MASK;
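/*
 * The __cmpxchg64() above is used purely for its atomic 64-bit *read*:
 * it compares pte->val against 0 and would only ever write 0 back, so a
 * populated entry is never modified, but the returned old value is a
 * single untorn 64-bit load even on 32-bit kernels where a plain read
 * of a u64 could tear.
 */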
233 static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
235 pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
238 static inline bool dma_pte_present(struct dma_pte *pte)
240 return (pte->val & 3) != 0;
243 static inline int first_pte_in_page(struct dma_pte *pte)
245 return !((unsigned long)pte & ~VTD_PAGE_MASK);
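/*
 * A pte is "first in page" when its address has no bits set below
 * VTD_PAGE_SHIFT, i.e. it sits at the very start of a page-table page.
 * Callers such as dma_pte_clear_range() use this to batch the CPU cache
 * flush of modified ptes: they flush once per page-table page instead of
 * once per pte.
 */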
249 * This domain is a static identity mapping domain.
250 * 1. This domain creates a static 1:1 mapping to all usable memory.
251 * 2. It maps to each iommu if successful.
252 * 3. Each iommu maps to this domain if successful.
254 static struct dmar_domain *si_domain;
255 static int hw_pass_through = 1;
257 /* devices under the same p2p bridge are owned in one domain */
258 #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
260 /* domain represents a virtual machine; more than one device
261 * across iommus may be owned by one domain, e.g. a kvm guest.
263 #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1)
265 /* si_domain contains multiple devices */
266 #define DOMAIN_FLAG_STATIC_IDENTITY (1 << 2)
269 int id; /* domain id */
270 unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/
272 struct list_head devices; /* all devices' list */
273 struct iova_domain iovad; /* iova's that belong to this domain */
275 struct dma_pte *pgd; /* virtual address */
276 int gaw; /* max guest address width */
278 /* adjusted guest address width, 0 is level 2 30-bit */
281 int flags; /* flags to find out type of domain */
283 int iommu_coherency;/* indicate coherency of iommu access */
284 int iommu_snooping; /* indicate snooping control feature*/
285 int iommu_count; /* reference count of iommu */
286 spinlock_t iommu_lock; /* protect iommu set in domain */
287 u64 max_addr; /* maximum mapped address */
290 /* PCI domain-device relationship */
291 struct device_domain_info {
292 struct list_head link; /* link to domain siblings */
293 struct list_head global; /* link to global list */
294 int segment; /* PCI domain */
295 u8 bus; /* PCI bus number */
296 u8 devfn; /* PCI devfn number */
297 struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
298 struct intel_iommu *iommu; /* IOMMU used by this device */
299 struct dmar_domain *domain; /* pointer to domain */
302 static void flush_unmaps_timeout(unsigned long data);
304 DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
306 #define HIGH_WATER_MARK 250
307 struct deferred_flush_tables {
309 struct iova *iova[HIGH_WATER_MARK];
310 struct dmar_domain *domain[HIGH_WATER_MARK];
313 static struct deferred_flush_tables *deferred_flush;
315 /* bitmap for indexing intel_iommus */
316 static int g_num_of_iommus;
318 static DEFINE_SPINLOCK(async_umap_flush_lock);
319 static LIST_HEAD(unmaps_to_do);
322 static long list_size;
324 static void domain_remove_dev_info(struct dmar_domain *domain);
326 #ifdef CONFIG_DMAR_DEFAULT_ON
327 int dmar_disabled = 0;
329 int dmar_disabled = 1;
330 #endif /*CONFIG_DMAR_DEFAULT_ON*/
332 static int __initdata dmar_map_gfx = 1;
333 static int dmar_forcedac;
334 static int intel_iommu_strict;
336 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
337 static DEFINE_SPINLOCK(device_domain_lock);
338 static LIST_HEAD(device_domain_list);
340 static struct iommu_ops intel_iommu_ops;
342 static int __init intel_iommu_setup(char *str)
347 if (!strncmp(str, "on", 2)) {
349 printk(KERN_INFO "Intel-IOMMU: enabled\n");
350 } else if (!strncmp(str, "off", 3)) {
352 printk(KERN_INFO "Intel-IOMMU: disabled\n");
353 } else if (!strncmp(str, "igfx_off", 8)) {
356 "Intel-IOMMU: disable GFX device mapping\n");
357 } else if (!strncmp(str, "forcedac", 8)) {
359 "Intel-IOMMU: Forcing DAC for PCI devices\n");
361 } else if (!strncmp(str, "strict", 6)) {
363 "Intel-IOMMU: disable batched IOTLB flush\n");
364 intel_iommu_strict = 1;
367 str += strcspn(str, ",");
373 __setup("intel_iommu=", intel_iommu_setup);
375 static struct kmem_cache *iommu_domain_cache;
376 static struct kmem_cache *iommu_devinfo_cache;
377 static struct kmem_cache *iommu_iova_cache;
379 static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
384 /* trying to avoid low memory issues */
385 flags = current->flags & PF_MEMALLOC;
386 current->flags |= PF_MEMALLOC;
387 vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
388 current->flags &= (~PF_MEMALLOC | flags);
393 static inline void *alloc_pgtable_page(void)
398 /* trying to avoid low memory issues */
399 flags = current->flags & PF_MEMALLOC;
400 current->flags |= PF_MEMALLOC;
401 vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
402 current->flags &= (~PF_MEMALLOC | flags);
406 static inline void free_pgtable_page(void *vaddr)
408 free_page((unsigned long)vaddr);
411 static inline void *alloc_domain_mem(void)
413 return iommu_kmem_cache_alloc(iommu_domain_cache);
416 static void free_domain_mem(void *vaddr)
418 kmem_cache_free(iommu_domain_cache, vaddr);
421 static inline void * alloc_devinfo_mem(void)
423 return iommu_kmem_cache_alloc(iommu_devinfo_cache);
426 static inline void free_devinfo_mem(void *vaddr)
428 kmem_cache_free(iommu_devinfo_cache, vaddr);
431 struct iova *alloc_iova_mem(void)
433 return iommu_kmem_cache_alloc(iommu_iova_cache);
436 void free_iova_mem(struct iova *iova)
438 kmem_cache_free(iommu_iova_cache, iova);
442 static inline int width_to_agaw(int width);
444 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
449 sagaw = cap_sagaw(iommu->cap);
450 for (agaw = width_to_agaw(max_gaw);
452 if (test_bit(agaw, &sagaw))
460 * Calculate max SAGAW for each iommu.
462 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
464 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
468 * calculate agaw for each iommu.
469 * "SAGAW" may be different across iommus; use a default agaw, and
470 * get a smaller supported agaw for iommus that don't support the default agaw.
472 int iommu_calculate_agaw(struct intel_iommu *iommu)
474 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
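/*
 * Worked example, assuming the architected SAGAW encoding where bit n
 * means (n + 2)-level tables covering 30 + 9*n address bits: the default
 * 48-bit width maps to agaw 2 (4-level tables).  If an iommu only
 * advertises 3-level support (sagaw bit 1), __iommu_calculate_agaw()
 * walks down and returns agaw 1, i.e. a 39-bit domain.
 */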
477 /* This function only returns a single iommu in a domain */
478 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
482 /* si_domain and vm domain should not get here. */
483 BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
484 BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);
486 iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
487 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
490 return g_iommus[iommu_id];
493 static void domain_update_iommu_coherency(struct dmar_domain *domain)
497 domain->iommu_coherency = 1;
499 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
500 for (; i < g_num_of_iommus; ) {
501 if (!ecap_coherent(g_iommus[i]->ecap)) {
502 domain->iommu_coherency = 0;
505 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
509 static void domain_update_iommu_snooping(struct dmar_domain *domain)
513 domain->iommu_snooping = 1;
515 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
516 for (; i < g_num_of_iommus; ) {
517 if (!ecap_sc_support(g_iommus[i]->ecap)) {
518 domain->iommu_snooping = 0;
521 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
525 /* Some capabilities may be different across iommus */
526 static void domain_update_iommu_cap(struct dmar_domain *domain)
528 domain_update_iommu_coherency(domain);
529 domain_update_iommu_snooping(domain);
532 static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
534 struct dmar_drhd_unit *drhd = NULL;
537 for_each_drhd_unit(drhd) {
540 if (segment != drhd->segment)
543 for (i = 0; i < drhd->devices_cnt; i++) {
544 if (drhd->devices[i] &&
545 drhd->devices[i]->bus->number == bus &&
546 drhd->devices[i]->devfn == devfn)
548 if (drhd->devices[i] &&
549 drhd->devices[i]->subordinate &&
550 drhd->devices[i]->subordinate->number <= bus &&
551 drhd->devices[i]->subordinate->subordinate >= bus)
555 if (drhd->include_all)
562 static void domain_flush_cache(struct dmar_domain *domain,
563 void *addr, int size)
565 if (!domain->iommu_coherency)
566 clflush_cache_range(addr, size);
569 /* Gets context entry for a given bus and devfn */
570 static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
573 struct root_entry *root;
574 struct context_entry *context;
575 unsigned long phy_addr;
578 spin_lock_irqsave(&iommu->lock, flags);
579 root = &iommu->root_entry[bus];
580 context = get_context_addr_from_root(root);
582 context = (struct context_entry *)alloc_pgtable_page();
584 spin_unlock_irqrestore(&iommu->lock, flags);
587 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
588 phy_addr = virt_to_phys((void *)context);
589 set_root_value(root, phy_addr);
590 set_root_present(root);
591 __iommu_flush_cache(iommu, root, sizeof(*root));
593 spin_unlock_irqrestore(&iommu->lock, flags);
594 return &context[devfn];
597 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
599 struct root_entry *root;
600 struct context_entry *context;
604 spin_lock_irqsave(&iommu->lock, flags);
605 root = &iommu->root_entry[bus];
606 context = get_context_addr_from_root(root);
611 ret = context_present(&context[devfn]);
613 spin_unlock_irqrestore(&iommu->lock, flags);
617 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
619 struct root_entry *root;
620 struct context_entry *context;
623 spin_lock_irqsave(&iommu->lock, flags);
624 root = &iommu->root_entry[bus];
625 context = get_context_addr_from_root(root);
627 context_clear_entry(&context[devfn]);
628 __iommu_flush_cache(iommu, &context[devfn], \
631 spin_unlock_irqrestore(&iommu->lock, flags);
634 static void free_context_table(struct intel_iommu *iommu)
636 struct root_entry *root;
639 struct context_entry *context;
641 spin_lock_irqsave(&iommu->lock, flags);
642 if (!iommu->root_entry) {
645 for (i = 0; i < ROOT_ENTRY_NR; i++) {
646 root = &iommu->root_entry[i];
647 context = get_context_addr_from_root(root);
649 free_pgtable_page(context);
651 free_pgtable_page(iommu->root_entry);
652 iommu->root_entry = NULL;
654 spin_unlock_irqrestore(&iommu->lock, flags);
657 /* page table handling */
658 #define LEVEL_STRIDE (9)
659 #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
661 static inline int agaw_to_level(int agaw)
666 static inline int agaw_to_width(int agaw)
668 return 30 + agaw * LEVEL_STRIDE;
672 static inline int width_to_agaw(int width)
674 return (width - 30) / LEVEL_STRIDE;
677 static inline unsigned int level_to_offset_bits(int level)
679 return (level - 1) * LEVEL_STRIDE;
682 static inline int pfn_level_offset(unsigned long pfn, int level)
684 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
687 static inline unsigned long level_mask(int level)
689 return -1UL << level_to_offset_bits(level);
692 static inline unsigned long level_size(int level)
694 return 1UL << level_to_offset_bits(level);
697 static inline unsigned long align_to_level(unsigned long pfn, int level)
699 return (pfn + level_size(level) - 1) & level_mask(level);
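/*
 * Example of the level arithmetic above, for 4KiB VT-d pages: level 1
 * ptes map single pages, and each step up adds LEVEL_STRIDE (9) bits.
 * pfn_level_offset(pfn, 2) is (pfn >> 9) & 0x1ff, level_size(2) is 512
 * pfns (2MiB of IOVA), and align_to_level(0x201, 2) rounds up to 0x400.
 */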
702 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
705 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
706 struct dma_pte *parent, *pte = NULL;
707 int level = agaw_to_level(domain->agaw);
710 BUG_ON(!domain->pgd);
711 BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
712 parent = domain->pgd;
717 offset = pfn_level_offset(pfn, level);
718 pte = &parent[offset];
722 if (!dma_pte_present(pte)) {
725 tmp_page = alloc_pgtable_page();
730 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
731 pteval = (virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
732 if (cmpxchg64(&pte->val, 0ULL, pteval)) {
733 /* Someone else set it while we were thinking; use theirs. */
734 free_pgtable_page(tmp_page);
737 domain_flush_cache(domain, pte, sizeof(*pte));
740 parent = phys_to_virt(dma_pte_addr(pte));
747 /* return the pte for an address at a specific level */
748 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
752 struct dma_pte *parent, *pte = NULL;
753 int total = agaw_to_level(domain->agaw);
756 parent = domain->pgd;
757 while (level <= total) {
758 offset = pfn_level_offset(pfn, total);
759 pte = &parent[offset];
763 if (!dma_pte_present(pte))
765 parent = phys_to_virt(dma_pte_addr(pte));
771 /* clear last level pte; a tlb flush should follow */
772 static void dma_pte_clear_range(struct dmar_domain *domain,
773 unsigned long start_pfn,
774 unsigned long last_pfn)
776 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
777 struct dma_pte *first_pte, *pte;
779 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
780 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
782 /* we don't need lock here; nobody else touches the iova range */
783 while (start_pfn <= last_pfn) {
784 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
786 start_pfn = align_to_level(start_pfn + 1, 2);
793 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
795 domain_flush_cache(domain, first_pte,
796 (void *)pte - (void *)first_pte);
800 /* free page table pages. last level pte should already be cleared */
801 static void dma_pte_free_pagetable(struct dmar_domain *domain,
802 unsigned long start_pfn,
803 unsigned long last_pfn)
805 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
806 struct dma_pte *first_pte, *pte;
807 int total = agaw_to_level(domain->agaw);
811 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
812 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
814 /* We don't need lock here; nobody else touches the iova range */
816 while (level <= total) {
817 tmp = align_to_level(start_pfn, level);
819 /* If we can't even clear one PTE at this level, we're done */
820 if (tmp + level_size(level) - 1 > last_pfn)
823 while (tmp + level_size(level) - 1 <= last_pfn) {
824 first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
826 tmp = align_to_level(tmp + 1, level + 1);
830 if (dma_pte_present(pte)) {
831 free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
835 tmp += level_size(level);
836 } while (!first_pte_in_page(pte) &&
837 tmp + level_size(level) - 1 <= last_pfn);
839 domain_flush_cache(domain, first_pte,
840 (void *)pte - (void *)first_pte);
846 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
847 free_pgtable_page(domain->pgd);
853 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
855 struct root_entry *root;
858 root = (struct root_entry *)alloc_pgtable_page();
862 __iommu_flush_cache(iommu, root, ROOT_SIZE);
864 spin_lock_irqsave(&iommu->lock, flags);
865 iommu->root_entry = root;
866 spin_unlock_irqrestore(&iommu->lock, flags);
871 static void iommu_set_root_entry(struct intel_iommu *iommu)
877 addr = iommu->root_entry;
879 spin_lock_irqsave(&iommu->register_lock, flag);
880 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
882 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
884 /* Make sure hardware completes it */
885 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
886 readl, (sts & DMA_GSTS_RTPS), sts);
888 spin_unlock_irqrestore(&iommu->register_lock, flag);
891 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
896 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
899 spin_lock_irqsave(&iommu->register_lock, flag);
900 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
902 /* Make sure hardware completes it */
903 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
904 readl, (!(val & DMA_GSTS_WBFS)), val);
906 spin_unlock_irqrestore(&iommu->register_lock, flag);
909 /* return value determines if we need a write buffer flush */
910 static void __iommu_flush_context(struct intel_iommu *iommu,
911 u16 did, u16 source_id, u8 function_mask,
918 case DMA_CCMD_GLOBAL_INVL:
919 val = DMA_CCMD_GLOBAL_INVL;
921 case DMA_CCMD_DOMAIN_INVL:
922 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
924 case DMA_CCMD_DEVICE_INVL:
925 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
926 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
933 spin_lock_irqsave(&iommu->register_lock, flag);
934 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
936 /* Make sure hardware completes it */
937 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
938 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
940 spin_unlock_irqrestore(&iommu->register_lock, flag);
943 /* return value determines if we need a write buffer flush */
944 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
945 u64 addr, unsigned int size_order, u64 type)
947 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
948 u64 val = 0, val_iva = 0;
952 case DMA_TLB_GLOBAL_FLUSH:
953 /* global flush doesn't need to set IVA_REG */
954 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
956 case DMA_TLB_DSI_FLUSH:
957 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
959 case DMA_TLB_PSI_FLUSH:
960 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
961 /* Note: always flush non-leaf currently */
962 val_iva = size_order | addr;
967 /* Note: set drain read/write */
970 * This is probably only needed to be super secure.  It looks like we
971 * can ignore it without any impact.
973 if (cap_read_drain(iommu->cap))
974 val |= DMA_TLB_READ_DRAIN;
976 if (cap_write_drain(iommu->cap))
977 val |= DMA_TLB_WRITE_DRAIN;
979 spin_lock_irqsave(&iommu->register_lock, flag);
980 /* Note: Only uses first TLB reg currently */
982 dmar_writeq(iommu->reg + tlb_offset, val_iva);
983 dmar_writeq(iommu->reg + tlb_offset + 8, val);
985 /* Make sure hardware completes it */
986 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
987 dmar_readq, (!(val & DMA_TLB_IVT)), val);
989 spin_unlock_irqrestore(&iommu->register_lock, flag);
991 /* check IOTLB invalidation granularity */
992 if (DMA_TLB_IAIG(val) == 0)
993 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
994 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
995 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
996 (unsigned long long)DMA_TLB_IIRG(type),
997 (unsigned long long)DMA_TLB_IAIG(val));
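/*
 * Note on the granularity check above: hardware reports the granularity
 * it actually used (IAIG), which may be coarser than the one requested
 * (IIRG) -- e.g. a page-selective request satisfied by a domain-selective
 * flush.  That is functionally harmless, so it is only logged at debug
 * level; a zero IAIG indicates the flush did not complete successfully,
 * hence the error message.
 */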
1000 static struct device_domain_info *iommu_support_dev_iotlb(
1001 struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
1004 unsigned long flags;
1005 struct device_domain_info *info;
1006 struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);
1008 if (!ecap_dev_iotlb_support(iommu->ecap))
1014 spin_lock_irqsave(&device_domain_lock, flags);
1015 list_for_each_entry(info, &domain->devices, link)
1016 if (info->bus == bus && info->devfn == devfn) {
1020 spin_unlock_irqrestore(&device_domain_lock, flags);
1022 if (!found || !info->dev)
1025 if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
1028 if (!dmar_find_matched_atsr_unit(info->dev))
1031 info->iommu = iommu;
1036 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1041 pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
1044 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1046 if (!info->dev || !pci_ats_enabled(info->dev))
1049 pci_disable_ats(info->dev);
1052 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1053 u64 addr, unsigned mask)
1056 unsigned long flags;
1057 struct device_domain_info *info;
1059 spin_lock_irqsave(&device_domain_lock, flags);
1060 list_for_each_entry(info, &domain->devices, link) {
1061 if (!info->dev || !pci_ats_enabled(info->dev))
1064 sid = info->bus << 8 | info->devfn;
1065 qdep = pci_ats_queue_depth(info->dev);
1066 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1068 spin_unlock_irqrestore(&device_domain_lock, flags);
1071 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
1072 unsigned long pfn, unsigned int pages)
1074 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1075 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1080 * Fall back to domain selective flush if no PSI support or the size is too big.
1082 * PSI requires the page size to be 2 ^ x, and the base address to be naturally
1083 * aligned to the size.
1085 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1086 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1089 iommu->flush.flush_iotlb(iommu, did, addr, mask,
1093 * In caching mode, domain ID 0 is reserved for non-present to present
1094 * mapping flush. Device IOTLB doesn't need to be flushed in this case.
1096 if (!cap_caching_mode(iommu->cap) || did)
1097 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
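/*
 * Example of the mask computation above: flushing 9 pages gives
 * mask = ilog2(__roundup_pow_of_two(9)) = 4, so the hardware invalidates
 * a naturally aligned 16-page region containing the range.  If that mask
 * exceeds cap_max_amask_val(), the code falls back to a full
 * domain-selective flush instead.
 */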
1100 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1103 unsigned long flags;
1105 spin_lock_irqsave(&iommu->register_lock, flags);
1106 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1107 pmen &= ~DMA_PMEN_EPM;
1108 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1110 /* wait for the protected region status bit to clear */
1111 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1112 readl, !(pmen & DMA_PMEN_PRS), pmen);
1114 spin_unlock_irqrestore(&iommu->register_lock, flags);
1117 static int iommu_enable_translation(struct intel_iommu *iommu)
1120 unsigned long flags;
1122 spin_lock_irqsave(&iommu->register_lock, flags);
1123 iommu->gcmd |= DMA_GCMD_TE;
1124 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1126 /* Make sure hardware completes it */
1127 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1128 readl, (sts & DMA_GSTS_TES), sts);
1130 spin_unlock_irqrestore(&iommu->register_lock, flags);
1134 static int iommu_disable_translation(struct intel_iommu *iommu)
1139 spin_lock_irqsave(&iommu->register_lock, flag);
1140 iommu->gcmd &= ~DMA_GCMD_TE;
1141 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1143 /* Make sure hardware completes it */
1144 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1145 readl, (!(sts & DMA_GSTS_TES)), sts);
1147 spin_unlock_irqrestore(&iommu->register_lock, flag);
1152 static int iommu_init_domains(struct intel_iommu *iommu)
1154 unsigned long ndomains;
1155 unsigned long nlongs;
1157 ndomains = cap_ndoms(iommu->cap);
1158 pr_debug("Number of Domains supported <%ld>\n", ndomains);
1159 nlongs = BITS_TO_LONGS(ndomains);
1161 spin_lock_init(&iommu->lock);
1163 /* TBD: there might be 64K domains,
1164 * consider other allocation for future chips
1166 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1167 if (!iommu->domain_ids) {
1168 printk(KERN_ERR "Allocating domain id array failed\n");
1171 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1173 if (!iommu->domains) {
1174 printk(KERN_ERR "Allocating domain array failed\n");
1179 * if Caching mode is set, then invalid translations are tagged
1180 * with domainid 0. Hence we need to pre-allocate it.
1182 if (cap_caching_mode(iommu->cap))
1183 set_bit(0, iommu->domain_ids);
1188 static void domain_exit(struct dmar_domain *domain);
1189 static void vm_domain_exit(struct dmar_domain *domain);
1191 void free_dmar_iommu(struct intel_iommu *iommu)
1193 struct dmar_domain *domain;
1195 unsigned long flags;
1197 if ((iommu->domains) && (iommu->domain_ids)) {
1198 i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
1199 for (; i < cap_ndoms(iommu->cap); ) {
1200 domain = iommu->domains[i];
1201 clear_bit(i, iommu->domain_ids);
1203 spin_lock_irqsave(&domain->iommu_lock, flags);
1204 if (--domain->iommu_count == 0) {
1205 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
1206 vm_domain_exit(domain);
1208 domain_exit(domain);
1210 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1212 i = find_next_bit(iommu->domain_ids,
1213 cap_ndoms(iommu->cap), i+1);
1217 if (iommu->gcmd & DMA_GCMD_TE)
1218 iommu_disable_translation(iommu);
1221 set_irq_data(iommu->irq, NULL);
1222 /* This will mask the irq */
1223 free_irq(iommu->irq, iommu);
1224 destroy_irq(iommu->irq);
1227 kfree(iommu->domains);
1228 kfree(iommu->domain_ids);
1230 g_iommus[iommu->seq_id] = NULL;
1232 /* if all iommus are freed, free g_iommus */
1233 for (i = 0; i < g_num_of_iommus; i++) {
1238 if (i == g_num_of_iommus)
1241 /* free context mapping */
1242 free_context_table(iommu);
1245 static struct dmar_domain *alloc_domain(void)
1247 struct dmar_domain *domain;
1249 domain = alloc_domain_mem();
1253 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
1259 static int iommu_attach_domain(struct dmar_domain *domain,
1260 struct intel_iommu *iommu)
1263 unsigned long ndomains;
1264 unsigned long flags;
1266 ndomains = cap_ndoms(iommu->cap);
1268 spin_lock_irqsave(&iommu->lock, flags);
1270 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1271 if (num >= ndomains) {
1272 spin_unlock_irqrestore(&iommu->lock, flags);
1273 printk(KERN_ERR "IOMMU: no free domain ids\n");
1278 set_bit(num, iommu->domain_ids);
1279 set_bit(iommu->seq_id, &domain->iommu_bmp);
1280 iommu->domains[num] = domain;
1281 spin_unlock_irqrestore(&iommu->lock, flags);
1286 static void iommu_detach_domain(struct dmar_domain *domain,
1287 struct intel_iommu *iommu)
1289 unsigned long flags;
1293 spin_lock_irqsave(&iommu->lock, flags);
1294 ndomains = cap_ndoms(iommu->cap);
1295 num = find_first_bit(iommu->domain_ids, ndomains);
1296 for (; num < ndomains; ) {
1297 if (iommu->domains[num] == domain) {
1301 num = find_next_bit(iommu->domain_ids,
1302 cap_ndoms(iommu->cap), num+1);
1306 clear_bit(num, iommu->domain_ids);
1307 clear_bit(iommu->seq_id, &domain->iommu_bmp);
1308 iommu->domains[num] = NULL;
1310 spin_unlock_irqrestore(&iommu->lock, flags);
1313 static struct iova_domain reserved_iova_list;
1314 static struct lock_class_key reserved_rbtree_key;
1316 static void dmar_init_reserved_ranges(void)
1318 struct pci_dev *pdev = NULL;
1322 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
1324 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1325 &reserved_rbtree_key);
1327 /* IOAPIC ranges shouldn't be accessed by DMA */
1328 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1329 IOVA_PFN(IOAPIC_RANGE_END));
1331 printk(KERN_ERR "Reserve IOAPIC range failed\n");
1333 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1334 for_each_pci_dev(pdev) {
1337 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1338 r = &pdev->resource[i];
1339 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1341 iova = reserve_iova(&reserved_iova_list,
1345 printk(KERN_ERR "Reserve iova failed\n");
1351 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1353 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1356 static inline int guestwidth_to_adjustwidth(int gaw)
1359 int r = (gaw - 12) % 9;
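/*
 * guestwidth_to_adjustwidth() rounds the guest address width up to the
 * nearest width expressible with whole page-table levels (12 + 9*n bits);
 * r above measures how far gaw is from such a boundary.  E.g. a requested
 * width of 36 has r = 6 and is adjusted to 39, while 39 and 48 are
 * already exact and are left unchanged.
 */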
1370 static int domain_init(struct dmar_domain *domain, int guest_width)
1372 struct intel_iommu *iommu;
1373 int adjust_width, agaw;
1374 unsigned long sagaw;
1376 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
1377 spin_lock_init(&domain->iommu_lock);
1379 domain_reserve_special_ranges(domain);
1381 /* calculate AGAW */
1382 iommu = domain_get_iommu(domain);
1383 if (guest_width > cap_mgaw(iommu->cap))
1384 guest_width = cap_mgaw(iommu->cap);
1385 domain->gaw = guest_width;
1386 adjust_width = guestwidth_to_adjustwidth(guest_width);
1387 agaw = width_to_agaw(adjust_width);
1388 sagaw = cap_sagaw(iommu->cap);
1389 if (!test_bit(agaw, &sagaw)) {
1390 /* hardware doesn't support it, choose a bigger one */
1391 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1392 agaw = find_next_bit(&sagaw, 5, agaw);
1396 domain->agaw = agaw;
1397 INIT_LIST_HEAD(&domain->devices);
1399 if (ecap_coherent(iommu->ecap))
1400 domain->iommu_coherency = 1;
1402 domain->iommu_coherency = 0;
1404 if (ecap_sc_support(iommu->ecap))
1405 domain->iommu_snooping = 1;
1407 domain->iommu_snooping = 0;
1409 domain->iommu_count = 1;
1411 /* always allocate the top pgd */
1412 domain->pgd = (struct dma_pte *)alloc_pgtable_page();
1415 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1419 static void domain_exit(struct dmar_domain *domain)
1421 struct dmar_drhd_unit *drhd;
1422 struct intel_iommu *iommu;
1424 /* Domain 0 is reserved, so don't process it */
1428 domain_remove_dev_info(domain);
1430 put_iova_domain(&domain->iovad);
1433 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1435 /* free page tables */
1436 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1438 for_each_active_iommu(iommu, drhd)
1439 if (test_bit(iommu->seq_id, &domain->iommu_bmp))
1440 iommu_detach_domain(domain, iommu);
1442 free_domain_mem(domain);
1445 static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1446 u8 bus, u8 devfn, int translation)
1448 struct context_entry *context;
1449 unsigned long flags;
1450 struct intel_iommu *iommu;
1451 struct dma_pte *pgd;
1453 unsigned long ndomains;
1456 struct device_domain_info *info = NULL;
1458 pr_debug("Set context mapping for %02x:%02x.%d\n",
1459 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1461 BUG_ON(!domain->pgd);
1462 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1463 translation != CONTEXT_TT_MULTI_LEVEL);
1465 iommu = device_to_iommu(segment, bus, devfn);
1469 context = device_to_context_entry(iommu, bus, devfn);
1472 spin_lock_irqsave(&iommu->lock, flags);
1473 if (context_present(context)) {
1474 spin_unlock_irqrestore(&iommu->lock, flags);
1481 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1482 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
1485 /* find an available domain id for this device in iommu */
1486 ndomains = cap_ndoms(iommu->cap);
1487 num = find_first_bit(iommu->domain_ids, ndomains);
1488 for (; num < ndomains; ) {
1489 if (iommu->domains[num] == domain) {
1494 num = find_next_bit(iommu->domain_ids,
1495 cap_ndoms(iommu->cap), num+1);
1499 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1500 if (num >= ndomains) {
1501 spin_unlock_irqrestore(&iommu->lock, flags);
1502 printk(KERN_ERR "IOMMU: no free domain ids\n");
1506 set_bit(num, iommu->domain_ids);
1507 iommu->domains[num] = domain;
1511 /* Skip top levels of page tables for
1512 * iommus which have a smaller agaw than the default.
1514 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1515 pgd = phys_to_virt(dma_pte_addr(pgd));
1516 if (!dma_pte_present(pgd)) {
1517 spin_unlock_irqrestore(&iommu->lock, flags);
1523 context_set_domain_id(context, id);
1525 if (translation != CONTEXT_TT_PASS_THROUGH) {
1526 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1527 translation = info ? CONTEXT_TT_DEV_IOTLB :
1528 CONTEXT_TT_MULTI_LEVEL;
1531 * In pass through mode, AW must be programmed to indicate the largest
1532 * AGAW value supported by hardware. And ASR is ignored by hardware.
1534 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
1535 context_set_address_width(context, iommu->msagaw);
1537 context_set_address_root(context, virt_to_phys(pgd));
1538 context_set_address_width(context, iommu->agaw);
1541 context_set_translation_type(context, translation);
1542 context_set_fault_enable(context);
1543 context_set_present(context);
1544 domain_flush_cache(domain, context, sizeof(*context));
1547 * It's a non-present to present mapping. If hardware doesn't cache
1548 * non-present entries, we only need to flush the write-buffer. If it
1549 * _does_ cache non-present entries, then it does so in the special
1550 * domain #0, which we have to flush:
1552 if (cap_caching_mode(iommu->cap)) {
1553 iommu->flush.flush_context(iommu, 0,
1554 (((u16)bus) << 8) | devfn,
1555 DMA_CCMD_MASK_NOBIT,
1556 DMA_CCMD_DEVICE_INVL);
1557 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
1559 iommu_flush_write_buffer(iommu);
1561 iommu_enable_dev_iotlb(info);
1562 spin_unlock_irqrestore(&iommu->lock, flags);
1564 spin_lock_irqsave(&domain->iommu_lock, flags);
1565 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
1566 domain->iommu_count++;
1567 domain_update_iommu_cap(domain);
1569 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1574 domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1578 struct pci_dev *tmp, *parent;
1580 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
1581 pdev->bus->number, pdev->devfn,
1586 /* dependent device mapping */
1587 tmp = pci_find_upstream_pcie_bridge(pdev);
1590 /* Secondary interface's bus number and devfn 0 */
1591 parent = pdev->bus->self;
1592 while (parent != tmp) {
1593 ret = domain_context_mapping_one(domain,
1594 pci_domain_nr(parent->bus),
1595 parent->bus->number,
1596 parent->devfn, translation);
1599 parent = parent->bus->self;
1601 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
1602 return domain_context_mapping_one(domain,
1603 pci_domain_nr(tmp->subordinate),
1604 tmp->subordinate->number, 0,
1606 else /* this is a legacy PCI bridge */
1607 return domain_context_mapping_one(domain,
1608 pci_domain_nr(tmp->bus),
1614 static int domain_context_mapped(struct pci_dev *pdev)
1617 struct pci_dev *tmp, *parent;
1618 struct intel_iommu *iommu;
1620 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1625 ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
1628 /* dependent device mapping */
1629 tmp = pci_find_upstream_pcie_bridge(pdev);
1632 /* Secondary interface's bus number and devfn 0 */
1633 parent = pdev->bus->self;
1634 while (parent != tmp) {
1635 ret = device_context_mapped(iommu, parent->bus->number,
1639 parent = parent->bus->self;
1642 return device_context_mapped(iommu, tmp->subordinate->number,
1645 return device_context_mapped(iommu, tmp->bus->number,
1649 /* Returns a number of VTD pages, but aligned to MM page size */
1650 static inline unsigned long aligned_nrpages(unsigned long host_addr,
1653 host_addr &= ~PAGE_MASK;
1654 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
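/*
 * Example for aligned_nrpages(), assuming 4KiB kernel pages: for
 * host_addr 0x1200 and size 0x3000 the in-page offset is 0x200,
 * PAGE_ALIGN(0x3200) is 0x4000, and the result is 4 VT-d pages -- the
 * count is in VT-d pages but rounded up to a whole number of MM pages,
 * as the comment above says.
 */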
1657 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1658 struct scatterlist *sg, unsigned long phys_pfn,
1659 unsigned long nr_pages, int prot)
1661 struct dma_pte *first_pte = NULL, *pte = NULL;
1662 phys_addr_t uninitialized_var(pteval);
1663 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
1664 unsigned long sg_res;
1666 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1668 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1671 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1676 sg_res = nr_pages + 1;
1677 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1680 while (nr_pages--) {
1684 sg_res = aligned_nrpages(sg->offset, sg->length);
1685 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1686 sg->dma_length = sg->length;
1687 pteval = page_to_phys(sg_page(sg)) | prot;
1690 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
1694 /* We don't need lock here, nobody else
1695 * touches the iova range
1697 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
1699 static int dumps = 5;
1700 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
1701 iov_pfn, tmp, (unsigned long long)pteval);
1704 debug_dma_dump_mappings(NULL);
1709 if (!nr_pages || first_pte_in_page(pte)) {
1710 domain_flush_cache(domain, first_pte,
1711 (void *)pte - (void *)first_pte);
1715 pteval += VTD_PAGE_SIZE;
1723 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1724 struct scatterlist *sg, unsigned long nr_pages,
1727 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
1730 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1731 unsigned long phys_pfn, unsigned long nr_pages,
1734 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
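/*
 * __domain_mapping() serves both wrappers above: with sg == NULL it maps
 * nr_pages physically contiguous pages starting at phys_pfn, otherwise it
 * walks the scatterlist, filling in each entry's dma_address and
 * dma_length as it goes.  Either way the ptes are written without a lock
 * and flushed one page-table page at a time.
 */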
1737 static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
1742 clear_context_table(iommu, bus, devfn);
1743 iommu->flush.flush_context(iommu, 0, 0, 0,
1744 DMA_CCMD_GLOBAL_INVL);
1745 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
1748 static void domain_remove_dev_info(struct dmar_domain *domain)
1750 struct device_domain_info *info;
1751 unsigned long flags;
1752 struct intel_iommu *iommu;
1754 spin_lock_irqsave(&device_domain_lock, flags);
1755 while (!list_empty(&domain->devices)) {
1756 info = list_entry(domain->devices.next,
1757 struct device_domain_info, link);
1758 list_del(&info->link);
1759 list_del(&info->global);
1761 info->dev->dev.archdata.iommu = NULL;
1762 spin_unlock_irqrestore(&device_domain_lock, flags);
1764 iommu_disable_dev_iotlb(info);
1765 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
1766 iommu_detach_dev(iommu, info->bus, info->devfn);
1767 free_devinfo_mem(info);
1769 spin_lock_irqsave(&device_domain_lock, flags);
1771 spin_unlock_irqrestore(&device_domain_lock, flags);
1776 * Note: we use struct pci_dev->dev.archdata.iommu to store the info
1778 static struct dmar_domain *
1779 find_domain(struct pci_dev *pdev)
1781 struct device_domain_info *info;
1783 /* No lock here, assumes no domain exit in normal case */
1784 info = pdev->dev.archdata.iommu;
1786 return info->domain;
1790 /* domain is initialized */
1791 static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1793 struct dmar_domain *domain, *found = NULL;
1794 struct intel_iommu *iommu;
1795 struct dmar_drhd_unit *drhd;
1796 struct device_domain_info *info, *tmp;
1797 struct pci_dev *dev_tmp;
1798 unsigned long flags;
1799 int bus = 0, devfn = 0;
1803 domain = find_domain(pdev);
1807 segment = pci_domain_nr(pdev->bus);
1809 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1811 if (dev_tmp->is_pcie) {
1812 bus = dev_tmp->subordinate->number;
1815 bus = dev_tmp->bus->number;
1816 devfn = dev_tmp->devfn;
1818 spin_lock_irqsave(&device_domain_lock, flags);
1819 list_for_each_entry(info, &device_domain_list, global) {
1820 if (info->segment == segment &&
1821 info->bus == bus && info->devfn == devfn) {
1822 found = info->domain;
1826 spin_unlock_irqrestore(&device_domain_lock, flags);
1827 /* pcie-pci bridge already has a domain, use it */
1834 domain = alloc_domain();
1838 /* Allocate new domain for the device */
1839 drhd = dmar_find_matched_drhd_unit(pdev);
1841 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
1845 iommu = drhd->iommu;
1847 ret = iommu_attach_domain(domain, iommu);
1849 domain_exit(domain);
1853 if (domain_init(domain, gaw)) {
1854 domain_exit(domain);
1858 /* register pcie-to-pci device */
1860 info = alloc_devinfo_mem();
1862 domain_exit(domain);
1865 info->segment = segment;
1867 info->devfn = devfn;
1869 info->domain = domain;
1870 /* This domain is shared by devices under p2p bridge */
1871 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
1873 /* pcie-to-pci bridge already has a domain, use it */
1875 spin_lock_irqsave(&device_domain_lock, flags);
1876 list_for_each_entry(tmp, &device_domain_list, global) {
1877 if (tmp->segment == segment &&
1878 tmp->bus == bus && tmp->devfn == devfn) {
1879 found = tmp->domain;
1884 free_devinfo_mem(info);
1885 domain_exit(domain);
1888 list_add(&info->link, &domain->devices);
1889 list_add(&info->global, &device_domain_list);
1891 spin_unlock_irqrestore(&device_domain_lock, flags);
1895 info = alloc_devinfo_mem();
1898 info->segment = segment;
1899 info->bus = pdev->bus->number;
1900 info->devfn = pdev->devfn;
1902 info->domain = domain;
1903 spin_lock_irqsave(&device_domain_lock, flags);
1904 /* somebody is fast */
1905 found = find_domain(pdev);
1906 if (found != NULL) {
1907 spin_unlock_irqrestore(&device_domain_lock, flags);
1908 if (found != domain) {
1909 domain_exit(domain);
1912 free_devinfo_mem(info);
1915 list_add(&info->link, &domain->devices);
1916 list_add(&info->global, &device_domain_list);
1917 pdev->dev.archdata.iommu = info;
1918 spin_unlock_irqrestore(&device_domain_lock, flags);
1921 /* recheck it here, maybe others set it */
1922 return find_domain(pdev);
1925 static int iommu_identity_mapping;
1927 static int iommu_domain_identity_map(struct dmar_domain *domain,
1928 unsigned long long start,
1929 unsigned long long end)
1931 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
1932 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
1934 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
1935 dma_to_mm_pfn(last_vpfn))) {
1936 printk(KERN_ERR "IOMMU: reserve iova failed\n");
1940 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
1941 start, end, domain->id);
1943 * RMRR range might have overlap with physical memory range; clear it first.
1946 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
1948 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
1949 last_vpfn - first_vpfn + 1,
1950 DMA_PTE_READ|DMA_PTE_WRITE);
1953 static int iommu_prepare_identity_map(struct pci_dev *pdev,
1954 unsigned long long start,
1955 unsigned long long end)
1957 struct dmar_domain *domain;
1960 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
1964 /* For _hardware_ passthrough, don't bother. But for software
1965 passthrough, we do it anyway -- it may indicate a memory
1966 range which is reserved in E820 and so didn't get set
1967 up to start with in si_domain */
1968 if (domain == si_domain && hw_pass_through) {
1969 printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
1970 pci_name(pdev), start, end);
1975 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1976 pci_name(pdev), start, end);
1978 ret = iommu_domain_identity_map(domain, start, end);
1982 /* context entry init */
1983 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
1990 domain_exit(domain);
1994 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
1995 struct pci_dev *pdev)
1997 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
1999 return iommu_prepare_identity_map(pdev, rmrr->base_address,
2000 rmrr->end_address + 1);
2003 #ifdef CONFIG_DMAR_FLOPPY_WA
2004 static inline void iommu_prepare_isa(void)
2006 struct pci_dev *pdev;
2009 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2013 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
2014 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
2017 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2018 "floppy might not work\n");
2022 static inline void iommu_prepare_isa(void)
2026 #endif /* !CONFIG_DMAR_FLOPPY_WA */
2028 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2030 static int __init si_domain_work_fn(unsigned long start_pfn,
2031 unsigned long end_pfn, void *datax)
2035 *ret = iommu_domain_identity_map(si_domain,
2036 (uint64_t)start_pfn << PAGE_SHIFT,
2037 (uint64_t)end_pfn << PAGE_SHIFT);
2042 static int __init si_domain_init(int hw)
2044 struct dmar_drhd_unit *drhd;
2045 struct intel_iommu *iommu;
2048 si_domain = alloc_domain();
2052 pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
2054 for_each_active_iommu(iommu, drhd) {
2055 ret = iommu_attach_domain(si_domain, iommu);
2057 domain_exit(si_domain);
2062 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2063 domain_exit(si_domain);
2067 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2072 for_each_online_node(nid) {
2073 work_with_active_regions(nid, si_domain_work_fn, &ret);
2081 static void domain_remove_one_dev_info(struct dmar_domain *domain,
2082 struct pci_dev *pdev);
2083 static int identity_mapping(struct pci_dev *pdev)
2085 struct device_domain_info *info;
2087 if (likely(!iommu_identity_mapping))
2091 list_for_each_entry(info, &si_domain->devices, link)
2092 if (info->dev == pdev)
2097 static int domain_add_dev_info(struct dmar_domain *domain,
2098 struct pci_dev *pdev,
2101 struct device_domain_info *info;
2102 unsigned long flags;
2105 info = alloc_devinfo_mem();
2109 ret = domain_context_mapping(domain, pdev, translation);
2111 free_devinfo_mem(info);
2115 info->segment = pci_domain_nr(pdev->bus);
2116 info->bus = pdev->bus->number;
2117 info->devfn = pdev->devfn;
2119 info->domain = domain;
2121 spin_lock_irqsave(&device_domain_lock, flags);
2122 list_add(&info->link, &domain->devices);
2123 list_add(&info->global, &device_domain_list);
2124 pdev->dev.archdata.iommu = info;
2125 spin_unlock_irqrestore(&device_domain_lock, flags);
2130 static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2132 if (iommu_identity_mapping == 2)
2133 return IS_GFX_DEVICE(pdev);
2136 * We want to start off with all devices in the 1:1 domain, and
2137 * take them out later if we find they can't access all of memory.
2139 * However, we can't do this for PCI devices behind bridges,
2140 * because all PCI devices behind the same bridge will end up
2141 * with the same source-id on their transactions.
2143 * Practically speaking, we can't change things around for these
2144 * devices at run-time, because we can't be sure there'll be no
2145 * DMA transactions in flight for any of their siblings.
2147 * So PCI devices (unless they're on the root bus) as well as
2148 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2149 * the 1:1 domain, just in _case_ one of their siblings turns out
2150 * not to be able to map all of memory.
2152 if (!pdev->is_pcie) {
2153 if (!pci_is_root_bus(pdev->bus))
2155 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2157 } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
2161 * At boot time, we don't yet know if devices will be 64-bit capable.
2162 * Assume that they will -- if they turn out not to be, then we can
2163 * take them out of the 1:1 domain later.
2166 return pdev->dma_mask > DMA_BIT_MASK(32);
2171 static int __init iommu_prepare_static_identity_mapping(int hw)
2173 struct pci_dev *pdev = NULL;
2176 ret = si_domain_init(hw);
2180 for_each_pci_dev(pdev) {
2181 if (iommu_should_identity_map(pdev, 1)) {
2182 printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
2183 hw ? "hardware" : "software", pci_name(pdev));
2185 ret = domain_add_dev_info(si_domain, pdev,
2186 hw ? CONTEXT_TT_PASS_THROUGH :
2187 CONTEXT_TT_MULTI_LEVEL);
2196 int __init init_dmars(void)
2198 struct dmar_drhd_unit *drhd;
2199 struct dmar_rmrr_unit *rmrr;
2200 struct pci_dev *pdev;
2201 struct intel_iommu *iommu;
2207 * initialize and program root entry to not present
2210 for_each_drhd_unit(drhd) {
2213 * lock not needed as this is only incremented in the single
2214 * threaded kernel __init code path; all other accesses are read-only
2219 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2222 printk(KERN_ERR "Allocating global iommu array failed\n");
2227 deferred_flush = kzalloc(g_num_of_iommus *
2228 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2229 if (!deferred_flush) {
2234 for_each_drhd_unit(drhd) {
2238 iommu = drhd->iommu;
2239 g_iommus[iommu->seq_id] = iommu;
2241 ret = iommu_init_domains(iommu);
2247 * we could share the same root & context tables
2248 * among all IOMMUs. Need to split it later.
2250 ret = iommu_alloc_root_entry(iommu);
2252 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2255 if (!ecap_pass_through(iommu->ecap))
2256 hw_pass_through = 0;
2260 * Start from a sane iommu hardware state.
2262 for_each_drhd_unit(drhd) {
2266 iommu = drhd->iommu;
2269 * If the queued invalidation is already initialized by us
2270 * (for example, while enabling interrupt-remapping) then
2271 * we've already got things rolling from a sane state.
2277 * Clear any previous faults.
2279 dmar_fault(-1, iommu);
2281 * Disable queued invalidation if supported and already enabled
2282 * before OS handover.
2284 dmar_disable_qi(iommu);
2287 for_each_drhd_unit(drhd) {
2291 iommu = drhd->iommu;
2293 if (dmar_enable_qi(iommu)) {
2295 * Queued Invalidate not enabled, use Register Based Invalidate
2298 iommu->flush.flush_context = __iommu_flush_context;
2299 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2300 printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
2302 (unsigned long long)drhd->reg_base_addr);
2304 iommu->flush.flush_context = qi_flush_context;
2305 iommu->flush.flush_iotlb = qi_flush_iotlb;
2306 printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
2308 (unsigned long long)drhd->reg_base_addr);
2312 if (iommu_pass_through)
2313 iommu_identity_mapping = 1;
2314 #ifdef CONFIG_DMAR_BROKEN_GFX_WA
2316 iommu_identity_mapping = 2;
2319 * If pass through is not set or not enabled, set up context entries for
2320 * identity mappings for rmrr, gfx, and isa; this may fall back to static
2321 * identity mapping if iommu_identity_mapping is set.
2323 if (iommu_identity_mapping) {
2324 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2326 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2332 * for each dev attached to rmrr
2334 * locate drhd for dev, alloc domain for dev
2335 * allocate free domain
2336 * allocate page table entries for rmrr
2337 * if context not allocated for bus
2338 * allocate and init context
2339 * set present in root table for this bus
2340 * init context with domain, translation etc
2344 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2345 for_each_rmrr_units(rmrr) {
2346 for (i = 0; i < rmrr->devices_cnt; i++) {
2347 pdev = rmrr->devices[i];
2349 * some BIOSes list non-existent devices in the DMAR table
2354 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2357 "IOMMU: mapping reserved region failed\n");
2361 iommu_prepare_isa();
2366 * global invalidate context cache
2367 * global invalidate iotlb
2368 * enable translation
2370 for_each_drhd_unit(drhd) {
2373 iommu = drhd->iommu;
2375 iommu_flush_write_buffer(iommu);
2377 ret = dmar_set_interrupt(iommu);
2381 iommu_set_root_entry(iommu);
2383 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
2384 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2385 iommu_disable_protect_mem_regions(iommu);
2387 ret = iommu_enable_translation(iommu);
2394 for_each_drhd_unit(drhd) {
2397 iommu = drhd->iommu;
2404 /* This takes a number of _MM_ pages, not VTD pages */
2405 static struct iova *intel_alloc_iova(struct device *dev,
2406 struct dmar_domain *domain,
2407 unsigned long nrpages, uint64_t dma_mask)
2409 struct pci_dev *pdev = to_pci_dev(dev);
2410 struct iova *iova = NULL;
2412 /* Restrict dma_mask to the width that the iommu can handle */
2413 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2415 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
2417 * First try to allocate an io virtual address in
2418 * DMA_BIT_MASK(32) and if that fails then try allocating from the higher range.
2421 iova = alloc_iova(&domain->iovad, nrpages,
2422 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2426 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2427 if (unlikely(!iova)) {
2428 printk(KERN_ERR "Allocating %ld-page iova for %s failed",
2429 nrpages, pci_name(pdev));
2436 static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
2438 struct dmar_domain *domain;
2441 domain = get_domain_for_dev(pdev,
2442 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2445 "Allocating domain for %s failed", pci_name(pdev));
2449 /* make sure context mapping is ok */
2450 if (unlikely(!domain_context_mapped(pdev))) {
2451 ret = domain_context_mapping(domain, pdev,
2452 CONTEXT_TT_MULTI_LEVEL);
2455 "Domain context map for %s failed",
2464 static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
2466 struct device_domain_info *info;
2468 /* No lock here, assumes no domain exit in normal case */
2469 info = dev->dev.archdata.iommu;
2471 return info->domain;
2473 return __get_valid_domain_for_dev(dev);
2476 static int iommu_dummy(struct pci_dev *pdev)
2478 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2481 /* Check if the pdev needs to go through non-identity map and unmap process.*/
2482 static int iommu_no_mapping(struct device *dev)
2484 struct pci_dev *pdev;
2487 if (unlikely(dev->bus != &pci_bus_type))
2490 pdev = to_pci_dev(dev);
2491 if (iommu_dummy(pdev))
2494 if (!iommu_identity_mapping)
2497 found = identity_mapping(pdev);
2499 if (iommu_should_identity_map(pdev, 0))
2503 * 32 bit DMA is removed from si_domain and falls back
2504 * to non-identity mapping.
2506 domain_remove_one_dev_info(si_domain, pdev);
2507 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2513 * In case of a 64 bit DMA device detached from a vm, the device
2514 * is put into si_domain for identity mapping.
2516 if (iommu_should_identity_map(pdev, 0)) {
2518 ret = domain_add_dev_info(si_domain, pdev,
2520 CONTEXT_TT_PASS_THROUGH :
2521 CONTEXT_TT_MULTI_LEVEL);
2523 printk(KERN_INFO "64bit %s uses identity mapping\n",
2533 static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2534 size_t size, int dir, u64 dma_mask)
2536 struct pci_dev *pdev = to_pci_dev(hwdev);
2537 struct dmar_domain *domain;
2538 phys_addr_t start_paddr;
2542 struct intel_iommu *iommu;
2543 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
2545 BUG_ON(dir == DMA_NONE);
2547 if (iommu_no_mapping(hwdev))
2550 domain = get_valid_domain_for_dev(pdev);
2554 iommu = domain_get_iommu(domain);
2555 size = aligned_nrpages(paddr, size);
2557 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
2563 * Check if DMAR supports zero-length reads on write-only mappings.
2566 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
2567 !cap_zlr(iommu->cap))
2568 prot |= DMA_PTE_READ;
2569 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2570 prot |= DMA_PTE_WRITE;
2572 * paddr ~ paddr + size may cover only part of a page, but we must map
2573 * whole pages. Note: if two parts of one page are mapped separately,
2574 * two guest addresses may map to the same host paddr, but that is
2575 * not a big problem.
2577 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
2578 mm_to_dma_pfn(paddr_pfn), size, prot);
2582 /* it's a non-present to present mapping. Only flush if caching mode */
2583 if (cap_caching_mode(iommu->cap))
2584 iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size);
2586 iommu_flush_write_buffer(iommu);
2588 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2589 start_paddr += paddr & ~PAGE_MASK;
2594 __free_iova(&domain->iovad, iova);
2595 printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
2596 pci_name(pdev), size, (unsigned long long)paddr, dir);
2600 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2601 unsigned long offset, size_t size,
2602 enum dma_data_direction dir,
2603 struct dma_attrs *attrs)
2605 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2606 dir, to_pci_dev(dev)->dma_mask);
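/*
 * Flush the deferred-unmap queue: one global IOTLB flush per IOMMU,
 * a device-IOTLB flush per queued entry, then release the queued IOVAs.
 */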
2609 static void flush_unmaps(void)
2615 /* just flush them all */
2616 for (i = 0; i < g_num_of_iommus; i++) {
2617 struct intel_iommu *iommu = g_iommus[i];
2621 if (!deferred_flush[i].next)
2624 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2625 DMA_TLB_GLOBAL_FLUSH);
2626 for (j = 0; j < deferred_flush[i].next; j++) {
2628 struct iova *iova = deferred_flush[i].iova[j];
2630 mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
2631 mask = ilog2(mask >> VTD_PAGE_SHIFT);
2632 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2633 iova->pfn_lo << PAGE_SHIFT, mask);
2634 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
2636 deferred_flush[i].next = 0;
2642 static void flush_unmaps_timeout(unsigned long data)
2644 unsigned long flags;
2646 spin_lock_irqsave(&async_umap_flush_lock, flags);
2648 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
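/*
 * Queue an IOVA for deferred freeing: arm a 10ms flush timer and, once
 * the queue reaches HIGH_WATER_MARK, flush immediately.
 */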
2651 static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2653 unsigned long flags;
2655 struct intel_iommu *iommu;
2657 spin_lock_irqsave(&async_umap_flush_lock, flags);
2658 if (list_size == HIGH_WATER_MARK)
2661 iommu = domain_get_iommu(dom);
2662 iommu_id = iommu->seq_id;
2664 next = deferred_flush[iommu_id].next;
2665 deferred_flush[iommu_id].domain[next] = dom;
2666 deferred_flush[iommu_id].iova[next] = iova;
2667 deferred_flush[iommu_id].next++;
2670 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2674 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
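/*
 * Unmap a single region: clear the PTEs and page tables covering the
 * IOVA range, then either flush the IOTLB synchronously (strict mode)
 * or queue the IOVA for deferred freeing.
 */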
2677 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2678 size_t size, enum dma_data_direction dir,
2679 struct dma_attrs *attrs)
2681 struct pci_dev *pdev = to_pci_dev(dev);
2682 struct dmar_domain *domain;
2683 unsigned long start_pfn, last_pfn;
2685 struct intel_iommu *iommu;
2687 if (iommu_no_mapping(dev))
2690 domain = find_domain(pdev);
2693 iommu = domain_get_iommu(domain);
2695 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
2696 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
2697 (unsigned long long)dev_addr))
2700 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2701 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
2703 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2704 pci_name(pdev), start_pfn, last_pfn);
2706 /* clear the PTEs covering the whole range */
2707 dma_pte_clear_range(domain, start_pfn, last_pfn);
2709 /* free page tables */
2710 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2712 if (intel_iommu_strict) {
2713 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
2714 last_pfn - start_pfn + 1);
2716 __free_iova(&domain->iovad, iova);
2718 add_unmap(domain, iova);
2720 * Queue up the release of the unmap to save the ~1/6th of the
2721 * CPU time otherwise spent in the iotlb flush operation...
 */
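/*
 * Allocate zeroed pages for dma_alloc_coherent() and map them through
 * __intel_map_single() against the device's coherent DMA mask.
 */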
2726 static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2727 dma_addr_t *dma_handle, gfp_t flags)
2732 size = PAGE_ALIGN(size);
2733 order = get_order(size);
2734 flags &= ~(GFP_DMA | GFP_DMA32);
2736 vaddr = (void *)__get_free_pages(flags, order);
2739 memset(vaddr, 0, size);
2741 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2743 hwdev->coherent_dma_mask);
2746 free_pages((unsigned long)vaddr, order);
2750 static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2751 dma_addr_t dma_handle)
2755 size = PAGE_ALIGN(size);
2756 order = get_order(size);
2758 intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
2759 free_pages((unsigned long)vaddr, order);
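/*
 * Scatterlist unmap: like intel_unmap_page(), clear the PTEs and page
 * tables for the whole IOVA range, then flush synchronously or defer.
 */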
2762 static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2763 int nelems, enum dma_data_direction dir,
2764 struct dma_attrs *attrs)
2766 struct pci_dev *pdev = to_pci_dev(hwdev);
2767 struct dmar_domain *domain;
2768 unsigned long start_pfn, last_pfn;
2770 struct intel_iommu *iommu;
2772 if (iommu_no_mapping(hwdev))
2775 domain = find_domain(pdev);
2778 iommu = domain_get_iommu(domain);
2780 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
2781 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
2782 (unsigned long long)sglist[0].dma_address))
2785 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2786 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
2788 /* clear the PTEs covering the whole range */
2789 dma_pte_clear_range(domain, start_pfn, last_pfn);
2791 /* free page tables */
2792 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2794 if (intel_iommu_strict) {
2795 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
2796 last_pfn - start_pfn + 1);
2798 __free_iova(&domain->iovad, iova);
2800 add_unmap(domain, iova);
2802 * Queue up the release of the unmap to save the ~1/6th of the
2803 * CPU time otherwise spent in the iotlb flush operation...
 */
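/*
 * No-translation path: DMA addresses are simply the physical addresses
 * of the scatterlist pages.
 */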
2808 static int intel_nontranslate_map_sg(struct device *hddev,
2809 struct scatterlist *sglist, int nelems, int dir)
2812 struct scatterlist *sg;
2814 for_each_sg(sglist, sg, nelems, i) {
2815 BUG_ON(!sg_page(sg));
2816 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
2817 sg->dma_length = sg->length;
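/*
 * Map a scatterlist: allocate one IOVA range covering every segment,
 * build the PTEs with domain_sg_mapping(), and flush the IOTLB only
 * when the IOMMU is in caching mode.
 */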
2822 static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2823 enum dma_data_direction dir, struct dma_attrs *attrs)
2826 struct pci_dev *pdev = to_pci_dev(hwdev);
2827 struct dmar_domain *domain;
2830 size_t offset_pfn = 0;
2831 struct iova *iova = NULL;
2833 struct scatterlist *sg;
2834 unsigned long start_vpfn;
2835 struct intel_iommu *iommu;
2837 BUG_ON(dir == DMA_NONE);
2838 if (iommu_no_mapping(hwdev))
2839 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
2841 domain = get_valid_domain_for_dev(pdev);
2845 iommu = domain_get_iommu(domain);
2847 for_each_sg(sglist, sg, nelems, i)
2848 size += aligned_nrpages(sg->offset, sg->length);
2850 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
2853 sglist->dma_length = 0;
2858 * Check if DMAR supports zero-length reads on write-only mappings.
2861 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
2862 !cap_zlr(iommu->cap))
2863 prot |= DMA_PTE_READ;
2864 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2865 prot |= DMA_PTE_WRITE;
2867 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
2869 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
2870 if (unlikely(ret)) {
2871 /* clear any PTEs already set up */
2872 dma_pte_clear_range(domain, start_vpfn,
2873 start_vpfn + size - 1);
2874 /* free page tables */
2875 dma_pte_free_pagetable(domain, start_vpfn,
2876 start_vpfn + size - 1);
2878 __free_iova(&domain->iovad, iova);
2882 /* it's a non-present to present mapping. Only flush if caching mode */
2883 if (cap_caching_mode(iommu->cap))
2884 iommu_flush_iotlb_psi(iommu, 0, start_vpfn, offset_pfn);
2886 iommu_flush_write_buffer(iommu);
2891 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
2896 struct dma_map_ops intel_dma_ops = {
2897 .alloc_coherent = intel_alloc_coherent,
2898 .free_coherent = intel_free_coherent,
2899 .map_sg = intel_map_sg,
2900 .unmap_sg = intel_unmap_sg,
2901 .map_page = intel_map_page,
2902 .unmap_page = intel_unmap_page,
2903 .mapping_error = intel_mapping_error,
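/*
 * Drivers never call the functions above directly; once dma_ops points
 * at intel_dma_ops (see intel_iommu_init() below), the generic DMA API
 * routes here. Illustrative only: a driver's
 * dma_map_page(dev, page, offset, size, DMA_TO_DEVICE) ends up in
 * intel_map_page(), and the matching dma_unmap_page() in intel_unmap_page().
 */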
2906 static inline int iommu_domain_cache_init(void)
2910 iommu_domain_cache = kmem_cache_create("iommu_domain",
2911 sizeof(struct dmar_domain),
2916 if (!iommu_domain_cache) {
2917 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
2924 static inline int iommu_devinfo_cache_init(void)
2928 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
2929 sizeof(struct device_domain_info),
2933 if (!iommu_devinfo_cache) {
2934 printk(KERN_ERR "Couldn't create devinfo cache\n");
2941 static inline int iommu_iova_cache_init(void)
2945 iommu_iova_cache = kmem_cache_create("iommu_iova",
2946 sizeof(struct iova),
2950 if (!iommu_iova_cache) {
2951 printk(KERN_ERR "Couldn't create iova cache\n");
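/*
 * Create the iova, domain and devinfo kmem caches in order, unwinding
 * the earlier ones if a later allocation fails.
 */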
2958 static int __init iommu_init_mempool(void)
2961 ret = iommu_iova_cache_init();
2965 ret = iommu_domain_cache_init();
2969 ret = iommu_devinfo_cache_init();
2973 kmem_cache_destroy(iommu_domain_cache);
2975 kmem_cache_destroy(iommu_iova_cache);
2980 static void __init iommu_exit_mempool(void)
2982 kmem_cache_destroy(iommu_devinfo_cache);
2983 kmem_cache_destroy(iommu_domain_cache);
2984 kmem_cache_destroy(iommu_iova_cache);
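/*
 * Mark DRHD units that cover no PCI devices as ignored, and bypass the
 * IOMMU entirely (DUMMY_DEVICE_DOMAIN_INFO) for units that cover only
 * graphics devices.
 */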
2988 static void __init init_no_remapping_devices(void)
2990 struct dmar_drhd_unit *drhd;
2992 for_each_drhd_unit(drhd) {
2993 if (!drhd->include_all) {
2995 for (i = 0; i < drhd->devices_cnt; i++)
2996 if (drhd->devices[i] != NULL)
2998 /* ignore DMAR unit if no pci devices exist */
2999 if (i == drhd->devices_cnt)
3007 for_each_drhd_unit(drhd) {
3009 if (drhd->ignored || drhd->include_all)
3012 for (i = 0; i < drhd->devices_cnt; i++)
3013 if (drhd->devices[i] &&
3014 !IS_GFX_DEVICE(drhd->devices[i]))
3017 if (i < drhd->devices_cnt)
3020 /* bypass IOMMU if it is just for gfx devices */
3022 for (i = 0; i < drhd->devices_cnt; i++) {
3023 if (!drhd->devices[i])
3025 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3030 #ifdef CONFIG_SUSPEND
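/*
 * Called on resume: re-enable queued invalidation, reload each IOMMU's
 * root entry, flush the context and IOTLB caches, and turn translation
 * back on.
 */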
3031 static int init_iommu_hw(void)
3033 struct dmar_drhd_unit *drhd;
3034 struct intel_iommu *iommu = NULL;
3036 for_each_active_iommu(iommu, drhd)
3038 dmar_reenable_qi(iommu);
3040 for_each_active_iommu(iommu, drhd) {
3041 iommu_flush_write_buffer(iommu);
3043 iommu_set_root_entry(iommu);
3045 iommu->flush.flush_context(iommu, 0, 0, 0,
3046 DMA_CCMD_GLOBAL_INVL);
3047 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3048 DMA_TLB_GLOBAL_FLUSH);
3049 iommu_disable_protect_mem_regions(iommu);
3050 iommu_enable_translation(iommu);
3056 static void iommu_flush_all(void)
3058 struct dmar_drhd_unit *drhd;
3059 struct intel_iommu *iommu;
3061 for_each_active_iommu(iommu, drhd) {
3062 iommu->flush.flush_context(iommu, 0, 0, 0,
3063 DMA_CCMD_GLOBAL_INVL);
3064 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3065 DMA_TLB_GLOBAL_FLUSH);
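/*
 * Suspend: flush everything, disable translation, and save the
 * fault-event registers so iommu_resume() can restore them.
 */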
3069 static int iommu_suspend(struct sys_device *dev, pm_message_t state)
3071 struct dmar_drhd_unit *drhd;
3072 struct intel_iommu *iommu = NULL;
3075 for_each_active_iommu(iommu, drhd) {
3076 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3078 if (!iommu->iommu_state)
3084 for_each_active_iommu(iommu, drhd) {
3085 iommu_disable_translation(iommu);
3087 spin_lock_irqsave(&iommu->register_lock, flag);
3089 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3090 readl(iommu->reg + DMAR_FECTL_REG);
3091 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3092 readl(iommu->reg + DMAR_FEDATA_REG);
3093 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3094 readl(iommu->reg + DMAR_FEADDR_REG);
3095 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3096 readl(iommu->reg + DMAR_FEUADDR_REG);
3098 spin_unlock_irqrestore(&iommu->register_lock, flag);
3103 for_each_active_iommu(iommu, drhd)
3104 kfree(iommu->iommu_state);
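/*
 * Resume: re-initialize the hardware via init_iommu_hw(), then restore
 * the saved fault-event registers on each IOMMU.
 */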
3109 static int iommu_resume(struct sys_device *dev)
3111 struct dmar_drhd_unit *drhd;
3112 struct intel_iommu *iommu = NULL;
3115 if (init_iommu_hw()) {
3116 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3120 for_each_active_iommu(iommu, drhd) {
3122 spin_lock_irqsave(&iommu->register_lock, flag);
3124 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3125 iommu->reg + DMAR_FECTL_REG);
3126 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3127 iommu->reg + DMAR_FEDATA_REG);
3128 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3129 iommu->reg + DMAR_FEADDR_REG);
3130 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3131 iommu->reg + DMAR_FEUADDR_REG);
3133 spin_unlock_irqrestore(&iommu->register_lock, flag);
3136 for_each_active_iommu(iommu, drhd)
3137 kfree(iommu->iommu_state);
3142 static struct sysdev_class iommu_sysclass = {
3144 .resume = iommu_resume,
3145 .suspend = iommu_suspend,
3148 static struct sys_device device_iommu = {
3149 .cls = &iommu_sysclass,
3152 static int __init init_iommu_sysfs(void)
3156 error = sysdev_class_register(&iommu_sysclass);
3160 error = sysdev_register(&device_iommu);
3162 sysdev_class_unregister(&iommu_sysclass);
3168 static int __init init_iommu_sysfs(void)
3172 #endif /* CONFIG_SUSPEND */
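/*
 * Main entry point: parse the DMAR table, initialize each DRHD unit,
 * set up the mempools and reserved IOVA ranges, then install
 * intel_dma_ops and register with the generic IOMMU API.
 */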
3174 int __init intel_iommu_init(void)
3178 if (dmar_table_init())
3181 if (dmar_dev_scope_init())
3185 * Check the need for DMA-remapping initialization now.
3186 * The initialization above is also used by interrupt remapping.
3188 if (no_iommu || swiotlb || dmar_disabled)
3191 iommu_init_mempool();
3192 dmar_init_reserved_ranges();
3194 init_no_remapping_devices();
3198 printk(KERN_ERR "IOMMU: dmar init failed\n");
3199 put_iova_domain(&reserved_iova_list);
3200 iommu_exit_mempool();
3204 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3206 init_timer(&unmap_timer);
3208 dma_ops = &intel_dma_ops;
3212 register_iommu(&intel_iommu_ops);
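/*
 * Tear down the context entries that were set up for the bridges
 * between a device and its upstream PCIe-to-PCI bridge, if any.
 */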
3217 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3218 struct pci_dev *pdev)
3220 struct pci_dev *tmp, *parent;
3222 if (!iommu || !pdev)
3225 /* dependent device detach */
3226 tmp = pci_find_upstream_pcie_bridge(pdev);
3227 /* Secondary interface's bus number and devfn 0 */
3229 parent = pdev->bus->self;
3230 while (parent != tmp) {
3231 iommu_detach_dev(iommu, parent->bus->number,
3233 parent = parent->bus->self;
3235 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
3236 iommu_detach_dev(iommu,
3237 tmp->subordinate->number, 0);
3238 else /* this is a legacy PCI bridge */
3239 iommu_detach_dev(iommu, tmp->bus->number,
3244 static void domain_remove_one_dev_info(struct dmar_domain *domain,
3245 struct pci_dev *pdev)
3247 struct device_domain_info *info;
3248 struct intel_iommu *iommu;
3249 unsigned long flags;
3251 struct list_head *entry, *tmp;
3253 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3258 spin_lock_irqsave(&device_domain_lock, flags);
3259 list_for_each_safe(entry, tmp, &domain->devices) {
3260 info = list_entry(entry, struct device_domain_info, link);
3261 /* No need to compare PCI domain; it has to be the same */
3262 if (info->bus == pdev->bus->number &&
3263 info->devfn == pdev->devfn) {
3264 list_del(&info->link);
3265 list_del(&info->global);
3267 info->dev->dev.archdata.iommu = NULL;
3268 spin_unlock_irqrestore(&device_domain_lock, flags);
3270 iommu_disable_dev_iotlb(info);
3271 iommu_detach_dev(iommu, info->bus, info->devfn);
3272 iommu_detach_dependent_devices(iommu, pdev);
3273 free_devinfo_mem(info);
3275 spin_lock_irqsave(&device_domain_lock, flags);
3283 /* If no other device under the same iommu is owned by this
3284 * domain, clear this iommu from iommu_bmp and update the
3285 * iommu count and coherency.
 */
3287 if (iommu == device_to_iommu(info->segment, info->bus,
3293 unsigned long tmp_flags;
3294 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
3295 clear_bit(iommu->seq_id, &domain->iommu_bmp);
3296 domain->iommu_count--;
3297 domain_update_iommu_cap(domain);
3298 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
3301 spin_unlock_irqrestore(&device_domain_lock, flags);
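/*
 * Detach every device from a VM domain, dropping each device's iommu
 * from the domain's iommu_bmp as it goes.
 */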
3304 static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3306 struct device_domain_info *info;
3307 struct intel_iommu *iommu;
3308 unsigned long flags1, flags2;
3310 spin_lock_irqsave(&device_domain_lock, flags1);
3311 while (!list_empty(&domain->devices)) {
3312 info = list_entry(domain->devices.next,
3313 struct device_domain_info, link);
3314 list_del(&info->link);
3315 list_del(&info->global);
3317 info->dev->dev.archdata.iommu = NULL;
3319 spin_unlock_irqrestore(&device_domain_lock, flags1);
3321 iommu_disable_dev_iotlb(info);
3322 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
3323 iommu_detach_dev(iommu, info->bus, info->devfn);
3324 iommu_detach_dependent_devices(iommu, info->dev);
3326 /* clear this iommu from iommu_bmp, update the iommu count
 * and coherency */
3329 spin_lock_irqsave(&domain->iommu_lock, flags2);
3330 if (test_and_clear_bit(iommu->seq_id,
3331 &domain->iommu_bmp)) {
3332 domain->iommu_count--;
3333 domain_update_iommu_cap(domain);
3335 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3337 free_devinfo_mem(info);
3338 spin_lock_irqsave(&device_domain_lock, flags1);
3340 spin_unlock_irqrestore(&device_domain_lock, flags1);
3343 /* domain id for a virtual machine; it is never programmed into a context entry */
3344 static unsigned long vm_domid;
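/* Return the smallest AGAW among all IOMMUs backing this domain. */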
3346 static int vm_domain_min_agaw(struct dmar_domain *domain)
3349 int min_agaw = domain->agaw;
3351 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
3352 for (; i < g_num_of_iommus; ) {
3353 if (min_agaw > g_iommus[i]->agaw)
3354 min_agaw = g_iommus[i]->agaw;
3356 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
3362 static struct dmar_domain *iommu_alloc_vm_domain(void)
3364 struct dmar_domain *domain;
3366 domain = alloc_domain_mem();
3370 domain->id = vm_domid++;
3371 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
3372 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
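/*
 * Initialize a VM domain that is not tied to a specific IOMMU: set up
 * its IOVA allocator, address widths and top-level page directory.
 */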
3377 static int md_domain_init(struct dmar_domain *domain, int guest_width)
3381 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
3382 spin_lock_init(&domain->iommu_lock);
3384 domain_reserve_special_ranges(domain);
3386 /* calculate AGAW */
3387 domain->gaw = guest_width;
3388 adjust_width = guestwidth_to_adjustwidth(guest_width);
3389 domain->agaw = width_to_agaw(adjust_width);
3391 INIT_LIST_HEAD(&domain->devices);
3393 domain->iommu_count = 0;
3394 domain->iommu_coherency = 0;
3395 domain->iommu_snooping = 0;
3396 domain->max_addr = 0;
3398 /* always allocate the top pgd */
3399 domain->pgd = (struct dma_pte *)alloc_pgtable_page();
3402 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
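/*
 * Release any domain-id slots this VM domain still occupies in each
 * IOMMU's domain_ids bitmap.
 */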
3406 static void iommu_free_vm_domain(struct dmar_domain *domain)
3408 unsigned long flags;
3409 struct dmar_drhd_unit *drhd;
3410 struct intel_iommu *iommu;
3412 unsigned long ndomains;
3414 for_each_drhd_unit(drhd) {
3417 iommu = drhd->iommu;
3419 ndomains = cap_ndoms(iommu->cap);
3420 i = find_first_bit(iommu->domain_ids, ndomains);
3421 for (; i < ndomains; ) {
3422 if (iommu->domains[i] == domain) {
3423 spin_lock_irqsave(&iommu->lock, flags);
3424 clear_bit(i, iommu->domain_ids);
3425 iommu->domains[i] = NULL;
3426 spin_unlock_irqrestore(&iommu->lock, flags);
3429 i = find_next_bit(iommu->domain_ids, ndomains, i+1);
3434 static void vm_domain_exit(struct dmar_domain *domain)
3436 /* Domain 0 is reserved, so don't process it */
3440 vm_domain_remove_all_dev_info(domain);
3442 put_iova_domain(&domain->iovad);
3445 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3447 /* free page tables */
3448 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3450 iommu_free_vm_domain(domain);
3451 free_domain_mem(domain);
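/*
 * The iommu_ops callbacks below are invoked through the generic IOMMU
 * API, e.g. by KVM device assignment.
 */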
3454 static int intel_iommu_domain_init(struct iommu_domain *domain)
3456 struct dmar_domain *dmar_domain;
3458 dmar_domain = iommu_alloc_vm_domain();
3461 "intel_iommu_domain_init: dmar_domain == NULL\n");
3464 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
3466 "intel_iommu_domain_init() failed\n");
3467 vm_domain_exit(dmar_domain);
3470 domain->priv = dmar_domain;
3475 static void intel_iommu_domain_destroy(struct iommu_domain *domain)
3477 struct dmar_domain *dmar_domain = domain->priv;
3479 domain->priv = NULL;
3480 vm_domain_exit(dmar_domain);
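/*
 * Attach a device to a VM domain: detach it from any previous domain
 * first, then verify the IOMMU's AGAW covers the domain's max_addr
 * before adding the device.
 */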
3483 static int intel_iommu_attach_device(struct iommu_domain *domain,
3486 struct dmar_domain *dmar_domain = domain->priv;
3487 struct pci_dev *pdev = to_pci_dev(dev);
3488 struct intel_iommu *iommu;
3492 /* normally pdev is not mapped */
3493 if (unlikely(domain_context_mapped(pdev))) {
3494 struct dmar_domain *old_domain;
3496 old_domain = find_domain(pdev);
3498 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
3499 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
3500 domain_remove_one_dev_info(old_domain, pdev);
3502 domain_remove_dev_info(old_domain);
3506 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3511 /* check if this iommu agaw is sufficient for max mapped address */
3512 addr_width = agaw_to_width(iommu->agaw);
3513 end = DOMAIN_MAX_ADDR(addr_width);
3514 end = end & VTD_PAGE_MASK;
3515 if (end < dmar_domain->max_addr) {
3516 printk(KERN_ERR "%s: iommu agaw (%d) is not "
3517 "sufficient for the mapped address (%llx)\n",
3518 __func__, iommu->agaw, dmar_domain->max_addr);
3522 return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
3525 static void intel_iommu_detach_device(struct iommu_domain *domain,
3528 struct dmar_domain *dmar_domain = domain->priv;
3529 struct pci_dev *pdev = to_pci_dev(dev);
3531 domain_remove_one_dev_info(dmar_domain, pdev);
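/*
 * Map an IOVA range for the generic IOMMU API: translate IOMMU_* prot
 * flags to DMA PTE bits, grow max_addr if needed (checking the minimum
 * AGAW), then build the PTEs.
 */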
3534 static int intel_iommu_map_range(struct iommu_domain *domain,
3535 unsigned long iova, phys_addr_t hpa,
3536 size_t size, int iommu_prot)
3538 struct dmar_domain *dmar_domain = domain->priv;
3544 if (iommu_prot & IOMMU_READ)
3545 prot |= DMA_PTE_READ;
3546 if (iommu_prot & IOMMU_WRITE)
3547 prot |= DMA_PTE_WRITE;
3548 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
3549 prot |= DMA_PTE_SNP;
3551 max_addr = iova + size;
3552 if (dmar_domain->max_addr < max_addr) {
3556 /* check if minimum agaw is sufficient for mapped address */
3557 min_agaw = vm_domain_min_agaw(dmar_domain);
3558 addr_width = agaw_to_width(min_agaw);
3559 end = DOMAIN_MAX_ADDR(addr_width);
3560 end = end & VTD_PAGE_MASK;
3561 if (end < max_addr) {
3562 printk(KERN_ERR "%s: iommu agaw (%d) is not "
3563 "sufficient for the mapped address (%llx)\n",
3564 __func__, min_agaw, max_addr);
3567 dmar_domain->max_addr = max_addr;
3569 /* Round up size to next multiple of PAGE_SIZE, if it and
3570 the low bits of hpa would take us onto the next page */
3571 size = aligned_nrpages(hpa, size);
3572 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
3573 hpa >> VTD_PAGE_SHIFT, size, prot);
3577 static void intel_iommu_unmap_range(struct iommu_domain *domain,
3578 unsigned long iova, size_t size)
3580 struct dmar_domain *dmar_domain = domain->priv;
3585 dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
3586 (iova + size - 1) >> VTD_PAGE_SHIFT);
3588 if (dmar_domain->max_addr == iova + size)
3589 dmar_domain->max_addr = iova;
3592 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
3595 struct dmar_domain *dmar_domain = domain->priv;
3596 struct dma_pte *pte;
3599 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
3601 phys = dma_pte_addr(pte);
3606 static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
3609 struct dmar_domain *dmar_domain = domain->priv;
3611 if (cap == IOMMU_CAP_CACHE_COHERENCY)
3612 return dmar_domain->iommu_snooping;
3617 static struct iommu_ops intel_iommu_ops = {
3618 .domain_init = intel_iommu_domain_init,
3619 .domain_destroy = intel_iommu_domain_destroy,
3620 .attach_dev = intel_iommu_attach_device,
3621 .detach_dev = intel_iommu_detach_device,
3622 .map = intel_iommu_map_range,
3623 .unmap = intel_iommu_unmap_range,
3624 .iova_to_phys = intel_iommu_iova_to_phys,
3625 .domain_has_cap = intel_iommu_domain_has_cap,
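/*
 * Example (illustrative only, not part of this file): a consumer such
 * as KVM drives these callbacks through the generic IOMMU API, roughly:
 *
 *	domain = iommu_domain_alloc();
 *	iommu_attach_device(domain, &pdev->dev);
 *	iommu_map_range(domain, iova, hpa, size, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap_range(domain, iova, size);
 *	iommu_detach_device(domain, &pdev->dev);
 *	iommu_domain_free(domain);
 */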
3628 static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
3631 * Mobile 4 Series Chipset neglects to set RWBF capability, but needs it:
3634 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
3638 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);