intel-iommu: Clean up handling of "caching mode" vs. IOTLB flushing.
author    David Woodhouse <David.Woodhouse@intel.com>
          Sun, 10 May 2009 18:58:49 +0000 (19:58 +0100)
committer David Woodhouse <David.Woodhouse@intel.com>
          Sun, 10 May 2009 18:58:49 +0000 (19:58 +0100)
As we just did for context cache flushing, clean up the logic around
whether we need to flush the iotlb or just the write-buffer, depending
on caching mode.

Fix the same bug in qi_flush_iotlb() that qi_flush_context() had -- it
isn't supposed to be returning an error; it's supposed to be returning a
flag which triggers a write-buffer flush.
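Concretely, callers had to interpret that flag themselves, as the
intel_unmap_page()/intel_unmap_sg() hunks below show. A minimal
before/after sketch, lifted from those hunks:

	/* before: nonzero return meant "caller must flush write buffer" */
	if (iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
			size >> VTD_PAGE_SHIFT, 0))
		iommu_flush_write_buffer(iommu);

	/* after: void return; the unmap path never needs the conditional
	 * write-buffer flush, since an IOTLB flush implicitly flushes
	 * the write buffer anyway */
	iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
			      size >> VTD_PAGE_SHIFT);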

Remove some superfluous conditional write-buffer flushes which could
never actually trigger, because the call sites in question weren't for
non-present-to-present mapping changes anyway.
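The map paths end up with the pattern below. As a hedged sketch (the
wrapper name flush_after_map is hypothetical; the calls inside it are
the ones the patch installs in __intel_map_single() and intel_map_sg()):

	/* Hypothetical wrapper summarizing the new map-path flush logic. */
	static void flush_after_map(struct intel_iommu *iommu,
				    u64 start_paddr, unsigned long npages)
	{
		if (cap_caching_mode(iommu->cap))
			/* CM=1: hardware may cache not-present entries
			 * (under domain-id 0), so do a real IOTLB flush */
			iommu_flush_iotlb_psi(iommu, 0, start_paddr, npages);
		else
			/* CM=0: a write-buffer flush is sufficient for a
			 * non-present-to-present change */
			iommu_flush_write_buffer(iommu);
	}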

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
drivers/pci/dmar.c
drivers/pci/intel-iommu.c
include/linux/intel-iommu.h

diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 10a071ba32325e45aaa56a44b06734809355f2b0..df6af0d4ec036692d1616b9ad7fa2a0225d962ca 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -735,22 +735,14 @@ void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
        qi_submit_sync(&desc, iommu);
 }
 
-int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
-                  unsigned int size_order, u64 type,
-                  int non_present_entry_flush)
+void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+                   unsigned int size_order, u64 type)
 {
        u8 dw = 0, dr = 0;
 
        struct qi_desc desc;
        int ih = 0;
 
-       if (non_present_entry_flush) {
-               if (!cap_caching_mode(iommu->cap))
-                       return 1;
-               else
-                       did = 0;
-       }
-
        if (cap_write_drain(iommu->cap))
                dw = 1;
 
@@ -762,7 +754,7 @@ int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
        desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
                | QI_IOTLB_AM(size_order);
 
-       return qi_submit_sync(&desc, iommu);
+       qi_submit_sync(&desc, iommu);
 }
 
 /*
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 9f5d9151edc9702e8f6c6a931e610764a2a2e250..f47d04aced87065ab5b892c690971e4f85636dc7 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -891,27 +891,13 @@ static void __iommu_flush_context(struct intel_iommu *iommu,
 }
 
 /* return value determine if we need a write buffer flush */
-static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
-       u64 addr, unsigned int size_order, u64 type,
-       int non_present_entry_flush)
+static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
+                               u64 addr, unsigned int size_order, u64 type)
 {
        int tlb_offset = ecap_iotlb_offset(iommu->ecap);
        u64 val = 0, val_iva = 0;
        unsigned long flag;
 
-       /*
-        * In the non-present entry flush case, if hardware doesn't cache
-        * non-present entry we do nothing and if hardware cache non-present
-        * entry, we flush entries of domain 0 (the domain id is used to cache
-        * any non-present entries)
-        */
-       if (non_present_entry_flush) {
-               if (!cap_caching_mode(iommu->cap))
-                       return 1;
-               else
-                       did = 0;
-       }
-
        switch (type) {
        case DMA_TLB_GLOBAL_FLUSH:
                /* global flush doesn't need set IVA_REG */
@@ -959,12 +945,10 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
                pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
                        (unsigned long long)DMA_TLB_IIRG(type),
                        (unsigned long long)DMA_TLB_IAIG(val));
-       /* flush iotlb entry will implicitly flush write buffer */
-       return 0;
 }
 
-static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
-       u64 addr, unsigned int pages, int non_present_entry_flush)
+static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
+                                 u64 addr, unsigned int pages)
 {
        unsigned int mask;
 
@@ -974,8 +958,7 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
        /* Fallback to domain selective flush if no PSI support */
        if (!cap_pgsel_inv(iommu->cap))
                return iommu->flush.flush_iotlb(iommu, did, 0, 0,
-                                               DMA_TLB_DSI_FLUSH,
-                                               non_present_entry_flush);
+                                               DMA_TLB_DSI_FLUSH);
 
        /*
         * PSI requires page size to be 2 ^ x, and the base address is naturally
@@ -985,11 +968,10 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
        /* Fallback to domain selective flush if size is too big */
        if (mask > cap_max_amask_val(iommu->cap))
                return iommu->flush.flush_iotlb(iommu, did, 0, 0,
-                       DMA_TLB_DSI_FLUSH, non_present_entry_flush);
+                                               DMA_TLB_DSI_FLUSH);
 
        return iommu->flush.flush_iotlb(iommu, did, addr, mask,
-                                       DMA_TLB_PSI_FLUSH,
-                                       non_present_entry_flush);
+                                       DMA_TLB_PSI_FLUSH);
 }
 
 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
@@ -1423,7 +1405,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
                                           (((u16)bus) << 8) | devfn,
                                           DMA_CCMD_MASK_NOBIT,
                                           DMA_CCMD_DEVICE_INVL);
-               iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);
+               iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
        } else {
                iommu_flush_write_buffer(iommu);
        }
@@ -1558,8 +1540,7 @@ static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
        clear_context_table(iommu, bus, devfn);
        iommu->flush.flush_context(iommu, 0, 0, 0,
                                           DMA_CCMD_GLOBAL_INVL);
-       iommu->flush.flush_iotlb(iommu, 0, 0, 0,
-                                        DMA_TLB_GLOBAL_FLUSH, 0);
+       iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
 }
 
 static void domain_remove_dev_info(struct dmar_domain *domain)
@@ -2096,8 +2077,7 @@ static int __init init_dmars(void)
                iommu_set_root_entry(iommu);
 
                iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
-               iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
-                                        0);
+               iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
                iommu_disable_protect_mem_regions(iommu);
 
                ret = iommu_enable_translation(iommu);
@@ -2244,10 +2224,11 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
        if (ret)
                goto error;
 
-       /* it's a non-present to present mapping */
-       ret = iommu_flush_iotlb_psi(iommu, domain->id,
-                       start_paddr, size >> VTD_PAGE_SHIFT, 1);
-       if (ret)
+       /* it's a non-present to present mapping. Only flush if caching mode */
+       if (cap_caching_mode(iommu->cap))
+               iommu_flush_iotlb_psi(iommu, 0, start_paddr,
+                                     size >> VTD_PAGE_SHIFT);
+       else
                iommu_flush_write_buffer(iommu);
 
        return start_paddr + ((u64)paddr & (~PAGE_MASK));
@@ -2283,7 +2264,7 @@ static void flush_unmaps(void)
 
                if (deferred_flush[i].next) {
                        iommu->flush.flush_iotlb(iommu, 0, 0, 0,
-                                                DMA_TLB_GLOBAL_FLUSH, 0);
+                                                DMA_TLB_GLOBAL_FLUSH);
                        for (j = 0; j < deferred_flush[i].next; j++) {
                                __free_iova(&deferred_flush[i].domain[j]->iovad,
                                                deferred_flush[i].iova[j]);
@@ -2362,9 +2343,8 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
        /* free page tables */
        dma_pte_free_pagetable(domain, start_addr, start_addr + size);
        if (intel_iommu_strict) {
-               if (iommu_flush_iotlb_psi(iommu,
-                       domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0))
-                       iommu_flush_write_buffer(iommu);
+               iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
+                                     size >> VTD_PAGE_SHIFT);
                /* free iova */
                __free_iova(&domain->iovad, iova);
        } else {
@@ -2455,9 +2435,8 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
        /* free page tables */
        dma_pte_free_pagetable(domain, start_addr, start_addr + size);
 
-       if (iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
-                       size >> VTD_PAGE_SHIFT, 0))
-               iommu_flush_write_buffer(iommu);
+       iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
+                             size >> VTD_PAGE_SHIFT);
 
        /* free iova */
        __free_iova(&domain->iovad, iova);
@@ -2549,10 +2528,13 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
                offset += size;
        }
 
-       /* it's a non-present to present mapping */
-       if (iommu_flush_iotlb_psi(iommu, domain->id,
-                       start_addr, offset >> VTD_PAGE_SHIFT, 1))
+       /* it's a non-present to present mapping. Only flush if caching mode */
+       if (cap_caching_mode(iommu->cap))
+               iommu_flush_iotlb_psi(iommu, 0, start_addr,
+                                     offset >> VTD_PAGE_SHIFT);
+       else
                iommu_flush_write_buffer(iommu);
+
        return nelems;
 }
 
@@ -2711,9 +2693,9 @@ static int init_iommu_hw(void)
                iommu_set_root_entry(iommu);
 
                iommu->flush.flush_context(iommu, 0, 0, 0,
-                                               DMA_CCMD_GLOBAL_INVL);
+                                          DMA_CCMD_GLOBAL_INVL);
                iommu->flush.flush_iotlb(iommu, 0, 0, 0,
-                                               DMA_TLB_GLOBAL_FLUSH, 0);
+                                        DMA_TLB_GLOBAL_FLUSH);
                iommu_disable_protect_mem_regions(iommu);
                iommu_enable_translation(iommu);
        }
@@ -2728,9 +2710,9 @@ static void iommu_flush_all(void)
 
        for_each_active_iommu(iommu, drhd) {
                iommu->flush.flush_context(iommu, 0, 0, 0,
-                                               DMA_CCMD_GLOBAL_INVL);
+                                          DMA_CCMD_GLOBAL_INVL);
                iommu->flush.flush_iotlb(iommu, 0, 0, 0,
-                                               DMA_TLB_GLOBAL_FLUSH, 0);
+                                        DMA_TLB_GLOBAL_FLUSH);
        }
 }
 
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index f2b94dafbf38b90fbbe6fbc9d30198c692ec32a6..29e05a034c0955cabb03824990c1082a2ba064a2 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -283,8 +283,8 @@ struct ir_table {
 struct iommu_flush {
        void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
                              u8 fm, u64 type);
-       int (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
-               unsigned int size_order, u64 type, int non_present_entry_flush);
+       void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
+                           unsigned int size_order, u64 type);
 };
 
 enum {
@@ -341,9 +341,8 @@ extern void qi_global_iec(struct intel_iommu *iommu);
 
 extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
                             u8 fm, u64 type);
-extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
-                         unsigned int size_order, u64 type,
-                         int non_present_entry_flush);
+extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+                         unsigned int size_order, u64 type);
 
 extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);