[ARM] introduce dma_cache_maint_page()
Author:     Nicolas Pitre <nico@cam.org>
AuthorDate: Fri, 13 Mar 2009 02:52:09 +0000 (22:52 -0400)
Commit:     Nicolas Pitre <nico@cam.org>
CommitDate: Mon, 16 Mar 2009 01:01:21 +0000 (21:01 -0400)
This is a helper to be used by the DMA mapping API to handle cache
maintenance for memory identified by a page structure instead of a
virtual address.  Those pages may or may not be highmem pages, and
when they're highmem pages, they may or may not be virtually mapped.
When they're not mapped, there is no L1 cache to worry about.  But
even in that case the L2 cache must be processed since unmapped highmem
pages can still be L2 cached.

Signed-off-by: Nicolas Pitre <nico@marvell.com>
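
To see why a page-based helper is needed, consider a driver that maps a
possibly-highmem page for DMA (a minimal sketch, not part of this patch;
dev is assumed to be a valid struct device and error handling is elided):

	/*
	 * Sketch: DMA from a page that may live in highmem.  Such a page
	 * may have no kernel virtual mapping at all, so passing
	 * page_address(page) into dma_cache_maint() is not an option;
	 * dma_map_page() now goes through dma_cache_maint_page() instead.
	 */
	struct page *page = alloc_page(GFP_HIGHUSER);	/* may be highmem */
	dma_addr_t handle;

	handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	/* ... point the device at 'handle' and run the transfer ... */
	dma_unmap_page(dev, handle, PAGE_SIZE, DMA_FROM_DEVICE);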
arch/arm/include/asm/dma-mapping.h
arch/arm/include/asm/highmem.h
arch/arm/mm/dma-mapping.c

diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 22cb14ec3438e9d74b1fa8f626bb65945c2d4607..59fa762e9c66f66d7cdde3f4a9beebc3e4faba92 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -57,6 +57,8 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
  */
 extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
+extern void dma_cache_maint_page(struct page *page, unsigned long offset,
+                                size_t size, int rw);
 
 /*
  * Return whether the given device DMA address mask can be supported
@@ -316,7 +318,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
        BUG_ON(!valid_dma_direction(dir));
 
        if (!arch_is_coherent())
-               dma_cache_maint(page_address(page) + offset, size, dir);
+               dma_cache_maint_page(page, offset, size, dir);
 
        return page_to_dma(dev, page) + offset;
 }
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 023d5b374544441895b26006311bb5b880b926b8..7f36d00600b43da38ba4fda7fa59aa61da7d60ab 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
 
 extern pte_t *pkmap_page_table;
 
+#define ARCH_NEEDS_KMAP_HIGH_GET
+
 extern void *kmap_high(struct page *page);
+extern void *kmap_high_get(struct page *page);
 extern void kunmap_high(struct page *page);
 
 extern void *kmap(struct page *page);
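
ARCH_NEEDS_KMAP_HIGH_GET tells mm/highmem.c to provide kmap_high_get(),
which the code below relies on: it returns the page's existing kmap
virtual address with the mapping pinned, or NULL if the page is not
currently mapped.  In rough terms it behaves as follows (a sketch of the
intended semantics, not the exact mm/highmem.c code; the lock name is an
assumption):

	/*
	 * Sketch: atomically look up an existing kmap of @page.  If one
	 * exists, pin it by bumping its pkmap refcount and return its
	 * virtual address; otherwise return NULL.  A non-NULL result
	 * must later be released with kunmap_high().
	 */
	void *kmap_high_get(struct page *page)
	{
		unsigned long vaddr, flags;

		spin_lock_irqsave(&kmap_lock, flags);	/* lock name assumed */
		vaddr = (unsigned long)page_address(page);
		if (vaddr)
			pkmap_count[PKMAP_NR(vaddr)]++;	/* pin the mapping */
		spin_unlock_irqrestore(&kmap_lock, flags);

		return (void *)vaddr;
	}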
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index f1ef5613ccd43248f07ae5aa91963f5ab2eff503..510c179b0ac873b2ee26c5bbaa70db25caabece9 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -19,6 +19,7 @@
 #include <linux/dma-mapping.h>
 
 #include <asm/memory.h>
+#include <asm/highmem.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/sizes.h>
@@ -517,6 +518,74 @@ void dma_cache_maint(const void *start, size_t size, int direction)
 }
 EXPORT_SYMBOL(dma_cache_maint);
 
+static void dma_cache_maint_contiguous(struct page *page, unsigned long offset,
+                                      size_t size, int direction)
+{
+       void *vaddr;
+       unsigned long paddr;
+       void (*inner_op)(const void *, const void *);
+       void (*outer_op)(unsigned long, unsigned long);
+
+       switch (direction) {
+       case DMA_FROM_DEVICE:           /* invalidate only */
+               inner_op = dmac_inv_range;
+               outer_op = outer_inv_range;
+               break;
+       case DMA_TO_DEVICE:             /* writeback only */
+               inner_op = dmac_clean_range;
+               outer_op = outer_clean_range;
+               break;
+       case DMA_BIDIRECTIONAL:         /* writeback and invalidate */
+               inner_op = dmac_flush_range;
+               outer_op = outer_flush_range;
+               break;
+       default:
+               BUG();
+       }
+
+       if (!PageHighMem(page)) {
+               vaddr = page_address(page) + offset;
+               inner_op(vaddr, vaddr + size);
+       } else {
+               vaddr = kmap_high_get(page);
+               if (vaddr) {
+                       vaddr += offset;
+                       inner_op(vaddr, vaddr + size);
+                       kunmap_high(page);
+               }
+       }
+
+       paddr = page_to_phys(page) + offset;
+       outer_op(paddr, paddr + size);
+}
+
+void dma_cache_maint_page(struct page *page, unsigned long offset,
+                         size_t size, int dir)
+{
+       /*
+        * A single sg entry may refer to multiple physically contiguous
+        * pages.  But we still need to process highmem pages individually.
+        * If highmem is not configured then the bulk of this loop gets
+        * optimized out.
+        */
+       size_t left = size;
+       do {
+               size_t len = left;
+               if (PageHighMem(page) && len + offset > PAGE_SIZE) {
+                       if (offset >= PAGE_SIZE) {
+                               page += offset / PAGE_SIZE;
+                               offset %= PAGE_SIZE;
+                       }
+                       len = PAGE_SIZE - offset;
+               }
+               dma_cache_maint_contiguous(page, offset, len, dir);
+               offset = 0;
+               page++;
+               left -= len;
+       } while (left);
+}
+EXPORT_SYMBOL(dma_cache_maint_page);
+
 /**
  * dma_map_sg - map a set of SG buffers for streaming mode DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
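
The effect of the splitting loop in dma_cache_maint_page() above can be
checked with a small stand-alone simulation (plain user-space C; the
4 KiB PAGE_SIZE and the example offset/length are assumptions, and only
the highmem per-page splitting branch is modelled):

	#include <stdio.h>

	#define PAGE_SIZE 4096UL	/* assumed page size for the example */

	int main(void)
	{
		/* e.g. an sg entry spanning pages: offset 3000, length 10000 */
		unsigned long page = 0, offset = 3000;
		size_t left = 10000;

		do {
			size_t len = left;
			if (len + offset > PAGE_SIZE) {	/* highmem case */
				if (offset >= PAGE_SIZE) {
					page += offset / PAGE_SIZE;
					offset %= PAGE_SIZE;
				}
				len = PAGE_SIZE - offset;
			}
			printf("page+%lu offset=%lu len=%zu\n",
			       page, offset, len);
			offset = 0;
			page++;
			left -= len;
		} while (left);

		return 0;
	}

For offset 3000 and length 10000 this prints four chunks of 1096, 4096,
4096 and 712 bytes: no chunk ever crosses a page boundary, which is what
allows each highmem page to be kmapped and maintained individually.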
@@ -614,7 +683,8 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                        continue;
 
                if (!arch_is_coherent())
-                       dma_cache_maint(sg_virt(s), s->length, dir);
+                       dma_cache_maint_page(sg_page(s), s->offset,
+                                            s->length, dir);
        }
 }
 EXPORT_SYMBOL(dma_sync_sg_for_device);
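
For reference, the driver-side pattern that exercises this path might
look as follows (a hedged sketch; page0/page1, the lengths and the DMA
direction are illustrative, and error handling is elided):

	struct scatterlist sg[2];
	int nents;

	sg_init_table(sg, 2);
	sg_set_page(&sg[0], page0, PAGE_SIZE, 0);  /* driver-owned pages;  */
	sg_set_page(&sg[1], page1, PAGE_SIZE, 0);  /* either may be highmem */

	nents = dma_map_sg(dev, sg, 2, DMA_TO_DEVICE);
	/* ... device consumes the buffers ... */

	/* CPU rewrites the buffers, then hands them back to the device: */
	dma_sync_sg_for_cpu(dev, sg, nents, DMA_TO_DEVICE);
	/* ... modify the data ... */
	dma_sync_sg_for_device(dev, sg, nents, DMA_TO_DEVICE);

	dma_unmap_sg(dev, sg, 2, DMA_TO_DEVICE);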