powerpc: Fix up dma_alloc_coherent() on platforms without cache coherency.
author     Benjamin Herrenschmidt <benh@kernel.crashing.org>
           Wed, 27 May 2009 03:50:33 +0000 (13:50 +1000)
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>
           Wed, 27 May 2009 06:33:59 +0000 (16:33 +1000)
The implementation we just revived has issues, such as using a
Kconfig-defined virtual address area in kernel space that nothing
actually carves out (and thus will overlap whatever is there), and a
dependency on being self-contained in a single PTE page, which adds
unnecessary constraints on the kernel virtual address space.

This fixes it by using more classic PTE accessors and automatically
locating the area for consistent memory, carving an appropriate hole
in the kernel virtual address space and leaving only the size of that
area as a Kconfig option. It also brings in some dma-mask related
fixes from the ARM implementation, which was almost identical
initially but has since grown its own fixes.
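
To visualize the carve-out: the consistent pool now sits immediately
below KVIRT_TOP, pushing IOREMAP_TOP down by CONFIG_CONSISTENT_SIZE.
A rough top-down map of the resulting ppc32 kernel virtual layout (a
sketch only; exact boundaries are platform-dependent, see the
pgtable-ppc32.h and mem.c hunks below):

    KVIRT_TOP                        top of kernel virtual space
    IOREMAP_TOP .. KVIRT_TOP         consistent (uncached) DMA pool,
                                     CONFIG_CONSISTENT_SIZE bytes
    ioremap_bot .. IOREMAP_TOP       early ioremap
    below ioremap_bot                vmalloc & ioremap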

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
arch/powerpc/Kconfig
arch/powerpc/include/asm/dma-mapping.h
arch/powerpc/include/asm/pgtable-ppc32.h
arch/powerpc/kernel/dma.c
arch/powerpc/mm/dma-noncoherent.c
arch/powerpc/mm/mem.c

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 3bb43adce44dc3c907d63de44660ba8cc4fb6531..cdc9a6ff4be823a356521e00b7d35eb15b28e013 100644
@@ -868,19 +868,6 @@ config TASK_SIZE
        default "0x80000000" if PPC_PREP || PPC_8xx
        default "0xc0000000"
 
-config CONSISTENT_START_BOOL
-       bool "Set custom consistent memory pool address"
-       depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
-       help
-         This option allows you to set the base virtual address
-         of the consistent memory pool.  This pool of virtual
-         memory is used to make consistent memory allocations.
-
-config CONSISTENT_START
-       hex "Base virtual address of consistent memory pool" if CONSISTENT_START_BOOL
-       default "0xfd000000" if (NOT_COHERENT_CACHE && 8xx)
-       default "0xff100000" if NOT_COHERENT_CACHE
-
 config CONSISTENT_SIZE_BOOL
        bool "Set custom consistent memory pool size"
        depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index c69f2b5f0cc40035877ccea1f363c3de6711fc61..cb448d68452c9e01918937952cb1517563538927 100644
@@ -26,7 +26,9 @@
  * allocate the space "normally" and use the cache management functions
  * to ensure it is consistent.
  */
-extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
+struct device;
+extern void *__dma_alloc_coherent(struct device *dev, size_t size,
+                                 dma_addr_t *handle, gfp_t gfp);
 extern void __dma_free_coherent(size_t size, void *vaddr);
 extern void __dma_sync(void *vaddr, size_t size, int direction);
 extern void __dma_sync_page(struct page *page, unsigned long offset,
@@ -37,7 +39,7 @@ extern void __dma_sync_page(struct page *page, unsigned long offset,
  * Cache coherent cores.
  */
 
-#define __dma_alloc_coherent(gfp, size, handle)        NULL
+#define __dma_alloc_coherent(dev, gfp, size, handle)   NULL
 #define __dma_free_coherent(size, addr)                ((void)0)
 #define __dma_sync(addr, size, rw)             ((void)0)
 #define __dma_sync_page(pg, off, sz, rw)       ((void)0)
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index 28fe9d4bae352ff19a44e78f7344edea44270632..c9ff9d75990eb94eaf55944482dff6b5301d3afe 100644
@@ -71,7 +71,11 @@ extern int icache_44x_need_flush;
  * until mem_init() at which point this becomes the top of the vmalloc
  * and ioremap space
  */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+#define IOREMAP_TOP    ((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
+#else
 #define IOREMAP_TOP    KVIRT_TOP
+#endif
 
 /*
  * Just any arbitrary offset to the start of the vmalloc VM area: the
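
As a worked example of the new IOREMAP_TOP arithmetic, a standalone
sketch with assumed values (KVIRT_TOP and the pool size here are
hypothetical; 0x00200000 matches the 2MB Kconfig default):

    /* Minimal sketch of the carve-out arithmetic, with assumed values. */
    #include <stdio.h>

    #define PAGE_MASK       (~0xfffUL)      /* assuming 4K pages */
    #define KVIRT_TOP       0xfe000000UL    /* hypothetical top of kernel space */
    #define CONSISTENT_SIZE 0x00200000UL    /* assumed 2MB pool */

    int main(void)
    {
            /* Same expression as the new IOREMAP_TOP definition above. */
            unsigned long ioremap_top = (KVIRT_TOP - CONSISTENT_SIZE) & PAGE_MASK;

            printf("consistent pool: 0x%08lx..0x%08lx\n",
                   ioremap_top, ioremap_top + CONSISTENT_SIZE);
            /* prints: consistent pool: 0xfde00000..0xfe000000 */
            return 0;
    }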
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 53c7788cba78d2978002e2edc5d03027f459e974..6b02793dc75b5bd3d95bd2d1ff767ec842fa4905 100644
@@ -32,7 +32,7 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 {
        void *ret;
 #ifdef CONFIG_NOT_COHERENT_CACHE
-       ret = __dma_alloc_coherent(size, dma_handle, flag);
+       ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
        if (ret == NULL)
                return NULL;
        *dma_handle += get_dma_direct_offset(dev);
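
After allocation, dma_direct_alloc_coherent() turns the CPU physical
address into a bus address by adding the platform's direct-mapping
offset. A toy, runnable illustration (get_offset() and its value are
hypothetical stand-ins for get_dma_direct_offset()):

    /* Sketch of the handle fixup: physical address + platform DMA offset.
     * get_offset() is hypothetical; the kernel calls get_dma_direct_offset(). */
    #include <stdio.h>
    #include <stdint.h>

    static uint64_t get_offset(void)
    {
            return 0x80000000ULL;   /* assumed platform bus offset */
    }

    int main(void)
    {
            uint64_t dma_handle = 0x01000000ULL; /* phys addr from the allocator */

            dma_handle += get_offset();          /* same fixup as in dma.c above */
            printf("bus address: %#llx\n", (unsigned long long)dma_handle);
            return 0;
    }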
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index b7dc4c19f58211641d2bb1db44dea62315a3d641..36692f5c9a7637348bed0bc6f9318498035af14b 100644
 
 #include <asm/tlbflush.h>
 
+#include "mmu_decl.h"
+
 /*
  * This address range defaults to a value that is safe for all
  * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It
  * can be further configured for specific applications under
  * the "Advanced Setup" menu. -Matt
  */
-#define CONSISTENT_BASE        (CONFIG_CONSISTENT_START)
-#define CONSISTENT_END (CONFIG_CONSISTENT_START + CONFIG_CONSISTENT_SIZE)
+#define CONSISTENT_BASE                (IOREMAP_TOP)
+#define CONSISTENT_END                 (CONSISTENT_BASE + CONFIG_CONSISTENT_SIZE)
 #define CONSISTENT_OFFSET(x)   (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
 
 /*
  * This is the page table (2MB) covering uncached, DMA consistent allocations
  */
-static pte_t *consistent_pte;
 static DEFINE_SPINLOCK(consistent_lock);
 
 /*
@@ -148,22 +149,38 @@ static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsi
  * virtual and bus address for that space.
  */
 void *
-__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
+__dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
 {
        struct page *page;
        struct ppc_vm_region *c;
        unsigned long order;
-       u64 mask = 0x00ffffff, limit; /* ISA default */
+       u64 mask = ISA_DMA_THRESHOLD, limit;
 
-       if (!consistent_pte) {
-               printk(KERN_ERR "%s: not initialised\n", __func__);
-               dump_stack();
-               return NULL;
+       if (dev) {
+               mask = dev->coherent_dma_mask;
+
+               /*
+                * Sanity check the DMA mask - it must be non-zero, and
+                * must be able to be satisfied by a DMA allocation.
+                */
+               if (mask == 0) {
+                       dev_warn(dev, "coherent DMA mask is unset\n");
+                       goto no_page;
+               }
+
+               if ((~mask) & ISA_DMA_THRESHOLD) {
+                       dev_warn(dev, "coherent DMA mask %#llx is smaller "
+                                "than system GFP_DMA mask %#llx\n",
+                                mask, (unsigned long long)ISA_DMA_THRESHOLD);
+                       goto no_page;
+               }
        }
 
+
        size = PAGE_ALIGN(size);
        limit = (mask + 1) & ~mask;
-       if ((limit && size >= limit) || size >= (CONSISTENT_END - CONSISTENT_BASE)) {
+       if ((limit && size >= limit) ||
+           size >= (CONSISTENT_END - CONSISTENT_BASE)) {
                printk(KERN_WARNING "coherent allocation too big (requested %#x mask %#Lx)\n",
                       size, mask);
                return NULL;
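
The limit computation above is worth a worked example: for a mask of
the form 2^n - 1, (mask + 1) & ~mask yields the size of the window
the mask can address, and it wraps to 0 for a full 64-bit mask, which
the "limit &&" guard treats as unlimited. A runnable sketch with
sample masks (values illustrative, not from the commit):

    /* Sketch of the limit computation in __dma_alloc_coherent(). */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t dma_limit(uint64_t mask)
    {
            return (mask + 1) & ~mask;  /* same expression as above */
    }

    int main(void)
    {
            printf("%#llx\n", (unsigned long long)dma_limit(0x00ffffffULL));
            /* 0x1000000: a 24-bit ISA-style mask allows 16MB */
            printf("%#llx\n", (unsigned long long)dma_limit(0xffffffffULL));
            /* 0x100000000: a full 32-bit mask allows 4GB */
            printf("%#llx\n", (unsigned long long)dma_limit(~0ULL));
            /* 0: wraps to zero, i.e. no limit check at all */
            return 0;
    }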
@@ -171,6 +188,7 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
 
        order = get_order(size);
 
+       /* Might be useful if we ever have a real legacy DMA zone... */
        if (mask != 0xffffffff)
                gfp |= GFP_DMA;
 
@@ -195,7 +213,6 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
                            gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
        if (c) {
                unsigned long vaddr = c->vm_start;
-               pte_t *pte = consistent_pte + CONSISTENT_OFFSET(vaddr);
                struct page *end = page + (1 << order);
 
                split_page(page, order);
@@ -206,13 +223,10 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
                *handle = page_to_phys(page);
 
                do {
-                       BUG_ON(!pte_none(*pte));
-
                        SetPageReserved(page);
-                       set_pte_at(&init_mm, vaddr,
-                                  pte, mk_pte(page, pgprot_noncached(PAGE_KERNEL)));
+                       map_page(vaddr, page_to_phys(page),
+                                pgprot_noncached(PAGE_KERNEL));
                        page++;
-                       pte++;
                        vaddr += PAGE_SIZE;
                } while (size -= PAGE_SIZE);
 
@@ -241,8 +255,7 @@ void __dma_free_coherent(size_t size, void *vaddr)
 {
        struct ppc_vm_region *c;
        unsigned long flags, addr;
-       pte_t *ptep;
-
+       
        size = PAGE_ALIGN(size);
 
        spin_lock_irqsave(&consistent_lock, flags);
@@ -258,29 +271,26 @@ void __dma_free_coherent(size_t size, void *vaddr)
                size = c->vm_end - c->vm_start;
        }
 
-       ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
        addr = c->vm_start;
        do {
-               pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
+               pte_t *ptep;
                unsigned long pfn;
 
-               ptep++;
-               addr += PAGE_SIZE;
-
-               if (!pte_none(pte) && pte_present(pte)) {
-                       pfn = pte_pfn(pte);
-
+               ptep = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(addr),
+                                                              addr),
+                                                   addr),
+                                        addr);
+               if (!pte_none(*ptep) && pte_present(*ptep)) {
+                       pfn = pte_pfn(*ptep);
+                       pte_clear(&init_mm, addr, ptep);
                        if (pfn_valid(pfn)) {
                                struct page *page = pfn_to_page(pfn);
-                               ClearPageReserved(page);
 
+                               ClearPageReserved(page);
                                __free_page(page);
-                               continue;
                        }
                }
-
-               printk(KERN_CRIT "%s: bad page in kernel page table\n",
-                      __func__);
+               addr += PAGE_SIZE;
        } while (size -= PAGE_SIZE);
 
        flush_tlb_kernel_range(c->vm_start, c->vm_end);
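
The free path now finds each PTE with a full page-table walk instead
of indexing the old consistent_pte array. Factored into a helper for
readability (a kernel-style sketch using only accessors visible in
this hunk; the helper itself is hypothetical, not part of the commit):

    /* Hypothetical helper equivalent to the nested lookup above. */
    static pte_t *consistent_pte_lookup(unsigned long addr)
    {
            pgd_t *pgd = pgd_offset_k(addr);     /* kernel (init_mm) pgd */
            pud_t *pud = pud_offset(pgd, addr);  /* folds away on ppc32  */
            pmd_t *pmd = pmd_offset(pud, addr);
            return pte_offset_kernel(pmd, addr);
    }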
@@ -300,42 +310,6 @@ void __dma_free_coherent(size_t size, void *vaddr)
 }
 EXPORT_SYMBOL(__dma_free_coherent);
 
-/*
- * Initialise the consistent memory allocation.
- */
-static int __init dma_alloc_init(void)
-{
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
-       pte_t *pte;
-       int ret = 0;
-
-       do {
-               pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
-               pud = pud_alloc(&init_mm, pgd, CONSISTENT_BASE);
-               pmd = pmd_alloc(&init_mm, pud, CONSISTENT_BASE);
-               if (!pmd) {
-                       printk(KERN_ERR "%s: no pmd tables\n", __func__);
-                       ret = -ENOMEM;
-                       break;
-               }
-
-               pte = pte_alloc_kernel(pmd, CONSISTENT_BASE);
-               if (!pte) {
-                       printk(KERN_ERR "%s: no pte tables\n", __func__);
-                       ret = -ENOMEM;
-                       break;
-               }
-
-               consistent_pte = pte;
-       } while (0);
-
-       return ret;
-}
-
-core_initcall(dma_alloc_init);
-
 /*
  * make an area consistent.
  */
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index d3a4e67561fa23b898065972d31067f1f76d16b3..579382c163a9cd40ca9381d3a0eaf345659ed664 100644
@@ -387,6 +387,10 @@ void __init mem_init(void)
        pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
                PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
 #endif /* CONFIG_HIGHMEM */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+       pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
+               IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
+#endif /* CONFIG_NOT_COHERENT_CACHE */
        pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
                ioremap_bot, IOREMAP_TOP);
        pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",