drivers/base/dma-coherent.c
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
        void            *virt_base;     /* kernel virtual address of the region (from ioremap) */
        u32             device_base;    /* base address of the region as seen by the device */
        int             size;           /* size of the region in pages */
        int             flags;          /* DMA_MEMORY_* flags passed at declaration time */
        unsigned long   *bitmap;        /* one bit per page; set bits are allocated */
};

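/**
 * dma_declare_coherent_memory() - declare a per-device coherent memory region
 * @dev:         device that will use the region
 * @bus_addr:    physical address of the region as seen by the CPU; it is
 *               ioremapped here so the kernel can reach it
 * @device_addr: base address the device itself uses to address the region
 *               (handed back later as the dma handle by dma_alloc_from_coherent())
 * @size:        size of the region in bytes
 * @flags:       DMA_MEMORY_* flags; at least one of DMA_MEMORY_MAP or
 *               DMA_MEMORY_IO must be set
 *
 * Returns DMA_MEMORY_MAP or DMA_MEMORY_IO on success, 0 on failure.
 */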
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                                dma_addr_t device_addr, size_t size, int flags)
{
        void __iomem *mem_base = NULL;
        int pages = size >> PAGE_SHIFT;
        int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

        if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
                goto out;
        if (!size)
                goto out;
        if (dev->dma_mem)
                goto out;

        /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

        mem_base = ioremap(bus_addr, size);
        if (!mem_base)
                goto out;

        dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
        if (!dev->dma_mem)
                goto out;
        dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!dev->dma_mem->bitmap)
                goto free1_out;

        dev->dma_mem->virt_base = mem_base;
        dev->dma_mem->device_base = device_addr;
        dev->dma_mem->size = pages;
        dev->dma_mem->flags = flags;

        if (flags & DMA_MEMORY_MAP)
                return DMA_MEMORY_MAP;

        return DMA_MEMORY_IO;

 free1_out:
        kfree(dev->dma_mem);
 out:
        if (mem_base)
                iounmap(mem_base);
        return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);

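/*
 * Usage sketch (illustrative only, not part of the original file): a driver
 * whose device has local memory at a fixed bus address might declare it at
 * probe time and release it on remove.  foo_probe(), foo_remove(),
 * FOO_MEM_PHYS, FOO_MEM_DEV and FOO_MEM_SIZE are hypothetical names.
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              if (!dma_declare_coherent_memory(&pdev->dev, FOO_MEM_PHYS,
 *                                               FOO_MEM_DEV, FOO_MEM_SIZE,
 *                                               DMA_MEMORY_MAP))
 *                      return -ENOMEM;
 *              return 0;
 *      }
 *
 *      static int foo_remove(struct platform_device *pdev)
 *      {
 *              dma_release_declared_memory(&pdev->dev);
 *              return 0;
 *      }
 *
 * Once declared, dma_alloc_coherent() calls for this device are satisfied
 * from the region via dma_alloc_from_coherent() below.
 */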
void dma_release_declared_memory(struct device *dev)
{
        struct dma_coherent_mem *mem = dev->dma_mem;

        if (!mem)
                return;
        dev->dma_mem = NULL;
        iounmap(mem->virt_base);
        kfree(mem->bitmap);
        kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

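/**
 * dma_mark_declared_memory_occupied() - reserve part of a declared region
 * @dev:         device owning the declared coherent memory
 * @device_addr: device-side address of the range to reserve
 * @size:        size of the range in bytes
 *
 * Marks the pages covering @device_addr .. @device_addr + @size as busy in
 * the region's bitmap so dma_alloc_from_coherent() will never hand them out.
 * Returns the kernel virtual address of the reserved range, or an ERR_PTR()
 * value on failure.
 */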
void *dma_mark_declared_memory_occupied(struct device *dev,
                                        dma_addr_t device_addr, size_t size)
{
        struct dma_coherent_mem *mem = dev->dma_mem;
        int pos, err;

        size += device_addr & ~PAGE_MASK;

        if (!mem)
                return ERR_PTR(-EINVAL);

        pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
        err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
        if (err != 0)
                return ERR_PTR(err);
        return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);

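/*
 * Usage sketch (illustrative only): a driver can pin down a window the device
 * already uses, e.g. a firmware-configured framebuffer, so that
 * dma_alloc_coherent() never hands those pages out.  FOO_FB_DEV_ADDR and
 * FOO_FB_SIZE are hypothetical names.
 *
 *      void *fb;
 *
 *      fb = dma_mark_declared_memory_occupied(dev, FOO_FB_DEV_ADDR,
 *                                             FOO_FB_SIZE);
 *      if (IS_ERR(fb))
 *              return PTR_ERR(fb);
 */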
/**
 * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
 *
 * @dev:        device from which we allocate memory
 * @size:       size of requested memory area
 * @dma_handle: This will be filled with the correct dma handle
 * @ret:        This pointer will be filled with the virtual address of the
 *              allocated area.
 *
 * This function should only be called from the per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
                            dma_addr_t *dma_handle, void **ret)
{
        struct dma_coherent_mem *mem;
        int order = get_order(size);
        int pageno;

        if (!dev)
                return 0;
        mem = dev->dma_mem;
        if (!mem)
                return 0;

        *ret = NULL;

        if (unlikely(size > (mem->size << PAGE_SHIFT)))
                goto err;

        pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
        if (unlikely(pageno < 0))
                goto err;

        /*
         * Memory was found in the per-device area.
         */
        *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
        *ret = mem->virt_base + (pageno << PAGE_SHIFT);
        memset(*ret, 0, size);

        return 1;

err:
        /*
         * In the case where the allocation can not be satisfied from the
         * per-device area, try to fall back to generic memory if the
         * constraints allow it.
         */
        return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
EXPORT_SYMBOL(dma_alloc_from_coherent);

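/*
 * Sketch of the intended caller (illustrative only, heavily simplified): an
 * architecture's dma_alloc_coherent() tries the per-device pool first and
 * only then falls back to its generic allocator.  arch_alloc_coherent_pages()
 * is a hypothetical stand-in for that generic path.  Note that a nonzero
 * return with *ret == NULL means the pool was exhausted and
 * DMA_MEMORY_EXCLUSIVE forbids falling back, so NULL goes back to the driver.
 *
 *      void *dma_alloc_coherent(struct device *dev, size_t size,
 *                               dma_addr_t *dma_handle, gfp_t gfp)
 *      {
 *              void *ret;
 *
 *              if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
 *                      return ret;
 *
 *              return arch_alloc_coherent_pages(dev, size, dma_handle, gfp);
 *      }
 */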
/**
 * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
 * @dev:   device from which the memory was allocated
 * @order: the order of pages allocated
 * @vaddr: virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller
 * should proceed with releasing memory from generic pools.
 */
int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
{
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

        if (mem && vaddr >= mem->virt_base && vaddr <
                   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

                bitmap_release_region(mem->bitmap, page, order);
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL(dma_release_from_coherent);
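/*
 * Sketch of the intended caller (illustrative only, heavily simplified): an
 * architecture's dma_free_coherent() gives the per-device pool the first
 * chance to take the memory back.  arch_free_coherent_pages() is a
 * hypothetical stand-in for the generic release path.
 *
 *      void dma_free_coherent(struct device *dev, size_t size,
 *                             void *vaddr, dma_addr_t dma_handle)
 *      {
 *              int order = get_order(size);
 *
 *              if (dma_release_from_coherent(dev, order, vaddr))
 *                      return;
 *
 *              arch_free_coherent_pages(dev, size, vaddr, dma_handle);
 *      }
 */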