]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
6 | * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com> | |
9a88cbb5 | 7 | * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org> |
1da177e4 LT |
8 | * swiped from i386, and cloned for MIPS by Geert, polished by Ralf. |
9 | */ | |
9a88cbb5 | 10 | |
1da177e4 | 11 | #include <linux/types.h> |
9a88cbb5 | 12 | #include <linux/dma-mapping.h> |
1da177e4 LT |
13 | #include <linux/mm.h> |
14 | #include <linux/module.h> | |
4fcc47a0 | 15 | #include <linux/scatterlist.h> |
6e86b0bf | 16 | #include <linux/string.h> |
1da177e4 LT |
17 | |
18 | #include <asm/cache.h> | |
19 | #include <asm/io.h> | |
20 | ||
9a88cbb5 RB |
21 | #include <dma-coherence.h> |
22 | ||
c9d06962 FBH |
23 | static inline unsigned long dma_addr_to_virt(dma_addr_t dma_addr) |
24 | { | |
25 | unsigned long addr = plat_dma_addr_to_phys(dma_addr); | |
26 | ||
27 | return (unsigned long)phys_to_virt(addr); | |
28 | } | |
29 | ||
/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */
35 | ||
9a88cbb5 RB |
36 | static inline int cpu_is_noncoherent_r10000(struct device *dev) |
37 | { | |
38 | return !plat_device_is_coherent(dev) && | |
10cc3529 RB |
39 | (current_cpu_type() == CPU_R10000 || |
40 | current_cpu_type() == CPU_R12000); | |
9a88cbb5 RB |
41 | } |
42 | ||
cce335ae RB |
43 | static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp) |
44 | { | |
45 | /* ignore region specifiers */ | |
46 | gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); | |
47 | ||
32016718 | 48 | #ifdef CONFIG_ZONE_DMA |
cce335ae RB |
49 | if (dev == NULL) |
50 | gfp |= __GFP_DMA; | |
51 | else if (dev->coherent_dma_mask < DMA_BIT_MASK(24)) | |
52 | gfp |= __GFP_DMA; | |
53 | else | |
54 | #endif | |
55 | #ifdef CONFIG_ZONE_DMA32 | |
56 | if (dev->coherent_dma_mask < DMA_BIT_MASK(32)) | |
57 | gfp |= __GFP_DMA32; | |
58 | else | |
59 | #endif | |
60 | ; | |
61 | ||
62 | /* Don't invoke OOM killer */ | |
63 | gfp |= __GFP_NORETRY; | |
64 | ||
65 | return gfp; | |
66 | } | |
67 | ||
1da177e4 | 68 | void *dma_alloc_noncoherent(struct device *dev, size_t size, |
185a8ff5 | 69 | dma_addr_t * dma_handle, gfp_t gfp) |
1da177e4 LT |
70 | { |
71 | void *ret; | |
9a88cbb5 | 72 | |
cce335ae | 73 | gfp = massage_gfp_flags(dev, gfp); |
1da177e4 | 74 | |
1da177e4 LT |
75 | ret = (void *) __get_free_pages(gfp, get_order(size)); |
76 | ||
77 | if (ret != NULL) { | |
78 | memset(ret, 0, size); | |
9a88cbb5 | 79 | *dma_handle = plat_map_dma_mem(dev, ret, size); |
1da177e4 LT |
80 | } |
81 | ||
82 | return ret; | |
83 | } | |
84 | ||
85 | EXPORT_SYMBOL(dma_alloc_noncoherent); | |
86 | ||
87 | void *dma_alloc_coherent(struct device *dev, size_t size, | |
185a8ff5 | 88 | dma_addr_t * dma_handle, gfp_t gfp) |
1da177e4 LT |
89 | { |
90 | void *ret; | |
91 | ||
cce335ae | 92 | gfp = massage_gfp_flags(dev, gfp); |
9a88cbb5 | 93 | |
9a88cbb5 RB |
94 | ret = (void *) __get_free_pages(gfp, get_order(size)); |
95 | ||
1da177e4 | 96 | if (ret) { |
9a88cbb5 RB |
97 | memset(ret, 0, size); |
98 | *dma_handle = plat_map_dma_mem(dev, ret, size); | |
99 | ||
100 | if (!plat_device_is_coherent(dev)) { | |
101 | dma_cache_wback_inv((unsigned long) ret, size); | |
102 | ret = UNCAC_ADDR(ret); | |
103 | } | |
1da177e4 LT |
104 | } |
105 | ||
106 | return ret; | |
107 | } | |
108 | ||
109 | EXPORT_SYMBOL(dma_alloc_coherent); | |
110 | ||
111 | void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, | |
112 | dma_addr_t dma_handle) | |
113 | { | |
d3f634b9 | 114 | plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL); |
1da177e4 LT |
115 | free_pages((unsigned long) vaddr, get_order(size)); |
116 | } | |
117 | ||
118 | EXPORT_SYMBOL(dma_free_noncoherent); | |
119 | ||
120 | void dma_free_coherent(struct device *dev, size_t size, void *vaddr, | |
121 | dma_addr_t dma_handle) | |
122 | { | |
123 | unsigned long addr = (unsigned long) vaddr; | |
124 | ||
d3f634b9 | 125 | plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL); |
11531ac2 | 126 | |
9a88cbb5 RB |
127 | if (!plat_device_is_coherent(dev)) |
128 | addr = CAC_ADDR(addr); | |
129 | ||
1da177e4 LT |
130 | free_pages(addr, get_order(size)); |
131 | } | |
132 | ||
133 | EXPORT_SYMBOL(dma_free_coherent); | |
134 | ||
135 | static inline void __dma_sync(unsigned long addr, size_t size, | |
136 | enum dma_data_direction direction) | |
137 | { | |
138 | switch (direction) { | |
139 | case DMA_TO_DEVICE: | |
140 | dma_cache_wback(addr, size); | |
141 | break; | |
142 | ||
143 | case DMA_FROM_DEVICE: | |
144 | dma_cache_inv(addr, size); | |
145 | break; | |
146 | ||
147 | case DMA_BIDIRECTIONAL: | |
148 | dma_cache_wback_inv(addr, size); | |
149 | break; | |
150 | ||
151 | default: | |
152 | BUG(); | |
153 | } | |
154 | } | |
155 | ||
156 | dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, | |
157 | enum dma_data_direction direction) | |
158 | { | |
159 | unsigned long addr = (unsigned long) ptr; | |
160 | ||
9a88cbb5 RB |
161 | if (!plat_device_is_coherent(dev)) |
162 | __dma_sync(addr, size, direction); | |
1da177e4 | 163 | |
9a88cbb5 | 164 | return plat_map_dma_mem(dev, ptr, size); |
1da177e4 LT |
165 | } |
166 | ||
167 | EXPORT_SYMBOL(dma_map_single); | |
168 | ||
169 | void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | |
170 | enum dma_data_direction direction) | |
171 | { | |
9a88cbb5 | 172 | if (cpu_is_noncoherent_r10000(dev)) |
c9d06962 | 173 | __dma_sync(dma_addr_to_virt(dma_addr), size, |
9a88cbb5 | 174 | direction); |
1da177e4 | 175 | |
d3f634b9 | 176 | plat_unmap_dma_mem(dev, dma_addr, size, direction); |
1da177e4 LT |
177 | } |
178 | ||
179 | EXPORT_SYMBOL(dma_unmap_single); | |
180 | ||
181 | int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | |
182 | enum dma_data_direction direction) | |
183 | { | |
184 | int i; | |
185 | ||
186 | BUG_ON(direction == DMA_NONE); | |
187 | ||
188 | for (i = 0; i < nents; i++, sg++) { | |
189 | unsigned long addr; | |
42a3b4f2 | 190 | |
58b053e4 | 191 | addr = (unsigned long) sg_virt(sg); |
9a88cbb5 | 192 | if (!plat_device_is_coherent(dev) && addr) |
58b053e4 | 193 | __dma_sync(addr, sg->length, direction); |
fbd5604d | 194 | sg->dma_address = plat_map_dma_mem(dev, |
58b053e4 | 195 | (void *)addr, sg->length); |
1da177e4 LT |
196 | } |
197 | ||
198 | return nents; | |
199 | } | |
200 | ||
201 | EXPORT_SYMBOL(dma_map_sg); | |
202 | ||
203 | dma_addr_t dma_map_page(struct device *dev, struct page *page, | |
204 | unsigned long offset, size_t size, enum dma_data_direction direction) | |
205 | { | |
1da177e4 LT |
206 | BUG_ON(direction == DMA_NONE); |
207 | ||
9a88cbb5 RB |
208 | if (!plat_device_is_coherent(dev)) { |
209 | unsigned long addr; | |
210 | ||
211 | addr = (unsigned long) page_address(page) + offset; | |
4f29c057 | 212 | __dma_sync(addr, size, direction); |
9a88cbb5 | 213 | } |
1da177e4 | 214 | |
9a88cbb5 | 215 | return plat_map_dma_mem_page(dev, page) + offset; |
1da177e4 LT |
216 | } |
217 | ||
218 | EXPORT_SYMBOL(dma_map_page); | |
219 | ||
1da177e4 LT |
220 | void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, |
221 | enum dma_data_direction direction) | |
222 | { | |
223 | unsigned long addr; | |
224 | int i; | |
225 | ||
226 | BUG_ON(direction == DMA_NONE); | |
227 | ||
1da177e4 | 228 | for (i = 0; i < nhwentries; i++, sg++) { |
9a88cbb5 RB |
229 | if (!plat_device_is_coherent(dev) && |
230 | direction != DMA_TO_DEVICE) { | |
58b053e4 | 231 | addr = (unsigned long) sg_virt(sg); |
9a88cbb5 | 232 | if (addr) |
58b053e4 | 233 | __dma_sync(addr, sg->length, direction); |
9a88cbb5 | 234 | } |
d3f634b9 | 235 | plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction); |
1da177e4 LT |
236 | } |
237 | } | |
238 | ||
239 | EXPORT_SYMBOL(dma_unmap_sg); | |
240 | ||
241 | void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, | |
242 | size_t size, enum dma_data_direction direction) | |
243 | { | |
1da177e4 | 244 | BUG_ON(direction == DMA_NONE); |
42a3b4f2 | 245 | |
9a88cbb5 RB |
246 | if (cpu_is_noncoherent_r10000(dev)) { |
247 | unsigned long addr; | |
248 | ||
c9d06962 | 249 | addr = dma_addr_to_virt(dma_handle); |
9a88cbb5 RB |
250 | __dma_sync(addr, size, direction); |
251 | } | |
1da177e4 LT |
252 | } |
253 | ||
254 | EXPORT_SYMBOL(dma_sync_single_for_cpu); | |
255 | ||
256 | void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, | |
257 | size_t size, enum dma_data_direction direction) | |
258 | { | |
1da177e4 LT |
259 | BUG_ON(direction == DMA_NONE); |
260 | ||
843aef49 | 261 | plat_extra_sync_for_device(dev); |
9b43fb6b | 262 | if (!plat_device_is_coherent(dev)) { |
9a88cbb5 RB |
263 | unsigned long addr; |
264 | ||
c9d06962 | 265 | addr = dma_addr_to_virt(dma_handle); |
9a88cbb5 RB |
266 | __dma_sync(addr, size, direction); |
267 | } | |
1da177e4 LT |
268 | } |
269 | ||
270 | EXPORT_SYMBOL(dma_sync_single_for_device); | |
271 | ||
272 | void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, | |
273 | unsigned long offset, size_t size, enum dma_data_direction direction) | |
274 | { | |
1da177e4 LT |
275 | BUG_ON(direction == DMA_NONE); |
276 | ||
9a88cbb5 RB |
277 | if (cpu_is_noncoherent_r10000(dev)) { |
278 | unsigned long addr; | |
279 | ||
c9d06962 | 280 | addr = dma_addr_to_virt(dma_handle); |
9a88cbb5 RB |
281 | __dma_sync(addr + offset, size, direction); |
282 | } | |
1da177e4 LT |
283 | } |
284 | ||
285 | EXPORT_SYMBOL(dma_sync_single_range_for_cpu); | |
286 | ||
287 | void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, | |
288 | unsigned long offset, size_t size, enum dma_data_direction direction) | |
289 | { | |
1da177e4 LT |
290 | BUG_ON(direction == DMA_NONE); |
291 | ||
843aef49 | 292 | plat_extra_sync_for_device(dev); |
9b43fb6b | 293 | if (!plat_device_is_coherent(dev)) { |
9a88cbb5 RB |
294 | unsigned long addr; |
295 | ||
c9d06962 | 296 | addr = dma_addr_to_virt(dma_handle); |
9a88cbb5 RB |
297 | __dma_sync(addr + offset, size, direction); |
298 | } | |
1da177e4 LT |
299 | } |
300 | ||
301 | EXPORT_SYMBOL(dma_sync_single_range_for_device); | |
302 | ||
303 | void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, | |
304 | enum dma_data_direction direction) | |
305 | { | |
306 | int i; | |
42a3b4f2 | 307 | |
1da177e4 | 308 | BUG_ON(direction == DMA_NONE); |
42a3b4f2 | 309 | |
1da177e4 | 310 | /* Make sure that gcc doesn't leave the empty loop body. */ |
9a88cbb5 | 311 | for (i = 0; i < nelems; i++, sg++) { |
5b648a98 | 312 | if (cpu_is_noncoherent_r10000(dev)) |
58b053e4 | 313 | __dma_sync((unsigned long)page_address(sg_page(sg)), |
9a88cbb5 | 314 | sg->length, direction); |
9a88cbb5 | 315 | } |
1da177e4 LT |
316 | } |
317 | ||
318 | EXPORT_SYMBOL(dma_sync_sg_for_cpu); | |
319 | ||
320 | void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, | |
321 | enum dma_data_direction direction) | |
322 | { | |
323 | int i; | |
324 | ||
325 | BUG_ON(direction == DMA_NONE); | |
326 | ||
327 | /* Make sure that gcc doesn't leave the empty loop body. */ | |
9a88cbb5 RB |
328 | for (i = 0; i < nelems; i++, sg++) { |
329 | if (!plat_device_is_coherent(dev)) | |
58b053e4 | 330 | __dma_sync((unsigned long)page_address(sg_page(sg)), |
9a88cbb5 | 331 | sg->length, direction); |
9a88cbb5 | 332 | } |
1da177e4 LT |
333 | } |
334 | ||
335 | EXPORT_SYMBOL(dma_sync_sg_for_device); | |
336 | ||
8d8bb39b | 337 | int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
1da177e4 | 338 | { |
843aef49 | 339 | return plat_dma_mapping_error(dev, dma_addr); |
1da177e4 LT |
340 | } |
341 | ||
342 | EXPORT_SYMBOL(dma_mapping_error); | |
343 | ||
344 | int dma_supported(struct device *dev, u64 mask) | |
345 | { | |
843aef49 | 346 | return plat_dma_supported(dev, mask); |
1da177e4 LT |
347 | } |
348 | ||
349 | EXPORT_SYMBOL(dma_supported); | |
350 | ||
f67637ee | 351 | int dma_is_consistent(struct device *dev, dma_addr_t dma_addr) |
1da177e4 | 352 | { |
9a88cbb5 | 353 | return plat_device_is_coherent(dev); |
1da177e4 LT |
354 | } |
355 | ||
356 | EXPORT_SYMBOL(dma_is_consistent); | |
357 | ||
d3fa72e4 | 358 | void dma_cache_sync(struct device *dev, void *vaddr, size_t size, |
9a88cbb5 | 359 | enum dma_data_direction direction) |
1da177e4 | 360 | { |
9a88cbb5 | 361 | BUG_ON(direction == DMA_NONE); |
1da177e4 | 362 | |
843aef49 | 363 | plat_extra_sync_for_device(dev); |
9a88cbb5 | 364 | if (!plat_device_is_coherent(dev)) |
c7c6b390 | 365 | __dma_sync((unsigned long)vaddr, size, direction); |
1da177e4 LT |
366 | } |
367 | ||
368 | EXPORT_SYMBOL(dma_cache_sync); |