]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
6 | * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com> | |
9a88cbb5 | 7 | * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org> |
1da177e4 LT |
8 | * swiped from i386, and cloned for MIPS by Geert, polished by Ralf. |
9 | */ | |
9a88cbb5 | 10 | |
1da177e4 | 11 | #include <linux/types.h> |
9a88cbb5 | 12 | #include <linux/dma-mapping.h> |
1da177e4 LT |
13 | #include <linux/mm.h> |
14 | #include <linux/module.h> | |
4fcc47a0 | 15 | #include <linux/scatterlist.h> |
6e86b0bf | 16 | #include <linux/string.h> |
1da177e4 LT |
17 | |
18 | #include <asm/cache.h> | |
19 | #include <asm/io.h> | |
20 | ||
9a88cbb5 RB |
21 | #include <dma-coherence.h> |
22 | ||
c9d06962 FBH |
23 | static inline unsigned long dma_addr_to_virt(dma_addr_t dma_addr) |
24 | { | |
25 | unsigned long addr = plat_dma_addr_to_phys(dma_addr); | |
26 | ||
27 | return (unsigned long)phys_to_virt(addr); | |
28 | } | |
29 | ||
1da177e4 LT |
30 | /* |
31 | * Warning on the terminology - Linux calls an uncached area coherent; | |
32 | * MIPS terminology calls memory areas with hardware maintained coherency | |
33 | * coherent. | |
34 | */ | |
35 | ||
9a88cbb5 RB |
36 | static inline int cpu_is_noncoherent_r10000(struct device *dev) |
37 | { | |
38 | return !plat_device_is_coherent(dev) && | |
10cc3529 RB |
39 | (current_cpu_type() == CPU_R10000 || |
40 | current_cpu_type() == CPU_R12000); | |
9a88cbb5 RB |
41 | } |
42 | ||
cce335ae RB |
43 | static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp) |
44 | { | |
45 | /* ignore region specifiers */ | |
46 | gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); | |
47 | ||
32016718 | 48 | #ifdef CONFIG_ZONE_DMA |
cce335ae RB |
49 | if (dev == NULL) |
50 | gfp |= __GFP_DMA; | |
51 | else if (dev->coherent_dma_mask < DMA_BIT_MASK(24)) | |
52 | gfp |= __GFP_DMA; | |
53 | else | |
54 | #endif | |
55 | #ifdef CONFIG_ZONE_DMA32 | |
56 | if (dev->coherent_dma_mask < DMA_BIT_MASK(32)) | |
57 | gfp |= __GFP_DMA32; | |
58 | else | |
59 | #endif | |
60 | ; | |
61 | ||
62 | /* Don't invoke OOM killer */ | |
63 | gfp |= __GFP_NORETRY; | |
64 | ||
65 | return gfp; | |
66 | } | |
67 | ||
1da177e4 | 68 | void *dma_alloc_noncoherent(struct device *dev, size_t size, |
185a8ff5 | 69 | dma_addr_t * dma_handle, gfp_t gfp) |
1da177e4 LT |
70 | { |
71 | void *ret; | |
9a88cbb5 | 72 | |
cce335ae | 73 | gfp = massage_gfp_flags(dev, gfp); |
1da177e4 | 74 | |
1da177e4 LT |
75 | ret = (void *) __get_free_pages(gfp, get_order(size)); |
76 | ||
77 | if (ret != NULL) { | |
78 | memset(ret, 0, size); | |
9a88cbb5 | 79 | *dma_handle = plat_map_dma_mem(dev, ret, size); |
1da177e4 LT |
80 | } |
81 | ||
82 | return ret; | |
83 | } | |
84 | ||
85 | EXPORT_SYMBOL(dma_alloc_noncoherent); | |
86 | ||
87 | void *dma_alloc_coherent(struct device *dev, size_t size, | |
185a8ff5 | 88 | dma_addr_t * dma_handle, gfp_t gfp) |
1da177e4 LT |
89 | { |
90 | void *ret; | |
91 | ||
cce335ae | 92 | gfp = massage_gfp_flags(dev, gfp); |
9a88cbb5 | 93 | |
9a88cbb5 RB |
94 | ret = (void *) __get_free_pages(gfp, get_order(size)); |
95 | ||
1da177e4 | 96 | if (ret) { |
9a88cbb5 RB |
97 | memset(ret, 0, size); |
98 | *dma_handle = plat_map_dma_mem(dev, ret, size); | |
99 | ||
100 | if (!plat_device_is_coherent(dev)) { | |
101 | dma_cache_wback_inv((unsigned long) ret, size); | |
102 | ret = UNCAC_ADDR(ret); | |
103 | } | |
1da177e4 LT |
104 | } |
105 | ||
106 | return ret; | |
107 | } | |
108 | ||
109 | EXPORT_SYMBOL(dma_alloc_coherent); | |
110 | ||
111 | void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, | |
112 | dma_addr_t dma_handle) | |
113 | { | |
843aef49 | 114 | plat_unmap_dma_mem(dev, dma_handle); |
1da177e4 LT |
115 | free_pages((unsigned long) vaddr, get_order(size)); |
116 | } | |
117 | ||
118 | EXPORT_SYMBOL(dma_free_noncoherent); | |
119 | ||
120 | void dma_free_coherent(struct device *dev, size_t size, void *vaddr, | |
121 | dma_addr_t dma_handle) | |
122 | { | |
123 | unsigned long addr = (unsigned long) vaddr; | |
124 | ||
843aef49 | 125 | plat_unmap_dma_mem(dev, dma_handle); |
11531ac2 | 126 | |
9a88cbb5 RB |
127 | if (!plat_device_is_coherent(dev)) |
128 | addr = CAC_ADDR(addr); | |
129 | ||
1da177e4 LT |
130 | free_pages(addr, get_order(size)); |
131 | } | |
132 | ||
133 | EXPORT_SYMBOL(dma_free_coherent); | |
134 | ||
135 | static inline void __dma_sync(unsigned long addr, size_t size, | |
136 | enum dma_data_direction direction) | |
137 | { | |
138 | switch (direction) { | |
139 | case DMA_TO_DEVICE: | |
140 | dma_cache_wback(addr, size); | |
141 | break; | |
142 | ||
143 | case DMA_FROM_DEVICE: | |
144 | dma_cache_inv(addr, size); | |
145 | break; | |
146 | ||
147 | case DMA_BIDIRECTIONAL: | |
148 | dma_cache_wback_inv(addr, size); | |
149 | break; | |
150 | ||
151 | default: | |
152 | BUG(); | |
153 | } | |
154 | } | |
155 | ||
156 | dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, | |
157 | enum dma_data_direction direction) | |
158 | { | |
159 | unsigned long addr = (unsigned long) ptr; | |
160 | ||
9a88cbb5 RB |
161 | if (!plat_device_is_coherent(dev)) |
162 | __dma_sync(addr, size, direction); | |
1da177e4 | 163 | |
9a88cbb5 | 164 | return plat_map_dma_mem(dev, ptr, size); |
1da177e4 LT |
165 | } |
166 | ||
167 | EXPORT_SYMBOL(dma_map_single); | |
168 | ||
169 | void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | |
170 | enum dma_data_direction direction) | |
171 | { | |
9a88cbb5 | 172 | if (cpu_is_noncoherent_r10000(dev)) |
c9d06962 | 173 | __dma_sync(dma_addr_to_virt(dma_addr), size, |
9a88cbb5 | 174 | direction); |
1da177e4 | 175 | |
843aef49 | 176 | plat_unmap_dma_mem(dev, dma_addr); |
1da177e4 LT |
177 | } |
178 | ||
179 | EXPORT_SYMBOL(dma_unmap_single); | |
180 | ||
181 | int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | |
182 | enum dma_data_direction direction) | |
183 | { | |
184 | int i; | |
185 | ||
186 | BUG_ON(direction == DMA_NONE); | |
187 | ||
188 | for (i = 0; i < nents; i++, sg++) { | |
189 | unsigned long addr; | |
42a3b4f2 | 190 | |
58b053e4 | 191 | addr = (unsigned long) sg_virt(sg); |
9a88cbb5 | 192 | if (!plat_device_is_coherent(dev) && addr) |
58b053e4 | 193 | __dma_sync(addr, sg->length, direction); |
fbd5604d | 194 | sg->dma_address = plat_map_dma_mem(dev, |
58b053e4 | 195 | (void *)addr, sg->length); |
1da177e4 LT |
196 | } |
197 | ||
198 | return nents; | |
199 | } | |
200 | ||
201 | EXPORT_SYMBOL(dma_map_sg); | |
202 | ||
203 | dma_addr_t dma_map_page(struct device *dev, struct page *page, | |
204 | unsigned long offset, size_t size, enum dma_data_direction direction) | |
205 | { | |
1da177e4 LT |
206 | BUG_ON(direction == DMA_NONE); |
207 | ||
9a88cbb5 RB |
208 | if (!plat_device_is_coherent(dev)) { |
209 | unsigned long addr; | |
210 | ||
211 | addr = (unsigned long) page_address(page) + offset; | |
212 | dma_cache_wback_inv(addr, size); | |
213 | } | |
1da177e4 | 214 | |
9a88cbb5 | 215 | return plat_map_dma_mem_page(dev, page) + offset; |
1da177e4 LT |
216 | } |
217 | ||
218 | EXPORT_SYMBOL(dma_map_page); | |
219 | ||
220 | void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, | |
221 | enum dma_data_direction direction) | |
222 | { | |
223 | BUG_ON(direction == DMA_NONE); | |
224 | ||
9a88cbb5 | 225 | if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) { |
1da177e4 LT |
226 | unsigned long addr; |
227 | ||
d7001198 | 228 | addr = dma_addr_to_virt(dma_address); |
1da177e4 LT |
229 | dma_cache_wback_inv(addr, size); |
230 | } | |
9a88cbb5 | 231 | |
843aef49 | 232 | plat_unmap_dma_mem(dev, dma_address); |
1da177e4 LT |
233 | } |
234 | ||
235 | EXPORT_SYMBOL(dma_unmap_page); | |
236 | ||
237 | void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, | |
238 | enum dma_data_direction direction) | |
239 | { | |
240 | unsigned long addr; | |
241 | int i; | |
242 | ||
243 | BUG_ON(direction == DMA_NONE); | |
244 | ||
1da177e4 | 245 | for (i = 0; i < nhwentries; i++, sg++) { |
9a88cbb5 RB |
246 | if (!plat_device_is_coherent(dev) && |
247 | direction != DMA_TO_DEVICE) { | |
58b053e4 | 248 | addr = (unsigned long) sg_virt(sg); |
9a88cbb5 | 249 | if (addr) |
58b053e4 | 250 | __dma_sync(addr, sg->length, direction); |
9a88cbb5 | 251 | } |
843aef49 | 252 | plat_unmap_dma_mem(dev, sg->dma_address); |
1da177e4 LT |
253 | } |
254 | } | |
255 | ||
256 | EXPORT_SYMBOL(dma_unmap_sg); | |
257 | ||
258 | void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, | |
259 | size_t size, enum dma_data_direction direction) | |
260 | { | |
1da177e4 | 261 | BUG_ON(direction == DMA_NONE); |
42a3b4f2 | 262 | |
9a88cbb5 RB |
263 | if (cpu_is_noncoherent_r10000(dev)) { |
264 | unsigned long addr; | |
265 | ||
c9d06962 | 266 | addr = dma_addr_to_virt(dma_handle); |
9a88cbb5 RB |
267 | __dma_sync(addr, size, direction); |
268 | } | |
1da177e4 LT |
269 | } |
270 | ||
271 | EXPORT_SYMBOL(dma_sync_single_for_cpu); | |
272 | ||
273 | void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, | |
274 | size_t size, enum dma_data_direction direction) | |
275 | { | |
1da177e4 LT |
276 | BUG_ON(direction == DMA_NONE); |
277 | ||
843aef49 | 278 | plat_extra_sync_for_device(dev); |
9b43fb6b | 279 | if (!plat_device_is_coherent(dev)) { |
9a88cbb5 RB |
280 | unsigned long addr; |
281 | ||
c9d06962 | 282 | addr = dma_addr_to_virt(dma_handle); |
9a88cbb5 RB |
283 | __dma_sync(addr, size, direction); |
284 | } | |
1da177e4 LT |
285 | } |
286 | ||
287 | EXPORT_SYMBOL(dma_sync_single_for_device); | |
288 | ||
289 | void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, | |
290 | unsigned long offset, size_t size, enum dma_data_direction direction) | |
291 | { | |
1da177e4 LT |
292 | BUG_ON(direction == DMA_NONE); |
293 | ||
9a88cbb5 RB |
294 | if (cpu_is_noncoherent_r10000(dev)) { |
295 | unsigned long addr; | |
296 | ||
c9d06962 | 297 | addr = dma_addr_to_virt(dma_handle); |
9a88cbb5 RB |
298 | __dma_sync(addr + offset, size, direction); |
299 | } | |
1da177e4 LT |
300 | } |
301 | ||
302 | EXPORT_SYMBOL(dma_sync_single_range_for_cpu); | |
303 | ||
304 | void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, | |
305 | unsigned long offset, size_t size, enum dma_data_direction direction) | |
306 | { | |
1da177e4 LT |
307 | BUG_ON(direction == DMA_NONE); |
308 | ||
843aef49 | 309 | plat_extra_sync_for_device(dev); |
9b43fb6b | 310 | if (!plat_device_is_coherent(dev)) { |
9a88cbb5 RB |
311 | unsigned long addr; |
312 | ||
c9d06962 | 313 | addr = dma_addr_to_virt(dma_handle); |
9a88cbb5 RB |
314 | __dma_sync(addr + offset, size, direction); |
315 | } | |
1da177e4 LT |
316 | } |
317 | ||
318 | EXPORT_SYMBOL(dma_sync_single_range_for_device); | |
319 | ||
320 | void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, | |
321 | enum dma_data_direction direction) | |
322 | { | |
323 | int i; | |
42a3b4f2 | 324 | |
1da177e4 | 325 | BUG_ON(direction == DMA_NONE); |
42a3b4f2 | 326 | |
1da177e4 | 327 | /* Make sure that gcc doesn't leave the empty loop body. */ |
9a88cbb5 | 328 | for (i = 0; i < nelems; i++, sg++) { |
5b648a98 | 329 | if (cpu_is_noncoherent_r10000(dev)) |
58b053e4 | 330 | __dma_sync((unsigned long)page_address(sg_page(sg)), |
9a88cbb5 | 331 | sg->length, direction); |
9a88cbb5 | 332 | } |
1da177e4 LT |
333 | } |
334 | ||
335 | EXPORT_SYMBOL(dma_sync_sg_for_cpu); | |
336 | ||
337 | void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, | |
338 | enum dma_data_direction direction) | |
339 | { | |
340 | int i; | |
341 | ||
342 | BUG_ON(direction == DMA_NONE); | |
343 | ||
344 | /* Make sure that gcc doesn't leave the empty loop body. */ | |
9a88cbb5 RB |
345 | for (i = 0; i < nelems; i++, sg++) { |
346 | if (!plat_device_is_coherent(dev)) | |
58b053e4 | 347 | __dma_sync((unsigned long)page_address(sg_page(sg)), |
9a88cbb5 | 348 | sg->length, direction); |
9a88cbb5 | 349 | } |
1da177e4 LT |
350 | } |
351 | ||
352 | EXPORT_SYMBOL(dma_sync_sg_for_device); | |
353 | ||
/* Report whether a device address returned by the map functions is an
 * error value; the check is platform-defined. */
int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return plat_dma_mapping_error(dev, dma_addr);
}

EXPORT_SYMBOL(dma_mapping_error);
360 | ||
/* Tell whether the device can DMA to addresses covered by @mask;
 * delegated to the platform. */
int dma_supported(struct device *dev, u64 mask)
{
	return plat_dma_supported(dev, mask);
}

EXPORT_SYMBOL(dma_supported);
367 | ||
/* Memory is "consistent" (Linux: coherent) iff the platform says the
 * device is cache-coherent; dma_addr itself is not inspected. */
int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
	return plat_device_is_coherent(dev);
}

EXPORT_SYMBOL(dma_is_consistent);
374 | ||
d3fa72e4 | 375 | void dma_cache_sync(struct device *dev, void *vaddr, size_t size, |
9a88cbb5 | 376 | enum dma_data_direction direction) |
1da177e4 | 377 | { |
9a88cbb5 | 378 | BUG_ON(direction == DMA_NONE); |
1da177e4 | 379 | |
843aef49 | 380 | plat_extra_sync_for_device(dev); |
9a88cbb5 | 381 | if (!plat_device_is_coherent(dev)) |
c7c6b390 | 382 | __dma_sync((unsigned long)vaddr, size, direction); |
1da177e4 LT |
383 | } |
384 | ||
385 | EXPORT_SYMBOL(dma_cache_sync); |