/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>

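/*
 * Translate a DMA address back to the kernel virtual address of the
 * buffer it maps. This goes through the platform hook so that buses
 * which apply an address offset (e.g. a PCI host bridge) are handled.
 */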
static inline unsigned long dma_addr_to_virt(struct device *dev,
	dma_addr_t dma_addr)
{
	unsigned long addr = plat_dma_addr_to_phys(dev, dma_addr);

	return (unsigned long)phys_to_virt(addr);
}

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

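/*
 * R10000 and R12000 execute loads and stores speculatively, so cache
 * lines covering a DMA buffer may be refilled even though the CPU never
 * explicitly touched the buffer while it was mapped. Such CPUs therefore
 * need cache maintenance on the unmap/sync-for-cpu paths as well.
 */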
static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (current_cpu_type() == CPU_R10000 ||
	       current_cpu_type() == CPU_R12000);
}

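/*
 * Select the GFP zone that can satisfy the device's coherent DMA mask:
 * ZONE_DMA for masks below 24 bits (or when no device is given),
 * ZONE_DMA32 for masks below 32 bits. Incoming zone specifiers are
 * discarded, and __GFP_NORETRY keeps allocation failures from invoking
 * the OOM killer.
 */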
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ZONE_DMA
	if (dev == NULL)
		gfp |= __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
		gfp |= __GFP_DMA;
	else
#endif
#ifdef CONFIG_ZONE_DMA32
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
		gfp |= __GFP_DMA32;
	else
#endif
		;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp;
}

void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

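/*
 * The coherent allocator first tries any per-device coherent pool; if
 * it allocates normal pages instead, it writes them back and
 * invalidates them, then hands out an uncached alias (e.g. KSEG1 via
 * UNCAC_ADDR) on non-coherent platforms so CPU accesses bypass the
 * caches entirely.
 */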
void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
			ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

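/*
 * dma_free_coherent() must undo the aliasing done at allocation time:
 * on non-coherent platforms the caller holds the uncached address, so
 * convert it back to its cached alias (CAC_ADDR) before returning the
 * pages to the page allocator.
 */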
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;
	int order = get_order(size);

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	free_pages(addr, order);
}

EXPORT_SYMBOL(dma_free_coherent);

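/*
 * Perform the cache maintenance a non-coherent device requires:
 * writeback before the device reads the buffer (DMA_TO_DEVICE),
 * invalidate before the CPU reads what the device wrote
 * (DMA_FROM_DEVICE), and both for bidirectional mappings.
 */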
static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}

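/*
 * A sketch of typical streaming-DMA usage from a driver; "dev", "buf",
 * "len" and the error handling are the caller's:
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... point the device at "handle" and start the transfer ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */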
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	if (!plat_device_is_coherent(dev))
		__dma_sync(addr, size, direction);

	return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);

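/*
 * On unmap, only the speculating R10000/R12000 need another cache
 * sync; every other CPU was handled at map time, and the CPU must not
 * touch the buffer while the mapping is live.
 */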
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_virt(dev, dma_addr), size,
			   direction);

	plat_unmap_dma_mem(dev, dma_addr, size, direction);
}

EXPORT_SYMBOL(dma_unmap_single);

int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) sg_virt(sg);
		if (!plat_device_is_coherent(dev) && addr)
			__dma_sync(addr, sg->length, direction);
		sg->dma_address = plat_map_dma_mem(dev,
				(void *)addr, sg->length);
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);

dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = (unsigned long) page_address(page) + offset;
		__dma_sync(addr, size, direction);
	}

	return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

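/*
 * For DMA_TO_DEVICE the device only read the buffer, so the caches
 * hold nothing stale and no maintenance is needed on unmap.
 */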
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE) {
			addr = (unsigned long) sg_virt(sg);
			if (addr)
				__dma_sync(addr, sg->length, direction);
		}
		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

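/*
 * The scatterlist sync variants go through the lowmem mapping of each
 * entry (page_address()) and sync one full sg entry at a time.
 */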
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body. */
	for (i = 0; i < nelems; i++, sg++) {
		if (cpu_is_noncoherent_r10000(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
				   sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body. */
	for (i = 0; i < nelems; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
				   sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

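/*
 * Mapping-error detection and DMA-mask support are entirely platform
 * decisions; delegate both to the plat_* hooks.
 */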
int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return plat_dma_mapping_error(dev, dma_addr);
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
	return plat_dma_supported(dev, mask);
}

EXPORT_SYMBOL(dma_supported);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev))
		__dma_sync((unsigned long)vaddr, size, direction);
}

EXPORT_SYMBOL(dma_cache_sync);