/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>

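/*
 * Translate a bus/DMA address back into a kernel virtual address so that
 * the cache maintenance helpers below can operate on the buffer.
 */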
static inline unsigned long dma_addr_to_virt(dma_addr_t dma_addr)
{
	unsigned long addr = plat_dma_addr_to_phys(dma_addr);

	return (unsigned long)phys_to_virt(addr);
}

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

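/*
 * Non-coherent R10000/R12000 systems need extra cache maintenance when a
 * buffer is handed back to the CPU (presumably because these CPUs can
 * speculatively refill cache lines while DMA is still in flight); this
 * helper identifies that case.
 */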
static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (current_cpu_type() == CPU_R10000 ||
		current_cpu_type() == CPU_R12000);
}

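/*
 * Pick the right allocation zone for a device: strip any caller-supplied
 * zone modifiers, then re-add __GFP_DMA or __GFP_DMA32 as dictated by the
 * device's coherent DMA mask.
 */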
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ZONE_DMA
	if (dev == NULL)
		gfp |= __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
		gfp |= __GFP_DMA;
	else
#endif
#ifdef CONFIG_ZONE_DMA32
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
		gfp |= __GFP_DMA32;
	else
#endif
		;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp;
}

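/*
 * Allocate cached memory for DMA; callers are expected to do their own
 * cache maintenance, e.g. via dma_cache_sync() below.
 */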
void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

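/*
 * As above, except that on non-coherent hardware the buffer is flushed
 * from the caches and an uncached alias of it is returned.
 */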
void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
			ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

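/*
 * Undo dma_alloc_coherent(): convert the uncached address back to its
 * cached alias before freeing the underlying pages.
 */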
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);

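/*
 * Perform the cache maintenance a transfer direction requires: write back
 * before a transfer to the device, invalidate after a transfer from it,
 * and both for bidirectional mappings.
 */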
static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}

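/*
 * Streaming mappings: for non-coherent devices the buffer is synced
 * before ownership passes to the hardware.
 */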
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	if (!plat_device_is_coherent(dev))
		__dma_sync(addr, size, direction);

	return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_virt(dma_addr), size,
			   direction);

	plat_unmap_dma_mem(dma_addr);
}

EXPORT_SYMBOL(dma_unmap_single);

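/*
 * Map a scatterlist entry by entry; each segment is synced separately and
 * given its own bus address.
 */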
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) sg_virt(sg);
		if (!plat_device_is_coherent(dev) && addr)
			__dma_sync(addr, sg->length, direction);
		sg->dma_address = plat_map_dma_mem(dev,
				(void *)addr, sg->length);
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);

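/*
 * Page-based equivalents of dma_map_single()/dma_unmap_single().
 */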
dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = (unsigned long) page_address(page) + offset;
		dma_cache_wback_inv(addr, size);
	}

	return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

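/*
 * Note that, unlike dma_unmap_single(), the writeback/invalidate below is
 * issued on the address returned by plat_dma_addr_to_phys() rather than
 * on a kernel virtual address.
 */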
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) {
		unsigned long addr;

		addr = plat_dma_addr_to_phys(dma_address);
		dma_cache_wback_inv(addr, size);
	}

	plat_unmap_dma_mem(dma_address);
}

EXPORT_SYMBOL(dma_unmap_page);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE) {
			addr = (unsigned long) sg_virt(sg);
			if (addr)
				__dma_sync(addr, sg->length, direction);
		}
		plat_unmap_dma_mem(sg->dma_address);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);

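/*
 * The sync_*_for_cpu() variants below only act on non-coherent
 * R10000/R12000 systems, where the caches may have been refilled during
 * DMA; the *_for_device() variants must sync for any non-coherent device.
 */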
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (cpu_is_noncoherent_r10000(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
				   sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
				   sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

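/*
 * The mapping functions above cannot fail, so no error encoding for bus
 * addresses is needed.
 */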
int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s, so we can't
	 * guarantee allocations that must be within a tighter range than
	 * GFP_DMA.
	 */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	return 1;
}

EXPORT_SYMBOL(dma_supported);

int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
	return plat_device_is_coherent(dev);
}

EXPORT_SYMBOL(dma_is_consistent);

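/*
 * Explicit cache maintenance for memory obtained with
 * dma_alloc_noncoherent(); a no-op on cache-coherent hardware.
 */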
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev))
		__dma_sync((unsigned long)vaddr, size, direction);
}

EXPORT_SYMBOL(dma_cache_sync);