/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/scatterlist.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>

static inline unsigned long dma_addr_to_virt(dma_addr_t dma_addr)
{
        unsigned long addr = plat_dma_addr_to_phys(dma_addr);

        return (unsigned long)phys_to_virt(addr);
}

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

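/*
 * cpu_is_noncoherent_r10000() is true when the device is not DMA-coherent
 * and the CPU is an R10000 or R12000.  These cores can speculatively
 * refill cache lines covering a DMA buffer, so the unmap and sync-for-cpu
 * paths below redo the cache maintenance before handing a buffer back to
 * the CPU.
 */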
static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
        return !plat_device_is_coherent(dev) &&
               (current_cpu_type() == CPU_R10000 ||
                current_cpu_type() == CPU_R12000);
}

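/*
 * Noncoherent allocations return a cached kernel virtual address; callers
 * are expected to perform their own cache maintenance (e.g. via
 * dma_cache_sync()) around device accesses.
 */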
void *dma_alloc_noncoherent(struct device *dev, size_t size,
        dma_addr_t *dma_handle, gfp_t gfp)
{
        void *ret;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
                gfp |= GFP_DMA;
        ret = (void *) __get_free_pages(gfp, get_order(size));

        if (ret != NULL) {
                memset(ret, 0, size);
                *dma_handle = plat_map_dma_mem(dev, ret, size);
        }

        return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

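/*
 * Coherent allocations start out like the noncoherent ones; on platforms
 * without hardware-maintained coherency the freshly zeroed buffer is
 * written back and invalidated and its uncached (UNCAC_ADDR) alias is
 * returned instead of the cached address.
 */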
void *dma_alloc_coherent(struct device *dev, size_t size,
        dma_addr_t *dma_handle, gfp_t gfp)
{
        void *ret;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
                gfp |= GFP_DMA;
        ret = (void *) __get_free_pages(gfp, get_order(size));

        if (ret) {
                memset(ret, 0, size);
                *dma_handle = plat_map_dma_mem(dev, ret, size);

                if (!plat_device_is_coherent(dev)) {
                        dma_cache_wback_inv((unsigned long) ret, size);
                        ret = UNCAC_ADDR(ret);
                }
        }

        return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        unsigned long addr = (unsigned long) vaddr;

        if (!plat_device_is_coherent(dev))
                addr = CAC_ADDR(addr);

        free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);

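/*
 * __dma_sync() picks the cache operation matching the transfer direction:
 * writeback before the device reads (DMA_TO_DEVICE), invalidate before the
 * CPU reads what the device wrote (DMA_FROM_DEVICE), and writeback plus
 * invalidate for DMA_BIDIRECTIONAL.
 */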
static inline void __dma_sync(unsigned long addr, size_t size,
        enum dma_data_direction direction)
{
        switch (direction) {
        case DMA_TO_DEVICE:
                dma_cache_wback(addr, size);
                break;

        case DMA_FROM_DEVICE:
                dma_cache_inv(addr, size);
                break;

        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv(addr, size);
                break;

        default:
                BUG();
        }
}

dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
        enum dma_data_direction direction)
{
        unsigned long addr = (unsigned long) ptr;

        if (!plat_device_is_coherent(dev))
                __dma_sync(addr, size, direction);

        return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
        enum dma_data_direction direction)
{
        if (cpu_is_noncoherent_r10000(dev))
                __dma_sync(dma_addr_to_virt(dma_addr), size, direction);

        plat_unmap_dma_mem(dma_addr);
}

EXPORT_SYMBOL(dma_unmap_single);

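/*
 * Scatterlist mapping: for non-coherent devices each entry is synced with
 * __dma_sync() before being handed to the device, and its bus address from
 * plat_map_dma_mem() is stored in sg->dma_address.
 */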
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                unsigned long addr;

                addr = (unsigned long) sg_virt(sg);
                if (!plat_device_is_coherent(dev) && addr)
                        __dma_sync(addr, sg->length, direction);
                sg->dma_address = plat_map_dma_mem(dev,
                                                   (void *)addr, sg->length);
        }

        return nents;
}

EXPORT_SYMBOL(dma_map_sg);

dma_addr_t dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = (unsigned long) page_address(page) + offset;
                dma_cache_wback_inv(addr, size);
        }

        return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) {
                unsigned long addr;

                addr = plat_dma_addr_to_phys(dma_address);
                dma_cache_wback_inv(addr, size);
        }

        plat_unmap_dma_mem(dma_address);
}

EXPORT_SYMBOL(dma_unmap_page);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
        enum dma_data_direction direction)
{
        unsigned long addr;
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nhwentries; i++, sg++) {
                if (!plat_device_is_coherent(dev) &&
                    direction != DMA_TO_DEVICE) {
                        addr = (unsigned long) sg_virt(sg);
                        if (addr)
                                __dma_sync(addr, sg->length, direction);
                }
                plat_unmap_dma_mem(sg->dma_address);
        }
}

EXPORT_SYMBOL(dma_unmap_sg);

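/*
 * Note the asymmetry in the sync functions below: handing a buffer back to
 * the CPU only needs cache maintenance on the speculating R10000/R12000
 * cores, while handing it to the device needs it on every non-coherent
 * platform.
 */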
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (cpu_is_noncoherent_r10000(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dma_handle);
                __dma_sync(addr, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dma_handle);
                __dma_sync(addr, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (cpu_is_noncoherent_r10000(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dma_handle);
                __dma_sync(addr + offset, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dma_handle);
                __dma_sync(addr + offset, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        /* Make sure that gcc doesn't leave the empty loop body. */
        for (i = 0; i < nelems; i++, sg++) {
                if (cpu_is_noncoherent_r10000(dev))
                        __dma_sync((unsigned long)page_address(sg_page(sg)),
                                   sg->length, direction);
                plat_unmap_dma_mem(sg->dma_address);
        }
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        /* Make sure that gcc doesn't leave the empty loop body. */
        for (i = 0; i < nelems; i++, sg++) {
                if (!plat_device_is_coherent(dev))
                        __dma_sync((unsigned long)page_address(sg_page(sg)),
                                   sg->length, direction);
                plat_unmap_dma_mem(sg->dma_address);
        }
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

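/*
 * Mappings produced by this implementation never fail, so there is never an
 * error to report.
 */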
int dma_mapping_error(dma_addr_t dma_addr)
{
        return 0;
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
        /*
         * We fall back to GFP_DMA when the mask isn't all 1s, so we can't
         * guarantee allocations that must be within a tighter range than
         * GFP_DMA.
         */
        if (mask < 0x00ffffff)
                return 0;

        return 1;
}

EXPORT_SYMBOL(dma_supported);

int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
        return plat_device_is_coherent(dev);
}

EXPORT_SYMBOL(dma_is_consistent);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev))
                dma_cache_wback_inv((unsigned long)vaddr, size);
}

EXPORT_SYMBOL(dma_cache_sync);