/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#ifndef _BLACKFIN_DMA_MAPPING_H
#define _BLACKFIN_DMA_MAPPING_H

#include <asm/cacheflush.h>
struct scatterlist;

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp);
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
		       dma_addr_t dma_handle);

/*
 * Now for the API extensions over the pci_ one
 */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_supported(d, m)		(1)
#define dma_is_consistent(d, h)		(1)
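
/*
 * Blackfin has no IOMMU and a flat physical address space, so any DMA
 * mask a driver asks for can be satisfied.
 */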

static inline int
dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
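
/*
 * Mapping on Blackfin is just cache maintenance over a flat address
 * space, so it can never fail; always report success.
 */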
static inline int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

extern void
__dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir);
static inline void
__dma_sync_inline(dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_NONE:
		BUG();
	case DMA_TO_DEVICE:		/* writeback only */
		flush_dcache_range(addr, addr + size);
		break;
	case DMA_FROM_DEVICE:		/* invalidate only */
	case DMA_BIDIRECTIONAL:		/* flush and invalidate */
		/* Blackfin has no dedicated invalidate (it includes a flush) */
		invalidate_dcache_range(addr, addr + size);
		break;
	}
}
static inline void
_dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	if (__builtin_constant_p(dir))
		__dma_sync_inline(addr, size, dir);
	else
		__dma_sync(addr, size, dir);
}
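
/*
 * Note: when the direction is a compile-time constant, e.g.
 *
 *	_dma_sync(addr, size, DMA_TO_DEVICE);
 *
 * __builtin_constant_p(dir) is true and the switch in __dma_sync_inline()
 * folds to a single flush_dcache_range() call; a runtime-variable
 * direction falls back to the out-of-line __dma_sync().
 */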

static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction dir)
{
	_dma_sync((dma_addr_t)ptr, size, dir);
	return (dma_addr_t)ptr;
}
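
/*
 * Streaming usage sketch (illustrative only; "dev", "buf" and "len" are
 * hypothetical):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	...start the transfer with "handle", wait for completion...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 *
 * The returned handle is simply the buffer's address; mapping exists to
 * do the cache writeback/invalidate.
 */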

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}
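
/*
 * No translation state was created at map time, so unmapping reduces to
 * a direction sanity check.
 */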
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
	       enum dma_data_direction dir)
{
	dma_unmap_single(dev, dma_addr, size, dir);
}

extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		      enum dma_data_direction dir);
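
/*
 * Scatter/gather sketch (illustrative; "sgl" and "nents" describe an
 * already-populated scatterlist):
 *
 *	int n = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *	...program one DMA descriptor per mapped entry...
 *	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *
 * dma_map_sg() is out of line because it has to walk and sync each entry.
 */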

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg,
	     int nhwentries, enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir)
{
	_dma_sync(handle + offset, size, dir);
}

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
			enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
			   enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
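
/*
 * Why the *_for_cpu variants above are empty: the invalidate for
 * DMA_FROM_DEVICE is done up front at map/sync_for_device time, before
 * the device writes, so after the transfer the CPU reads fresh data with
 * no further cache work needed.
 */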

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
		    enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));
}

extern void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nents, enum dma_data_direction dir);

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	_dma_sync((dma_addr_t)vaddr, size, dir);
}

#endif /* _BLACKFIN_DMA_MAPPING_H */