/*
 * m68k DMA mapping interface (non-Sun3 MMU configurations).
 *
 * NOTE(review): the original leading lines were git-blame/table extraction
 * residue; replaced with this descriptive comment.
 */
1 | #ifndef _M68K_DMA_MAPPING_H |
2 | #define _M68K_DMA_MAPPING_H | |
3 | ||
4 | #include <asm/cache.h> | |
5 | ||
6 | struct scatterlist; | |
7 | ||
8 | #ifndef CONFIG_MMU_SUN3 | |
9 | static inline int dma_supported(struct device *dev, u64 mask) | |
10 | { | |
11 | return 1; | |
12 | } | |
13 | ||
14 | static inline int dma_set_mask(struct device *dev, u64 mask) | |
15 | { | |
16 | return 0; | |
17 | } | |
18 | ||
9a4048a2 GU |
19 | static inline int dma_is_consistent(struct device *dev, dma_addr_t dma_addr) |
20 | { | |
21 | return 0; | |
22 | } | |
23 | ||
24 | extern void *dma_alloc_coherent(struct device *, size_t, | |
25 | dma_addr_t *, gfp_t); | |
26 | extern void dma_free_coherent(struct device *, size_t, | |
27 | void *, dma_addr_t); | |
28 | ||
29 | static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, | |
30 | dma_addr_t *handle, gfp_t flag) | |
31 | { | |
32 | return dma_alloc_coherent(dev, size, handle, flag); | |
33 | } | |
34 | static inline void dma_free_noncoherent(struct device *dev, size_t size, | |
35 | void *addr, dma_addr_t handle) | |
36 | { | |
37 | dma_free_coherent(dev, size, addr, handle); | |
38 | } | |
39 | static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | |
40 | enum dma_data_direction dir) | |
41 | { | |
42 | /* we use coherent allocation, so not much to do here. */ | |
43 | } | |
44 | ||
45 | extern dma_addr_t dma_map_single(struct device *, void *, size_t, | |
46 | enum dma_data_direction); | |
47 | static inline void dma_unmap_single(struct device *dev, dma_addr_t addr, | |
48 | size_t size, enum dma_data_direction dir) | |
49 | { | |
50 | } | |
51 | ||
52 | extern dma_addr_t dma_map_page(struct device *, struct page *, | |
53 | unsigned long, size_t size, | |
54 | enum dma_data_direction); | |
55 | static inline void dma_unmap_page(struct device *dev, dma_addr_t address, | |
56 | size_t size, enum dma_data_direction dir) | |
57 | { | |
58 | } | |
59 | ||
60 | extern int dma_map_sg(struct device *, struct scatterlist *, int, | |
61 | enum dma_data_direction); | |
62 | static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg, | |
63 | int nhwentries, enum dma_data_direction dir) | |
64 | { | |
65 | } | |
66 | ||
67 | extern void dma_sync_single_for_device(struct device *, dma_addr_t, size_t, | |
68 | enum dma_data_direction); | |
69 | extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int, | |
70 | enum dma_data_direction); | |
71 | ||
72 | static inline void dma_sync_single_range_for_device(struct device *dev, | |
73 | dma_addr_t dma_handle, unsigned long offset, size_t size, | |
74 | enum dma_data_direction direction) | |
75 | { | |
76 | /* just sync everything for now */ | |
77 | dma_sync_single_for_device(dev, dma_handle, offset + size, direction); | |
78 | } | |
79 | ||
80 | static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, | |
81 | size_t size, enum dma_data_direction dir) | |
82 | { | |
83 | } | |
84 | ||
85 | static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | |
86 | int nents, enum dma_data_direction dir) | |
87 | { | |
88 | } | |
89 | ||
90 | static inline void dma_sync_single_range_for_cpu(struct device *dev, | |
91 | dma_addr_t dma_handle, unsigned long offset, size_t size, | |
92 | enum dma_data_direction direction) | |
93 | { | |
94 | /* just sync everything for now */ | |
95 | dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction); | |
96 | } | |
97 | ||
98 | static inline int dma_mapping_error(struct device *dev, dma_addr_t handle) | |
99 | { | |
100 | return 0; | |
101 | } | |
102 | ||
49148020 | 103 | #else |
9a4048a2 | 104 | #include <asm-generic/dma-mapping-broken.h> |
49148020 | 105 | #endif |
9a4048a2 GU |
106 | |
107 | #endif /* _M68K_DMA_MAPPING_H */ |