/* arch/x86/include/asm/dma-mapping.h */
#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/PCI/PCI-DMA-mapping.txt and
 * Documentation/DMA-API.txt for documentation.
 */

#include <linux/kmemcheck.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

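/* Legacy ISA devices can only address the low 16 MB, hence the 24-bit mask. */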
#ifdef CONFIG_ISA
# define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
#else
# define ISA_DMA_BIT_MASK DMA_BIT_MASK(32)
#endif

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

extern struct dma_map_ops *dma_ops;

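/*
 * Return the dma_map_ops for @dev.  On 64-bit kernels a device may carry
 * per-device ops in dev->archdata.dma_ops (e.g. when it sits behind an
 * IOMMU); otherwise, and always on 32-bit, the global dma_ops are used.
 */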
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
	return dma_ops;
#else
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	else
		return dev->archdata.dma_ops;
#endif
}

#include <asm-generic/dma-mapping-common.h>

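/*
 * A dma_addr_t returned by the mapping functions is not guaranteed to be
 * usable: the mapping can fail (e.g. when IOMMU space is exhausted), so
 * drivers must check each returned handle with dma_mapping_error().
 */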
/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

	return (dma_addr == bad_dma_address);
}

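/* DMA is cache-coherent on x86, so the noncoherent API is just an alias. */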
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_addr, gfp_t flag);

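/*
 * True if the range [addr, addr + size) lies within the device's DMA mask.
 * A device with no dma_mask set is treated as incapable of DMA.
 */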
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return 0;

	return addr + size <= *dev->dma_mask;
}

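/*
 * On x86 the bus (DMA) address space is identical to the physical address
 * space, so these conversions are identity functions.
 */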
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr;
}

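/*
 * No cache maintenance is needed on cache-coherent x86; flushing the CPU
 * write buffers is enough to make prior stores visible to the device.
 */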
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction dir)
{
	flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * There is no easy way to get the cache line size on all x86
	 * CPUs, so return the maximum possible to be safe.
	 */
	return boot_cpu_data.x86_clflush_size;
}

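/*
 * Effective mask for a coherent allocation: the device's coherent_dma_mask
 * if set, otherwise 24 bits for GFP_DMA requests and 32 bits for the rest.
 */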
static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
						    gfp_t gfp)
{
	unsigned long dma_mask = dev->coherent_dma_mask;

	if (!dma_mask)
		dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);

	return dma_mask;
}

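/*
 * Translate the coherent mask into GFP zone flags: masks within 24 bits
 * need ZONE_DMA; on 64-bit, masks within 32 bits need ZONE_DMA32 unless
 * GFP_DMA was already selected.
 */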
static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

	if (dma_mask <= DMA_BIT_MASK(24))
		gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
	if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;
#endif
	return gfp;
}

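/*
 * Allocate a coherent DMA buffer.  Caller-supplied zone flags are stripped
 * and recomputed from the device's coherent mask; a per-device coherent
 * memory pool, if declared, is tried before the dma_map_ops allocator, and
 * a NULL device falls back to x86_dma_fallback_dev.
 */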
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *memory;

	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
		return memory;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	if (!is_device_dma_capable(dev))
		return NULL;

	if (!ops->alloc_coherent)
		return NULL;

	memory = ops->alloc_coherent(dev, size, dma_handle,
				     dma_alloc_coherent_gfp_flags(dev, gfp));
	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);

	return memory;
}

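/*
 * Free a buffer obtained from dma_alloc_coherent().  Must not be called
 * with interrupts disabled; buffers from a per-device coherent pool are
 * released there rather than through the dma_map_ops.
 */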
static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t bus)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());	/* for portability */

	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	debug_dma_free_coherent(dev, size, vaddr, bus);
	if (ops->free_coherent)
		ops->free_coherent(dev, size, vaddr, bus);
}
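
/*
 * Typical driver usage (illustrative sketch only; "ring", RING_BYTES and
 * "pdev" are made-up names, not part of this header):
 *
 *	void *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(&pdev->dev, RING_BYTES, &ring_dma,
 *				  GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, RING_BYTES, ring, ring_dma);
 */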

#endif /* _ASM_X86_DMA_MAPPING_H */