x86: don't do dma if mask is NULL.
[net-next-2.6.git] / arch/x86/kernel/pci-dma.c

#include <linux/dma-mapping.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/pci.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/gart.h>
#include <asm/calgary.h>

int forbid_dac __read_mostly;
EXPORT_SYMBOL(forbid_dac);

const struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

int iommu_sac_force __read_mostly = 0;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/* This tells the BIO block layer to assume merging. Default to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);

int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
EXPORT_SYMBOL(dma_set_mask);
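
/*
 * Illustrative usage (not part of this file): a driver would typically
 * negotiate its mask during probe, e.g.
 *
 *      if (dma_set_mask(&pdev->dev, DMA_32BIT_MASK))
 *              return -EIO;
 *
 * The call fails with -EIO when dev->dma_mask is NULL (no DMA possible
 * for this device) or when dma_supported() rejects the requested mask.
 * The "pdev" name above is only a placeholder.
 */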

#ifdef CONFIG_X86_64
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);

static int __init parse_dma32_size_opt(char *p)
{
        if (!p)
                return -EINVAL;
        dma32_bootmem_size = memparse(p, &p);
        return 0;
}
early_param("dma32_size", parse_dma32_size_opt);
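
/*
 * Example (assumption, not from this file): the reservation made below can
 * be resized from the kernel command line, e.g. "dma32_size=256M";
 * memparse() accepts the usual K/M/G suffixes.
 */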

void __init dma32_reserve_bootmem(void)
{
        unsigned long size, align;
        if (end_pfn <= MAX_DMA32_PFN)
                return;

        align = 64ULL<<20;
        size = round_up(dma32_bootmem_size, align);
        dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
                                                    __pa(MAX_DMA_ADDRESS));
        if (dma32_bootmem_ptr)
                dma32_bootmem_size = size;
        else
                dma32_bootmem_size = 0;
}
static void __init dma32_free_bootmem(void)
{
        int node;

        if (end_pfn <= MAX_DMA32_PFN)
                return;

        if (!dma32_bootmem_ptr)
                return;

        for_each_online_node(node)
                free_bootmem_node(NODE_DATA(node), __pa(dma32_bootmem_ptr),
                                  dma32_bootmem_size);

        dma32_bootmem_ptr = NULL;
        dma32_bootmem_size = 0;
}

void __init pci_iommu_alloc(void)
{
        /* free the range so iommu could get some range less than 4G */
        dma32_free_bootmem();
        /*
         * The order of these functions is important for
         * fall-back/fail-over reasons
         */
#ifdef CONFIG_GART_IOMMU
        gart_iommu_hole_init();
#endif

#ifdef CONFIG_CALGARY_IOMMU
        detect_calgary();
#endif

        detect_intel_iommu();

#ifdef CONFIG_SWIOTLB
        pci_swiotlb_init();
#endif
}
#endif

/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter
 * documentation.
 */
static __init int iommu_setup(char *p)
{
        iommu_merge = 1;

        if (!p)
                return -EINVAL;

        while (*p) {
                if (!strncmp(p, "off", 3))
                        no_iommu = 1;
                /* gart_parse_options has more force support */
                if (!strncmp(p, "force", 5))
                        force_iommu = 1;
                if (!strncmp(p, "noforce", 7)) {
                        iommu_merge = 0;
                        force_iommu = 0;
                }

                if (!strncmp(p, "biomerge", 8)) {
                        iommu_bio_merge = 4096;
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "panic", 5))
                        panic_on_overflow = 1;
                if (!strncmp(p, "nopanic", 7))
                        panic_on_overflow = 0;
                if (!strncmp(p, "merge", 5)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "nomerge", 7))
                        iommu_merge = 0;
                if (!strncmp(p, "forcesac", 8))
                        iommu_sac_force = 1;
                if (!strncmp(p, "allowdac", 8))
                        forbid_dac = 0;
                if (!strncmp(p, "nodac", 5))
                        forbid_dac = -1;
                if (!strncmp(p, "usedac", 6)) {
                        forbid_dac = -1;
                        return 1;
                }
#ifdef CONFIG_SWIOTLB
                if (!strncmp(p, "soft", 4))
                        swiotlb = 1;
#endif

#ifdef CONFIG_GART_IOMMU
                gart_parse_options(p);
#endif

#ifdef CONFIG_CALGARY_IOMMU
                if (!strncmp(p, "calgary", 7))
                        use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

                p += strcspn(p, ",");
                if (*p == ',')
                        ++p;
        }
        return 0;
}
early_param("iommu", iommu_setup);
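
/*
 * Illustrative command lines (assumptions, not part of this file), matching
 * the parser above: "iommu=off" disables IOMMU use, "iommu=force" forces all
 * DMA through the IOMMU, "iommu=soft" selects the software swiotlb, and
 * options can be combined with commas, e.g. "iommu=noforce,nomerge,nodac".
 */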

#ifdef CONFIG_X86_32
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                                dma_addr_t device_addr, size_t size, int flags)
{
        void __iomem *mem_base = NULL;
        int pages = size >> PAGE_SHIFT;
        int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

        if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
                goto out;
        if (!size)
                goto out;
        if (dev->dma_mem)
                goto out;

        /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

        mem_base = ioremap(bus_addr, size);
        if (!mem_base)
                goto out;

        dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
        if (!dev->dma_mem)
                goto out;
        dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!dev->dma_mem->bitmap)
                goto free1_out;

        dev->dma_mem->virt_base = mem_base;
        dev->dma_mem->device_base = device_addr;
        dev->dma_mem->size = pages;
        dev->dma_mem->flags = flags;

        if (flags & DMA_MEMORY_MAP)
                return DMA_MEMORY_MAP;

        return DMA_MEMORY_IO;

 free1_out:
        kfree(dev->dma_mem);
 out:
        if (mem_base)
                iounmap(mem_base);
        return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
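
/*
 * Illustrative usage (assumption, not from this file): a driver with
 * device-local memory at a fixed bus address might declare it so that
 * dma_alloc_coherent() on this device is satisfied from that region, e.g.
 *
 *      if (dma_declare_coherent_memory(dev, MYDEV_SRAM_BUS_ADDR,
 *                                      MYDEV_SRAM_DEV_ADDR, MYDEV_SRAM_SIZE,
 *                                      DMA_MEMORY_MAP) != DMA_MEMORY_MAP)
 *              return -ENOMEM;
 *
 * The MYDEV_* constants are hypothetical placeholders.
 */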

void dma_release_declared_memory(struct device *dev)
{
        struct dma_coherent_mem *mem = dev->dma_mem;

        if (!mem)
                return;
        dev->dma_mem = NULL;
        iounmap(mem->virt_base);
        kfree(mem->bitmap);
        kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
                                        dma_addr_t device_addr, size_t size)
{
        struct dma_coherent_mem *mem = dev->dma_mem;
        int pos, err;
        int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1);

        pages >>= PAGE_SHIFT;

        if (!mem)
                return ERR_PTR(-EINVAL);

        pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
        err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
        if (err != 0)
                return ERR_PTR(err);
        return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
#endif /* CONFIG_X86_32 */

int dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
        if (mask > 0xffffffff && forbid_dac > 0) {
                printk(KERN_INFO "PCI: Disallowing DAC for device %s\n",
                       dev->bus_id);
                return 0;
        }
#endif

        if (dma_ops->dma_supported)
                return dma_ops->dma_supported(dev, mask);

        /* Copied from i386. Doesn't make much sense, because it will
           only work for pci_alloc_coherent.
           The caller just has to use GFP_DMA in this case. */
        if (mask < DMA_24BIT_MASK)
                return 0;

        /* Tell the device to use SAC when IOMMU force is on.  This
           allows the driver to use cheaper accesses in some cases.

           Problem with this is that if we overflow the IOMMU area and
           return DAC as fallback address the device may not handle it
           correctly.

           As a special case some controllers have a 39bit address
           mode that is as efficient as 32bit (aic79xx). Don't force
           SAC for these.  Assume all masks <= 40 bits are of this
           type. Normally this doesn't make any difference, but gives
           more gentle handling of IOMMU overflow. */
        if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
                printk(KERN_INFO "%s: Force SAC with mask %Lx\n",
                       dev->bus_id, mask);
                return 0;
        }

        return 1;
}
EXPORT_SYMBOL(dma_supported);
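
/*
 * On conventional PCI, SAC (single address cycle) addressing covers 32-bit
 * bus addresses, while DAC (dual address cycle) uses two address phases to
 * reach above 4GB; forbid_dac and the checks above control the latter.
 */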


static int __init pci_iommu_init(void)
{
#ifdef CONFIG_CALGARY_IOMMU
        calgary_iommu_init();
#endif

        intel_iommu_init();

#ifdef CONFIG_GART_IOMMU
        gart_iommu_init();
#endif

        no_iommu_init();
        return 0;
}

void pci_iommu_shutdown(void)
{
        gart_iommu_shutdown();
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
        if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
                printk(KERN_INFO "PCI: VIA PCI bridge detected. "
                       "Disabling DAC.\n");
                forbid_dac = 1;
        }
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif