/*
 * arch/x86/kernel/pci-nommu.c
 *
 * Fallback functions when the main IOMMU code is not compiled in. This
 * code is roughly equivalent to i386.
 */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/mm.h>

#include <asm/processor.h>
#include <asm/iommu.h>
#include <asm/dma.h>

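/*
 * Verify that a bus address is reachable given the device's DMA mask.
 * Returns 1 if the whole [bus, bus + size) range fits under the mask;
 * otherwise logs the overflow (only for masks of at least 32 bits,
 * where an overflow is unexpected) and returns 0.
 */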
static int
check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
{
	if (hwdev && !is_buffer_dma_capable(*hwdev->dma_mask, bus, size)) {
		if (*hwdev->dma_mask >= DMA_BIT_MASK(32))
			printk(KERN_ERR
			    "nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
				name, (long long)bus, size,
				(long long)*hwdev->dma_mask);
		return 0;
	}
	return 1;
}

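/*
 * Without an IOMMU there is no remapping: the bus address is simply the
 * physical address of the page plus the offset, checked against the
 * device's DMA mask.
 */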
static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	dma_addr_t bus = page_to_phys(page) + offset;
	WARN_ON(size == 0);
	if (!check_addr("map_single", dev, bus, size))
		return bad_dma_address;
	flush_write_buffers();
	return bus;
}

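/*
 * Illustration only, not part of this file: on x86 of this era a driver's
 * dma_map_single() call reaches the ->map_page hook above roughly as
 * follows. "my_dev" and "buf" are hypothetical names.
 *
 *	struct device *my_dev;		// hypothetical device
 *	void *buf;			// kernel-virtual buffer of len bytes
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(my_dev, buf, len, DMA_TO_DEVICE);
 *	// with these ops installed, that amounts to roughly:
 *	// nommu_map_page(my_dev, virt_to_page(buf),
 *	//		  offset_in_page(buf), len, DMA_TO_DEVICE, NULL);
 */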
/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA. This is the scatter-gather version of the
 * above pci_map_single interface. Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length. They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 * DMA address/length pairs than there are SG table elements.
 * (for example via virtual mapping capabilities)
 * The routine returns the number of addr/length pairs actually
 * used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir,
			struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		BUG_ON(!sg_page(s));
		s->dma_address = sg_phys(s);
		if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
			return 0;
		s->dma_length = s->length;
	}
	flush_write_buffers();
	return nents;
}

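/*
 * Illustration only, not part of this file: a minimal, hypothetical
 * driver-side use of the scatter-gather interface described above.
 * "sglist" is assumed to hold "nr" entries initialised via sg_set_page();
 * program_hw_descriptor() is a hypothetical driver helper.
 *
 *	struct scatterlist *s;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sglist, nr, DMA_TO_DEVICE);
 *	if (count == 0)
 *		return -EIO;			// mapping failed
 *	for_each_sg(sglist, s, count, i)
 *		program_hw_descriptor(i, sg_dma_address(s), sg_dma_len(s));
 */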
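/*
 * Counterpart to dma_generic_alloc_coherent(): coherent memory here is
 * just pages from the page allocator, so freeing it is free_pages().
 */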
static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
				dma_addr_t dma_addr)
{
	free_pages((unsigned long)vaddr, get_order(size));
}

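/*
 * The dma_map_ops table used when no hardware IOMMU is present: bus
 * addresses are simply physical addresses (hence .is_phys = 1), so no
 * unmap or sync hooks are provided.
 */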
struct dma_map_ops nommu_dma_ops = {
	.alloc_coherent = dma_generic_alloc_coherent,
	.free_coherent = nommu_free_coherent,
	.map_sg = nommu_map_sg,
	.map_page = nommu_map_page,
	.is_phys = 1,
};

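/*
 * Install these ops as the fallback, unless an IOMMU implementation has
 * already set dma_ops.
 */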
void __init no_iommu_init(void)
{
	if (dma_ops)
		return;

	force_iommu = 0; /* no HW IOMMU */
	dma_ops = &nommu_dma_ops;
}