/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
 *
 */
#ifndef __ASM_MACH_IP32_DMA_COHERENCE_H
#define __ASM_MACH_IP32_DMA_COHERENCE_H

#include <asm/ip32/crime.h>

struct device;

/*
 * Few notes.
 * 1. CPU sees memory as two chunks: 0-256M @ 0x0, and the rest @ 0x40000000+256M
 * 2. PCI sees memory as one big chunk @ 0x0 (or we could use 0x40000000 for
 *    native-endian)
 * 3. All other devices see memory as one big chunk at 0x40000000
 * 4. Non-PCI devices will pass NULL as struct device*
 *
 * Thus we translate differently, depending on device.
 */

#define RAM_OFFSET_MASK	0x3fffffffUL
f1dbf8e7 RB |
29 | static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, |
30 | size_t size) | |
9a88cbb5 RB |
31 | { |
32 | dma_addr_t pa = virt_to_phys(addr) & RAM_OFFSET_MASK; | |
33 | ||
34 | if (dev == NULL) | |
35 | pa += CRIME_HI_MEM_BASE; | |
36 | ||
37 | return pa; | |
38 | } | |
39 | ||
48e1fd5a DD |
40 | static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, |
41 | struct page *page) | |
9a88cbb5 RB |
42 | { |
43 | dma_addr_t pa; | |
44 | ||
45 | pa = page_to_phys(page) & RAM_OFFSET_MASK; | |
46 | ||
47 | if (dev == NULL) | |
48 | pa += CRIME_HI_MEM_BASE; | |
49 | ||
50 | return pa; | |
51 | } | |
52 | ||
53 | /* This is almost certainly wrong but it's what dma-ip32.c used to use */ | |
43e4f7ae | 54 | static inline unsigned long plat_dma_addr_to_phys(struct device *dev, |
3807ef3f | 55 | dma_addr_t dma_addr) |
9a88cbb5 RB |
56 | { |
57 | unsigned long addr = dma_addr & RAM_OFFSET_MASK; | |
58 | ||
59 | if (dma_addr >= 256*1024*1024) | |
60 | addr += CRIME_HI_MEM_BASE; | |
61 | ||
62 | return addr; | |
63 | } | |
64 | ||
d3f634b9 KC |
65 | static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr, |
66 | size_t size, enum dma_data_direction direction) | |
9a88cbb5 RB |
67 | { |
68 | } | |
69 | ||
843aef49 DD |
70 | static inline int plat_dma_supported(struct device *dev, u64 mask) |
71 | { | |
72 | /* | |
73 | * we fall back to GFP_DMA when the mask isn't all 1s, | |
74 | * so we can't guarantee allocations that must be | |
75 | * within a tighter range than GFP_DMA.. | |
76 | */ | |
77 | if (mask < DMA_BIT_MASK(24)) | |
78 | return 0; | |
79 | ||
80 | return 1; | |
81 | } | |
82 | ||
/* No platform-specific extra synchronisation is performed here. */
static inline void plat_extra_sync_for_device(struct device *dev)
{
}
87 | ||
88 | static inline int plat_dma_mapping_error(struct device *dev, | |
89 | dma_addr_t dma_addr) | |
90 | { | |
91 | return 0; | |
92 | } | |
93 | ||
static inline int plat_device_is_coherent(struct device *dev)
{
	/* IP32 is non-coherent. */
	return 0;
}

#endif /* __ASM_MACH_IP32_DMA_COHERENCE_H */