#ifndef _ASM_IO_H
#define _ASM_IO_H

#include <linux/types.h>
#include <asm/pgtable.h>

extern unsigned long parisc_vmerge_boundary;
extern unsigned long parisc_vmerge_max_size;

#define BIO_VMERGE_BOUNDARY	parisc_vmerge_boundary
#define BIO_VMERGE_MAX_SIZE	parisc_vmerge_max_size

#define virt_to_phys(a) ((unsigned long)__pa(a))
#define phys_to_virt(a) __va(a)
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

static inline unsigned long isa_bus_to_virt(unsigned long addr) {
	BUG();
	return 0;
}

static inline unsigned long isa_virt_to_bus(void *addr) {
	BUG();
	return 0;
}

/*
 * Memory mapped I/O
 *
 * readX()/writeX() do byteswapping and take an ioremapped address
 * __raw_readX()/__raw_writeX() don't byteswap and take an ioremapped address.
 * gsc_*() don't byteswap and operate on physical addresses;
 * eg dev->hpa or 0xfee00000.
 */

static inline unsigned char gsc_readb(unsigned long addr)
{
	long flags;
	unsigned char ret;

	__asm__ __volatile__(
	"	rsm	2,%0\n"
	"	ldbx	0(%2),%1\n"
	"	mtsm	%0\n"
	: "=&r" (flags), "=r" (ret) : "r" (addr) );

	return ret;
}

static inline unsigned short gsc_readw(unsigned long addr)
{
	long flags;
	unsigned short ret;

	__asm__ __volatile__(
	"	rsm	2,%0\n"
	"	ldhx	0(%2),%1\n"
	"	mtsm	%0\n"
	: "=&r" (flags), "=r" (ret) : "r" (addr) );

	return ret;
}

static inline unsigned int gsc_readl(unsigned long addr)
{
	u32 ret;

	__asm__ __volatile__(
	"	ldwax	0(%1),%0\n"
	: "=r" (ret) : "r" (addr) );

	return ret;
}

static inline unsigned long long gsc_readq(unsigned long addr)
{
	unsigned long long ret;

#ifdef CONFIG_64BIT
	__asm__ __volatile__(
	"	ldda	0(%1),%0\n"
	: "=r" (ret) : "r" (addr) );
#else
	/* two reads may have side effects.. */
	ret = ((u64) gsc_readl(addr)) << 32;
	ret |= gsc_readl(addr+4);
#endif
	return ret;
}

static inline void gsc_writeb(unsigned char val, unsigned long addr)
{
	long flags;
	__asm__ __volatile__(
	"	rsm	2,%0\n"
	"	stbs	%1,0(%2)\n"
	"	mtsm	%0\n"
	: "=&r" (flags) : "r" (val), "r" (addr) );
}

static inline void gsc_writew(unsigned short val, unsigned long addr)
{
	long flags;
	__asm__ __volatile__(
	"	rsm	2,%0\n"
	"	sths	%1,0(%2)\n"
	"	mtsm	%0\n"
	: "=&r" (flags) : "r" (val), "r" (addr) );
}

static inline void gsc_writel(unsigned int val, unsigned long addr)
{
	__asm__ __volatile__(
	"	stwas	%0,0(%1)\n"
	: : "r" (val), "r" (addr) );
}

static inline void gsc_writeq(unsigned long long val, unsigned long addr)
{
#ifdef CONFIG_64BIT
	__asm__ __volatile__(
	"	stda	%0,0(%1)\n"
	: : "r" (val), "r" (addr) );
#else
	/* two writes may have side effects.. */
	gsc_writel(val >> 32, addr);
	gsc_writel(val, addr+4);
#endif
}

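/*
 * Illustrative sketch only (not part of the original header): a driver
 * would pass a *physical* address to the gsc_*() accessors above,
 * typically dev->hpa plus a register offset.  The 0x10 offset and the
 * helper names below are hypothetical, for illustration only.
 */
static inline unsigned int example_gsc_read_status(unsigned long hpa)
{
	/* no byte swapping is performed by gsc_readl() */
	return gsc_readl(hpa + 0x10);
}

static inline void example_gsc_clear_status(unsigned long hpa)
{
	gsc_writel(0, hpa + 0x10);
}
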
/*
 * The standard PCI ioremap interfaces
 */

extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);

/* Most machines react poorly to I/O-space being cacheable... Instead let's
 * define ioremap() in terms of ioremap_nocache().
 */
static inline void __iomem * ioremap(unsigned long offset, unsigned long size)
{
	return __ioremap(offset, size, _PAGE_NO_CACHE);
}
#define ioremap_nocache(off, sz) ioremap((off), (sz))

extern void iounmap(const volatile void __iomem *addr);

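/*
 * Illustrative sketch only (not part of the original header): mapping a
 * device's register space with ioremap() above and tearing it down with
 * iounmap().  The parameter and helper names are hypothetical.
 */
static inline void __iomem *example_map_regs(unsigned long phys_base,
					     unsigned long len)
{
	/* returns an uncached __iomem cookie, or NULL on failure */
	return ioremap(phys_base, len);
}

static inline void example_unmap_regs(void __iomem *regs)
{
	iounmap(regs);
}
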
static inline unsigned char __raw_readb(const volatile void __iomem *addr)
{
	return (*(volatile unsigned char __force *) (addr));
}
static inline unsigned short __raw_readw(const volatile void __iomem *addr)
{
	return *(volatile unsigned short __force *) addr;
}
static inline unsigned int __raw_readl(const volatile void __iomem *addr)
{
	return *(volatile unsigned int __force *) addr;
}
static inline unsigned long long __raw_readq(const volatile void __iomem *addr)
{
	return *(volatile unsigned long long __force *) addr;
}

static inline void __raw_writeb(unsigned char b, volatile void __iomem *addr)
{
	*(volatile unsigned char __force *) addr = b;
}
static inline void __raw_writew(unsigned short b, volatile void __iomem *addr)
{
	*(volatile unsigned short __force *) addr = b;
}
static inline void __raw_writel(unsigned int b, volatile void __iomem *addr)
{
	*(volatile unsigned int __force *) addr = b;
}
static inline void __raw_writeq(unsigned long long b, volatile void __iomem *addr)
{
	*(volatile unsigned long long __force *) addr = b;
}

/* readb can never be const, so use __fswab instead of le*_to_cpu */
#define readb(addr) __raw_readb(addr)
#define readw(addr) __fswab16(__raw_readw(addr))
#define readl(addr) __fswab32(__raw_readl(addr))
#define readq(addr) __fswab64(__raw_readq(addr))
#define writeb(b, addr) __raw_writeb(b, addr)
#define writew(b, addr) __raw_writew(cpu_to_le16(b), addr)
#define writel(b, addr) __raw_writel(cpu_to_le32(b), addr)
#define writeq(b, addr) __raw_writeq(cpu_to_le64(b), addr)

#define readb_relaxed(addr) readb(addr)
#define readw_relaxed(addr) readw(addr)
#define readl_relaxed(addr) readl(addr)
#define readq_relaxed(addr) readq(addr)

#define mmiowb() do { } while (0)

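/*
 * Illustrative sketch only (not part of the original header): readl()
 * and writel() take an ioremapped (__iomem) address and byteswap
 * to/from little-endian register layout, unlike the __raw_*() variants
 * above.  The 0x04 register offset and helper names are hypothetical.
 */
static inline u32 example_mmio_read_ctrl(void __iomem *regs)
{
	return readl(regs + 0x04);	/* value converted from LE */
}

static inline void example_mmio_write_ctrl(void __iomem *regs, u32 val)
{
	writel(val, regs + 0x04);	/* value converted to LE */
}
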
void memset_io(volatile void __iomem *addr, unsigned char val, int count);
void memcpy_fromio(void *dst, const volatile void __iomem *src, int count);
void memcpy_toio(volatile void __iomem *dst, const void *src, int count);

/* Port-space IO */

#define inb_p inb
#define inw_p inw
#define inl_p inl
#define outb_p outb
#define outw_p outw
#define outl_p outl

extern unsigned char eisa_in8(unsigned short port);
extern unsigned short eisa_in16(unsigned short port);
extern unsigned int eisa_in32(unsigned short port);
extern void eisa_out8(unsigned char data, unsigned short port);
extern void eisa_out16(unsigned short data, unsigned short port);
extern void eisa_out32(unsigned int data, unsigned short port);

#if defined(CONFIG_PCI)
extern unsigned char inb(int addr);
extern unsigned short inw(int addr);
extern unsigned int inl(int addr);

extern void outb(unsigned char b, int addr);
extern void outw(unsigned short b, int addr);
extern void outl(unsigned int b, int addr);
#elif defined(CONFIG_EISA)
#define inb eisa_in8
#define inw eisa_in16
#define inl eisa_in32
#define outb eisa_out8
#define outw eisa_out16
#define outl eisa_out32
#else
static inline char inb(unsigned long addr)
{
	BUG();
	return -1;
}

static inline short inw(unsigned long addr)
{
	BUG();
	return -1;
}

static inline int inl(unsigned long addr)
{
	BUG();
	return -1;
}

#define outb(x, y)	BUG()
#define outw(x, y)	BUG()
#define outl(x, y)	BUG()
#endif

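/*
 * Illustrative sketch only (not part of the original header): port-space
 * accesses go through inb()/outb() and friends; on parisc the port
 * number carries the HBA number in the top byte of the 24-bit port
 * address (see IO_SPACE_LIMIT below).  The index/data register pair and
 * the helper name here are hypothetical.
 */
static inline unsigned char example_port_read_id(int port)
{
	outb(0x01, port);		/* select a hypothetical index */
	return inb(port + 1);		/* read back the data register */
}
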
/*
 * String versions of in/out ops:
 */
extern void insb (unsigned long port, void *dst, unsigned long count);
extern void insw (unsigned long port, void *dst, unsigned long count);
extern void insl (unsigned long port, void *dst, unsigned long count);
extern void outsb (unsigned long port, const void *src, unsigned long count);
extern void outsw (unsigned long port, const void *src, unsigned long count);
extern void outsl (unsigned long port, const void *src, unsigned long count);


/* IO Port space is: BBiiii where BB is HBA number. */
#define IO_SPACE_LIMIT 0x00ffffff

/* PA machines have an MM I/O space from 0xf0000000-0xffffffff in 32
 * bit mode and from 0xfffffffff0000000-0xffffffffffffffff in 64 bit
 * mode (essentially just sign extending).  This macro takes in a 32
 * bit I/O address (still with the leading f) and outputs the correct
 * value for either 32 or 64 bit mode. */
#define F_EXTEND(x) ((unsigned long)((x) | (0xffffffff00000000ULL)))

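/*
 * Worked example (illustrative, not part of the original header): on a
 * 64-bit kernel F_EXTEND(0xfee00000) yields 0xfffffffffee00000UL, i.e.
 * the 32-bit firmware address is sign-extended into the upper I/O
 * region; on a 32-bit kernel the cast to unsigned long truncates the
 * OR'd upper bits and the value stays 0xfee00000.
 */
static inline unsigned long example_extend_hpa(void)
{
	/* 0xfee00000 is the sample address used in the comment above */
	return F_EXTEND(0xfee00000);
}
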
#include <asm-generic/iomap.h>

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#endif