/* Generic system definitions, based on MN10300 definitions.
 *
 * It should be possible to use these on really simple architectures,
 * but they serve more as a starting point for new ports.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef __ASM_GENERIC_SYSTEM_H
#define __ASM_GENERIC_SYSTEM_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/irqflags.h>

#include <asm/cmpxchg-local.h>

struct task_struct;
/* context switching is now performed out-of-line in switch_to.S */
extern struct task_struct *__switch_to(struct task_struct *,
                struct task_struct *);
#define switch_to(prev, next, last) \
        do { \
                ((last) = __switch_to((prev), (next))); \
        } while (0)
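/*
 * A usage sketch: the scheduler core typically invokes this as
 *
 *      switch_to(prev, next, prev);
 *
 * so that when the switched-out task is eventually resumed here,
 * "prev" again names the task that was really running just before the
 * switch back, as returned by __switch_to().
 */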

#define arch_align_stack(x) (x)

#define nop() asm volatile ("nop")

#endif /* !__ASSEMBLY__ */

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * This implementation only contains a compiler barrier.
 */

#define mb()    asm volatile ("": : :"memory")
#define rmb()   mb()
#define wmb()   asm volatile ("": : :"memory")

#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#endif

#define set_mb(var, value) do { var = value; mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)

#define read_barrier_depends() do {} while (0)
#define smp_read_barrier_depends() do {} while (0)

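/*
 * A minimal sketch of the publish/consume pattern these barriers are
 * meant for ("payload" and "ready" are hypothetical shared variables,
 * with "ready" initially zero):
 *
 *      writer:
 *              payload = 42;
 *              smp_wmb();      (data store ordered before flag store)
 *              ready = 1;
 *
 *      reader:
 *              while (!ready)
 *                      cpu_relax();
 *              smp_rmb();      (flag load ordered before data load)
 *              use(payload);   (guaranteed to see 42)
 *
 * On UP builds the smp_*() variants collapse to barrier(), a pure
 * compiler barrier, which is all that is needed there.
 */
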
/*
 * we make sure local_irq_enable() doesn't cause priority inversion;
 * the xchg()/cmpxchg() fallbacks below briefly disable interrupts to
 * obtain their atomicity
 */
#ifndef __ASSEMBLY__

/* This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg(). */
extern void __xchg_called_with_bad_pointer(void);

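/*
 * For instance, on a 32-bit build (no CONFIG_64BIT) something like
 *
 *      u64 v;
 *      xchg(&v, 0);
 *
 * reaches the default case below, so the call to
 * __xchg_called_with_bad_pointer() remains in the object file and the
 * final link fails.
 */
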
/*
 * Generic xchg(): if the arch doesn't provide an atomic __xchg_uN()
 * for this size, fall back to a load/store pair with local interrupts
 * disabled, which is only atomic on UP.
 */
static inline
unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
        unsigned long ret, flags;

        switch (size) {
        case 1:
#ifdef __xchg_u8
                return __xchg_u8(x, ptr);
#else
                local_irq_save(flags);
                ret = *(volatile u8 *)ptr;
                *(volatile u8 *)ptr = x;
                local_irq_restore(flags);
                return ret;
#endif /* __xchg_u8 */

        case 2:
#ifdef __xchg_u16
                return __xchg_u16(x, ptr);
#else
                local_irq_save(flags);
                ret = *(volatile u16 *)ptr;
                *(volatile u16 *)ptr = x;
                local_irq_restore(flags);
                return ret;
#endif /* __xchg_u16 */

        case 4:
#ifdef __xchg_u32
                return __xchg_u32(x, ptr);
#else
                local_irq_save(flags);
                ret = *(volatile u32 *)ptr;
                *(volatile u32 *)ptr = x;
                local_irq_restore(flags);
                return ret;
#endif /* __xchg_u32 */

#ifdef CONFIG_64BIT
        case 8:
#ifdef __xchg_u64
                return __xchg_u64(x, ptr);
#else
                local_irq_save(flags);
                ret = *(volatile u64 *)ptr;
                *(volatile u64 *)ptr = x;
                local_irq_restore(flags);
                return ret;
#endif /* __xchg_u64 */
#endif /* CONFIG_64BIT */

        default:
                __xchg_called_with_bad_pointer();
                return x;
        }
}

#define xchg(ptr, x) \
        ((__typeof__(*(ptr))) __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))

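/*
 * A usage sketch: a toy test-and-set lock built on xchg(). The names
 * are hypothetical, not kernel API; real code would use the proper
 * spinlock primitives. The lock word starts out zero; xchg() returns
 * the old value, so a non-zero result means someone else holds it.
 *
 *      static void toy_lock(unsigned long *lock)
 *      {
 *              while (xchg(lock, 1UL))
 *                      cpu_relax();
 *      }
 *
 *      static void toy_unlock(unsigned long *lock)
 *      {
 *              smp_mb();
 *              *lock = 0UL;
 *      }
 */
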
/*
 * Generic cmpxchg() fallback: compare and store with local interrupts
 * disabled, which again is only atomic on UP.
 */
static inline unsigned long __cmpxchg(volatile unsigned long *m,
                                      unsigned long old, unsigned long new)
{
        unsigned long retval;
        unsigned long flags;

        local_irq_save(flags);
        retval = *m;
        if (retval == old)
                *m = new;
        local_irq_restore(flags);
        return retval;
}

#define cmpxchg(ptr, o, n) \
        ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
                                        (unsigned long)(o), \
                                        (unsigned long)(n)))

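/*
 * A usage sketch: the classic compare-and-swap retry loop, here as a
 * hypothetical lock-free counter (sketch_add_return() is not kernel
 * API; real code would use atomic_add_return()):
 *
 *      static unsigned long sketch_add_return(unsigned long *p,
 *                                             unsigned long inc)
 *      {
 *              unsigned long old;
 *
 *              do {
 *                      old = *p;
 *              } while (cmpxchg(p, old, old + inc) != old);
 *
 *              return old + inc;
 *      }
 */
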
#endif /* !__ASSEMBLY__ */

#endif /* __KERNEL__ */
#endif /* __ASM_GENERIC_SYSTEM_H */