arch/h8300/include/asm/system.h
#ifndef _H8300_SYSTEM_H
#define _H8300_SYSTEM_H

#include <linux/linkage.h>

/*
 * switch_to(n) should switch tasks to task ptr, first checking that
 * ptr isn't the current task, in which case it does nothing. This
 * also clears the TS-flag if the task we switched to last used the
 * math co-processor.
 */
/*
 * switch_to() saves the extra registers that are not saved
 * automatically by SAVE_SWITCH_STACK in resume(), i.e. d0-d5 and
 * a0-a1. Some of these are used by schedule() and its predecessors,
 * so we might see unexpected behaviour if a task returns with
 * unexpected register values.
 *
 * syscall stores these registers itself and none of them are used
 * by syscall after the function in the syscall has been called.
 *
 * Beware that resume now expects *next to be in d1 and the offset of
 * tss to be in a1. This saves a few instructions as we no longer have
 * to push them onto the stack and read them back right after.
 *
 * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
 *
 * Changed 96/09/19 by Andreas Schwab
 * pass prev in a0, next in a1, offset of tss in d1, and whether
 * the mm structures are shared in d2 (to avoid atc flushing).
 *
 * H8/300 Porting 2002/09/04 Yoshinori Sato
 */

asmlinkage void resume(void);
#define switch_to(prev,next,last) {			\
	void *_last;					\
	__asm__ __volatile__(				\
		"mov.l %1, er0\n\t"			\
		"mov.l %2, er1\n\t"			\
		"mov.l %3, er2\n\t"			\
		"jsr @_resume\n\t"			\
		"mov.l er2,%0\n\t"			\
		: "=r" (_last)				\
		: "r" (&(prev->thread)),		\
		  "r" (&(next->thread)),		\
		  "g" (prev)				\
		: "cc", "er0", "er1", "er2", "er3");	\
	(last) = _last;					\
}

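/*
 * Illustrative sketch only (not part of the original header): roughly
 * how the generic scheduler would invoke the macro above.  'prev' is
 * the outgoing task and 'next' the incoming one; when 'prev' is later
 * scheduled back in, 'last' identifies the task that ran immediately
 * before it resumed.
 *
 *	struct task_struct *prev, *next, *last;
 *	...
 *	switch_to(prev, next, last);
 */
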
#define __sti() asm volatile ("andc #0x7f,ccr")
#define __cli() asm volatile ("orc #0x80,ccr")

#define __save_flags(x) \
	asm volatile ("stc ccr,%w0" : "=r" (x))

#define __restore_flags(x) \
	asm volatile ("ldc %w0,ccr" : : "r" (x))

#define irqs_disabled()			\
({					\
	unsigned char flags;		\
	__save_flags(flags);		\
	((flags & 0x80) == 0x80);	\
})

#define iret() __asm__ __volatile__ ("rte" : : : "memory", "sp", "cc")

/* For spinlocks etc */
#define local_irq_disable()	__cli()
#define local_irq_enable()	__sti()
#define local_irq_save(x)	({ __save_flags(x); local_irq_disable(); })
#define local_irq_restore(x)	__restore_flags(x)
#define local_save_flags(x)	__save_flags(x)

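/*
 * Illustrative sketch only (not part of the original header): a short
 * interrupt-safe critical section built from the macros above.  The
 * function name and the counter argument are hypothetical.
 */
static inline void __example_irqsafe_inc(volatile unsigned long *counter)
{
	unsigned long flags;

	local_irq_save(flags);		/* remember CCR, then mask interrupts */
	(*counter)++;			/* cannot be interrupted on this CPU */
	local_irq_restore(flags);	/* put the interrupt mask back as it was */
}
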
/*
 * Force strict CPU ordering.
 * Not really required on H8...
 */
#define nop()  asm volatile ("nop"::)
#define mb()   asm volatile ("" : : : "memory")
#define rmb()  asm volatile ("" : : : "memory")
#define wmb()  asm volatile ("" : : : "memory")
#define set_mb(var, value) do { xchg(&var, value); } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#endif

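/*
 * Illustrative sketch only (not part of the original header): classic
 * publish/consume ordering using the barriers above.  'data' and
 * 'ready' are hypothetical shared locations; on the H8 the barriers
 * reduce to compiler barriers.
 */
static inline void __example_publish(unsigned long *data, volatile int *ready,
				     unsigned long val)
{
	*data = val;	/* write the payload first */
	wmb();		/* keep the payload store before the flag store */
	*ready = 1;	/* a reader pairs this with rmb() before loading *data */
}
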
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

/*
 * Interrupts are disabled across the load/store pair below, so the
 * exchange is atomic with respect to the local CPU.
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	unsigned long tmp, flags;

	local_irq_save(flags);

	switch (size) {
	case 1:
		__asm__ __volatile__
			("mov.b %2,%0\n\t"
			 "mov.b %1,%2"
			 : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 2:
		__asm__ __volatile__
			("mov.w %2,%0\n\t"
			 "mov.w %1,%2"
			 : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 4:
		__asm__ __volatile__
			("mov.l %2,%0\n\t"
			 "mov.l %1,%2"
			 : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
		break;
	default:
		tmp = 0;
	}
	local_irq_restore(flags);
	return tmp;
}

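/*
 * Illustrative sketch only (not part of the original header): a simple
 * test-and-set flag built on xchg().  A non-zero return means the flag
 * was already held; the function name is hypothetical.
 */
static inline int __example_test_and_set(volatile unsigned char *flag)
{
	/* store 1 and fetch the old value atomically w.r.t. this CPU */
	return xchg(flag, 1) != 0;
}
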
#define HARD_RESET_NOW() ({		\
	local_irq_disable();		\
	asm("jmp @@0");			\
})

#include <asm-generic/cmpxchg-local.h>

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					       \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
						     (unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif

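/*
 * Illustrative sketch only (not part of the original header): an
 * add-and-return loop built on cmpxchg_local().  The function name
 * and its arguments are hypothetical.
 */
static inline unsigned long __example_local_add_return(unsigned long *counter,
							unsigned long inc)
{
	unsigned long old;

	do {
		old = *counter;
		/* retry if *counter changed between the load and the swap */
	} while (cmpxchg_local(counter, old, old + inc) != old);

	return old + inc;
}
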
#define arch_align_stack(x) (x)

void die(char *str, struct pt_regs *fp, unsigned long err);

#endif /* _H8300_SYSTEM_H */