#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/preempt.h>
#ifdef CONFIG_PREEMPT
#include <linux/smp_lock.h>
#endif
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <asm/hardirq.h>

/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count can in theory reach the same as NR_IRQS.
 * In reality, the number of nested IRQs is limited by the stack
 * size as well. For archs with over 1000 IRQs it is not practical
 * to expect that they will all nest. We give a max of 10 bits for
 * hardirq nesting. An arch may choose to give less than 10 bits.
 * m68k expects it to be 8.
 *
 * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
 * - bit 26 is the NMI flag (NMI_MASK)
 * - bit 27 (or an arch-defined higher bit, commonly 28) is the
 *   PREEMPT_ACTIVE flag
 *
 * PREEMPT_MASK: 0x000000ff
 * SOFTIRQ_MASK: 0x0000ff00
 * HARDIRQ_MASK: 0x03ff0000
 *     NMI_MASK: 0x04000000
 */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define NMI_BITS	1

#define MAX_HARDIRQ_BITS 10

#ifndef HARDIRQ_BITS
# define HARDIRQ_BITS	MAX_HARDIRQ_BITS
#endif

#if HARDIRQ_BITS > MAX_HARDIRQ_BITS
#error HARDIRQ_BITS too high!
#endif

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x))-1)

#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK	(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)

#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET	(1UL << NMI_SHIFT)

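/*
 * Worked example (editor's sketch, not from the original source): with
 * the generic values above, a preempt_count() of 0x00010102 decodes as
 * one nested hardirq (bit 16 set), one softirq level (bit 8 set) and a
 * preemption depth of 2 (bits 0-7 == 0x02), so in_irq(), in_softirq()
 * and in_interrupt() below would all be non-zero for such a value.
 */
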
#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)

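/*
 * Editor's note (added comment, inferred from how in_serving_softirq()
 * below tests the low softirq bit): disabling bottom halves adds
 * SOFTIRQ_DISABLE_OFFSET (two softirq counts) rather than SOFTIRQ_OFFSET,
 * leaving the least significant softirq bit to mean "currently executing
 * a softirq". That is what lets in_softirq() (any softirq count,
 * including bh-disabled sections) be told apart from in_serving_softirq().
 */
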
#ifndef PREEMPT_ACTIVE
#define PREEMPT_ACTIVE_BITS	1
#define PREEMPT_ACTIVE_SHIFT	(NMI_SHIFT + NMI_BITS)
#define PREEMPT_ACTIVE	(__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
#endif

#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
#error PREEMPT_ACTIVE is too low!
#endif

#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
				 | NMI_MASK))

/*
 * Are we doing bottom half or hardware interrupt processing?
 * Are we in a softirq context? Interrupt context?
 * in_softirq - Are we currently processing a softirq or is bh disabled?
 * in_serving_softirq - Are we currently processing a softirq?
 */
#define in_irq()		(hardirq_count())
#define in_softirq()		(softirq_count())
#define in_interrupt()		(irq_count())
#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)

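/*
 * Usage sketch (editor's illustration, not part of the original header):
 * a path that may run in either process or interrupt context can use
 * these predicates to pick a non-sleeping allocation mode:
 *
 *	void *buf = kmalloc(len, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
 *
 * Keep in mind the in_atomic() warning further down: these predicates
 * only see what is encoded in preempt_count().
 */
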
/*
 * Are we in NMI context?
 */
#define in_nmi()	(preempt_count() & NMI_MASK)

#if defined(CONFIG_PREEMPT) && defined(CONFIG_BKL)
# define PREEMPT_INATOMIC_BASE kernel_locked()
#else
# define PREEMPT_INATOMIC_BASE 0
#endif

#if defined(CONFIG_PREEMPT)
# define PREEMPT_CHECK_OFFSET 1
#else
# define PREEMPT_CHECK_OFFSET 0
#endif

/*
 * Are we running in atomic context? WARNING: this macro cannot
 * always detect atomic context; in particular, it cannot know about
 * held spinlocks in non-preemptible kernels. Thus it should not be
 * used in the general case to determine whether sleeping is possible.
 * Do not use in_atomic() in driver code.
 */
#define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_INATOMIC_BASE)
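
/*
 * Editor's illustration of the warning above (hypothetical snippet): on a
 * !CONFIG_PREEMPT kernel, spin_lock() does not touch preempt_count(), so
 * after
 *
 *	spin_lock(&lock);
 *	WARN_ON(in_atomic());
 *
 * the WARN_ON() may not fire even though sleeping here could deadlock,
 * which is exactly why in_atomic() cannot be used to decide whether it
 * is safe to sleep.
 */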

/*
 * Check whether we were atomic before we did preempt_disable():
 * (used by the scheduler, *after* releasing the kernel lock)
 */
#define in_atomic_preempt_off() \
		((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)

#ifdef CONFIG_PREEMPT
# define preemptible()	(preempt_count() == 0 && !irqs_disabled())
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
#else
# define preemptible()	0
# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
#endif

#if defined(CONFIG_SMP) || defined(CONFIG_GENERIC_HARDIRQS)
extern void synchronize_irq(unsigned int irq);
#else
# define synchronize_irq(irq)	barrier()
#endif

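/*
 * Usage sketch (editor's illustration; "dev" and its fields are
 * hypothetical): before tearing down data an interrupt handler might
 * touch, a driver can mask the interrupt at the device and then wait out
 * any handler still running on another CPU:
 *
 *	writel(0, dev->regs + IRQ_ENABLE);	(hypothetical device mask)
 *	synchronize_irq(dev->irq);		(wait for running handlers)
 *	kfree(dev->dma_buf);
 */
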
struct task_struct;

#if !defined(CONFIG_VIRT_CPU_ACCOUNTING) && !defined(CONFIG_IRQ_TIME_ACCOUNTING)
static inline void account_system_vtime(struct task_struct *tsk)
{
}
#else
extern void account_system_vtime(struct task_struct *tsk);
#endif

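/*
 * Editor's note (added comment): with CONFIG_VIRT_CPU_ACCOUNTING or
 * CONFIG_IRQ_TIME_ACCOUNTING the real implementation charges time spent
 * in hardirq/softirq context separately; otherwise the empty stub above
 * compiles away, keeping __irq_enter()/__irq_exit() below cheap.
 */
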
#if defined(CONFIG_NO_HZ)
#if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
extern void rcu_enter_nohz(void);
extern void rcu_exit_nohz(void);

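/*
 * Editor's note (added comment): the apparent inversion below is
 * intentional. Taking an interrupt while the tick is stopped must pull
 * the CPU *out* of RCU's nohz (extended quiescent) state so the handler's
 * RCU read-side critical sections are tracked; returning from the
 * interrupt re-enters that state.
 */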
static inline void rcu_irq_enter(void)
{
	rcu_exit_nohz();
}

static inline void rcu_irq_exit(void)
{
	rcu_enter_nohz();
}

static inline void rcu_nmi_enter(void)
{
}

static inline void rcu_nmi_exit(void)
{
}

#else
extern void rcu_irq_enter(void);
extern void rcu_irq_exit(void);
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif
#else
# define rcu_irq_enter() do { } while (0)
# define rcu_irq_exit() do { } while (0)
# define rcu_nmi_enter() do { } while (0)
# define rcu_nmi_exit() do { } while (0)
#endif /* #if defined(CONFIG_NO_HZ) */

/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
#define __irq_enter()					\
	do {						\
		account_system_vtime(current);		\
		add_preempt_count(HARDIRQ_OFFSET);	\
		trace_hardirq_enter();			\
	} while (0)

/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
extern void irq_enter(void);

/*
 * Exit irq context without processing softirqs:
 */
#define __irq_exit()					\
	do {						\
		trace_hardirq_exit();			\
		account_system_vtime(current);		\
		sub_preempt_count(HARDIRQ_OFFSET);	\
	} while (0)

/*
 * Exit irq context and process softirqs if needed:
 */
extern void irq_exit(void);

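/*
 * Usage sketch (editor's illustration of the intended pairing; the
 * handler name is hypothetical): an arch-level interrupt entry path
 * brackets its work with irq_enter()/irq_exit(), which maintain the
 * hardirq count read by the predicates above and run softirqs on the
 * way out:
 *
 *	void arch_do_IRQ(unsigned int irq)
 *	{
 *		irq_enter();
 *		generic_handle_irq(irq);
 *		irq_exit();
 *	}
 */
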
#define nmi_enter()						\
	do {							\
		ftrace_nmi_enter();				\
		BUG_ON(in_nmi());				\
		add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
		lockdep_off();					\
		rcu_nmi_enter();				\
		trace_hardirq_enter();				\
	} while (0)

#define nmi_exit()						\
	do {							\
		trace_hardirq_exit();				\
		rcu_nmi_exit();					\
		lockdep_on();					\
		BUG_ON(!in_nmi());				\
		sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
		ftrace_nmi_exit();				\
	} while (0)
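
/*
 * Editor's note (added comment): nmi_exit() undoes nmi_enter() in exactly
 * the reverse order, and both adjust HARDIRQ_OFFSET in addition to
 * NMI_OFFSET, so in_irq() and in_interrupt() are also true while an NMI
 * is being handled. The BUG_ON()s enforce that NMIs do not nest.
 */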

#endif /* LINUX_HARDIRQ_H */