/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <linux/cache.h>

void rcu_sched_qs(int cpu);
void rcu_bh_qs(int cpu);
/* A context switch is a quiescent state for RCU-sched. */
static inline void rcu_note_context_switch(int cpu)
{
	rcu_sched_qs(cpu);
}

#define __rcu_read_lock()	preempt_disable()
#define __rcu_read_unlock()	preempt_enable()
#define __rcu_read_lock_bh()	local_bh_disable()
#define __rcu_read_unlock_bh()	local_bh_enable()
#define call_rcu_sched	call_rcu

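/*
 * Usage sketch: the rcu_read_lock()/rcu_read_unlock() wrappers defined in
 * <linux/rcupdate.h> are implemented in terms of the __rcu_read_lock()/
 * __rcu_read_unlock() macros above, so under Tiny RCU a read-side critical
 * section costs no more than a preempt_disable()/preempt_enable() pair.
 * A typical reader (struct foo, gbl_foo and do_something_with() are
 * hypothetical names, used only to show the shape of the code) might be:
 *
 *	struct foo *p;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gbl_foo);
 *	if (p)
 *		do_something_with(p->a);
 *	rcu_read_unlock();
 */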
#define rcu_init_sched()	do { } while (0)
extern void rcu_check_callbacks(int cpu, int user);
static inline int rcu_needs_cpu(int cpu)
{
	return 0;
}

/*
 * Return the number of grace periods.  Tiny RCU does not maintain a
 * count of completed grace periods, so this always returns zero.
 */
static inline long rcu_batches_completed(void)
{
	return 0;
}

/*
 * Return the number of bottom-half grace periods.  As above, Tiny RCU
 * does not maintain this count, so zero is always returned.
 */
static inline long rcu_batches_completed_bh(void)
{
	return 0;
}

/*
 * Tiny RCU runs only on uniprocessor systems, so there are no other
 * CPUs whose quiescent states would need forcing; these are no-ops.
 */
static inline void rcu_force_quiescent_state(void)
{
}

static inline void rcu_bh_force_quiescent_state(void)
{
}

static inline void rcu_sched_force_quiescent_state(void)
{
}

extern void synchronize_sched(void);

static inline void synchronize_rcu(void)
{
	synchronize_sched();
}

static inline void synchronize_rcu_bh(void)
{
	synchronize_sched();
}
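
/*
 * Usage sketch: an updater publishes a new version of a structure with
 * rcu_assign_pointer(), waits for a grace period, and only then frees the
 * old version.  The struct foo, gbl_foo, foo_lock and new_a names are
 * hypothetical, used only to show the shape of an update:
 *
 *	struct foo *new_fp, *old_fp;
 *
 *	new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
 *	spin_lock(&foo_lock);
 *	old_fp = gbl_foo;
 *	*new_fp = *old_fp;
 *	new_fp->a = new_a;
 *	rcu_assign_pointer(gbl_foo, new_fp);
 *	spin_unlock(&foo_lock);
 *	synchronize_rcu();
 *	kfree(old_fp);
 *
 * An updater that cannot block can instead use call_rcu() to have the old
 * structure freed from a callback after a grace period has elapsed.
 */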
static inline void synchronize_rcu_expedited(void)
{
	synchronize_sched();
}

static inline void synchronize_rcu_bh_expedited(void)
{
	synchronize_sched();
}

struct notifier_block;

/* Inform RCU of dynticks-idle (NO_HZ) entry and exit. */
#ifdef CONFIG_NO_HZ

extern void rcu_enter_nohz(void);
extern void rcu_exit_nohz(void);

#else /* #ifdef CONFIG_NO_HZ */

static inline void rcu_enter_nohz(void)
{
}

static inline void rcu_exit_nohz(void)
{
}

#endif /* #else #ifdef CONFIG_NO_HZ */

static inline void exit_rcu(void)
{
}

static inline int rcu_preempt_depth(void)
{
	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

extern int rcu_scheduler_active __read_mostly;
extern void rcu_scheduler_starting(void);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

static inline void rcu_scheduler_starting(void)
{
}

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#endif /* __LINUX_RCUTINY_H */