/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *	    Josh Triplett <josh@freedesktop.org>
 *
 * See also: Documentation/RCU/torture.txt
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
	      "Josh Triplett <josh@freedesktop.org>");

static int nreaders = -1;	/* # reader threads, defaults to 2*ncpus */
static int nfakewriters = 4;	/* # fake writer threads */
static int stat_interval;	/* Interval between stats, in seconds. */
				/*  Defaults to "only at end of test". */
static int verbose;		/* Print more debug info. */
static int test_no_idle_hz;	/* Test RCU's support for tickless idle CPUs. */
static int shuffle_interval = 3; /* Interval between shuffles (in sec) */
static int stutter = 5;		/* Start/stop testing interval (in sec) */
static int irqreader = 1;	/* RCU readers from irq (timers). */
static int fqs_duration = 0;	/* Duration of bursts (us), 0 to disable. */
static int fqs_holdoff = 0;	/* Hold time within burst (us). */
static int fqs_stutter = 3;	/* Wait time between bursts (s). */
static char *torture_type = "rcu"; /* What RCU implementation to torture. */

module_param(nreaders, int, 0444);
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
module_param(nfakewriters, int, 0444);
MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
module_param(stat_interval, int, 0444);
MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
module_param(verbose, bool, 0444);
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
module_param(test_no_idle_hz, bool, 0444);
MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
module_param(shuffle_interval, int, 0444);
MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
module_param(stutter, int, 0444);
MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test");
module_param(irqreader, int, 0444);
MODULE_PARM_DESC(irqreader, "Allow RCU readers from irq handlers");
module_param(fqs_duration, int, 0444);
MODULE_PARM_DESC(fqs_duration, "Duration of fqs bursts (us)");
module_param(fqs_holdoff, int, 0444);
MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)");
module_param(fqs_stutter, int, 0444);
MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)");
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");
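
/*
 * All of the above parameters are read-only after load (permission 0444),
 * so they are normally supplied on the module command line, for example
 * (a usage sketch, not taken from the original documentation):
 *
 *	modprobe rcutorture torture_type=srcu nreaders=4 stat_interval=30
 *
 * See Documentation/RCU/torture.txt for the full description.
 */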

#define TORTURE_FLAG "-torture:"
#define PRINTK_STRING(s) \
	do { printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_STRING(s) \
	do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_ERRSTRING(s) \
	do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)

static char printk_buf[4096];

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *shuffler_task;
static struct task_struct *stutter_task;
static struct task_struct *fqs_task;

#define RCU_TORTURE_PIPE_LEN 10

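/*
 * One element of the torture pipeline: rtort_rcu is handed to call_rcu()
 * and friends, rtort_pipe_count records how many grace periods the element
 * has spent in the pipeline, rtort_free links it onto rcu_torture_freelist,
 * and rtort_mbtest is 1 while the element is live -- a reader that sees 0
 * has witnessed a memory-ordering failure and bumps n_rcu_torture_mberror.
 */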
struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture *rcu_torture_current;
static long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
	{ 0 };
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
	{ 0 };
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_timers;
static struct list_head rcu_torture_removed;
static cpumask_var_t shuffle_tmp_mask;

static int stutter_pause_test;

#if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE)
#define RCUTORTURE_RUNNABLE_INIT 1
#else
#define RCUTORTURE_RUNNABLE_INIT 0
#endif
int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;

/* Mediate rmmod and system shutdown.  Concurrent rmmod & shutdown illegal! */

#define FULLSTOP_DONTSTOP 0	/* Normal operation. */
#define FULLSTOP_SHUTDOWN 1	/* System shutdown with rcutorture running. */
#define FULLSTOP_RMMOD    2	/* Normal rmmod of rcutorture. */
static int fullstop = FULLSTOP_RMMOD;
DEFINE_MUTEX(fullstop_mutex);	/* Protect fullstop transitions and spawning */
				/*  of kthreads. */

/*
 * Detect and respond to a system shutdown.
 */
static int
rcutorture_shutdown_notify(struct notifier_block *unused1,
			   unsigned long unused2, void *unused3)
{
	mutex_lock(&fullstop_mutex);
	if (fullstop == FULLSTOP_DONTSTOP)
		fullstop = FULLSTOP_SHUTDOWN;
	else
		printk(KERN_WARNING /* but going down anyway, so... */
		       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
	mutex_unlock(&fullstop_mutex);
	return NOTIFY_DONE;
}

/*
 * Absorb kthreads into a kernel function that won't return, so that
 * they won't ever access module text or data again.
 */
static void rcutorture_shutdown_absorb(char *title)
{
	if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
		printk(KERN_NOTICE
		       "rcutorture thread %s parking due to system shutdown\n",
		       title);
		schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
	}
}

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

struct rcu_random_state {
	unsigned long rrs_state;
	long rrs_count;
};

#define RCU_RANDOM_MULT 39916801	/* prime */
#define RCU_RANDOM_ADD 479001701	/* prime */
#define RCU_RANDOM_REFRESH 10000

#define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from cpu_clock().
 */
static unsigned long
rcu_random(struct rcu_random_state *rrsp)
{
	if (--rrsp->rrs_count < 0) {
		rrsp->rrs_state +=
			(unsigned long)cpu_clock(raw_smp_processor_id());
		rrsp->rrs_count = RCU_RANDOM_REFRESH;
	}
	rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
	return swahw32(rrsp->rrs_state);
}
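
/*
 * In effect, rcu_random() iterates
 *
 *	state = state * 39916801 + 479001701	(mod 2^BITS_PER_LONG)
 *
 * reseeding from cpu_clock() every RCU_RANDOM_REFRESH (10000) calls, and
 * returns swahw32(state), presumably so that the better-mixed upper bits of
 * the low word land where callers' "%" operations will see them.  Speed,
 * not statistical quality, is the point here.
 */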

static void
rcu_stutter_wait(char *title)
{
	while (stutter_pause_test || !rcutorture_runnable) {
		if (rcutorture_runnable)
			schedule_timeout_interruptible(1);
		else
			schedule_timeout_interruptible(round_jiffies_relative(HZ));
		rcutorture_shutdown_absorb(title);
	}
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct rcu_random_state *rrsp);
	void (*readunlock)(int idx);
	int (*completed)(void);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*cb_barrier)(void);
	void (*fqs)(void);
	int (*stats)(char *page);
	int irq_capable;
	char *name;
};
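
/*
 * Several of these hooks are optional: ->init, ->cleanup, ->cb_barrier,
 * ->fqs, and ->stats may be NULL, and their callers check for that before
 * invoking them.  ->readlock, ->readunlock, ->read_delay, ->completed,
 * ->deferred_free, and ->sync are invoked unconditionally, so every ops
 * structure must supply them.
 */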

static struct rcu_torture_ops *cur_ops;

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_read_delay(struct rcu_random_state *rrsp)
{
	const unsigned long shortdelay_us = 200;
	const unsigned long longdelay_ms = 50;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!(rcu_random(rrsp) % (nrealreaders * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(rcu_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
		udelay(shortdelay_us);
}
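
/*
 * With the constants above, roughly one read in nrealreaders * 100000
 * takes the 50-millisecond mdelay() path (long enough to provoke
 * force_quiescent_state()), and roughly one in nrealreaders * 400 takes
 * the 200-microsecond udelay() path, assuming rcu_random() is reasonably
 * uniform.  Scaling by nrealreaders presumably keeps the total delay load
 * roughly constant as the number of readers grows.
 */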

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static int rcu_torture_completed(void)
{
	return rcu_batches_completed();
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	int i;
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (fullstop != FULLSTOP_DONTSTOP) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		rcu_torture_free(rp);
	} else
		cur_ops->deferred_free(rp);
}

static int rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops rcu_ops = {
	.init = NULL,
	.cleanup = NULL,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.completed = rcu_torture_completed,
	.deferred_free = rcu_torture_deferred_free,
	.sync = synchronize_rcu,
	.cb_barrier = rcu_barrier,
	.fqs = rcu_force_quiescent_state,
	.stats = NULL,
	.irq_capable = 1,
	.name = "rcu"
};

static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
{
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	cur_ops->sync();
	list_add(&p->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		i = rp->rtort_pipe_count;
		if (i > RCU_TORTURE_PIPE_LEN)
			i = RCU_TORTURE_PIPE_LEN;
		atomic_inc(&rcu_torture_wcount[i]);
		if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
			rp->rtort_mbtest = 0;
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_sync_ops = {
	.init = rcu_sync_torture_init,
	.cleanup = NULL,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.completed = rcu_torture_completed,
	.deferred_free = rcu_sync_torture_deferred_free,
	.sync = synchronize_rcu,
	.cb_barrier = NULL,
	.fqs = rcu_force_quiescent_state,
	.stats = NULL,
	.irq_capable = 1,
	.name = "rcu_sync"
};

static struct rcu_torture_ops rcu_expedited_ops = {
	.init = rcu_sync_torture_init,
	.cleanup = NULL,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock,
	.completed = rcu_no_completed,
	.deferred_free = rcu_sync_torture_deferred_free,
	.sync = synchronize_rcu_expedited,
	.cb_barrier = NULL,
	.fqs = rcu_force_quiescent_state,
	.stats = NULL,
	.irq_capable = 1,
	.name = "rcu_expedited"
};

/*
 * Definitions for rcu_bh torture testing.
 */

static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
{
	rcu_read_lock_bh();
	return 0;
}

static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
{
	rcu_read_unlock_bh();
}

static int rcu_bh_torture_completed(void)
{
	return rcu_batches_completed_bh();
}

static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}

struct rcu_bh_torture_synchronize {
	struct rcu_head head;
	struct completion completion;
};

static void rcu_bh_torture_wakeme_after_cb(struct rcu_head *head)
{
	struct rcu_bh_torture_synchronize *rcu;

	rcu = container_of(head, struct rcu_bh_torture_synchronize, head);
	complete(&rcu->completion);
}

static void rcu_bh_torture_synchronize(void)
{
	struct rcu_bh_torture_synchronize rcu;

	init_rcu_head_on_stack(&rcu.head);
	init_completion(&rcu.completion);
	call_rcu_bh(&rcu.head, rcu_bh_torture_wakeme_after_cb);
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
}

static struct rcu_torture_ops rcu_bh_ops = {
	.init = NULL,
	.cleanup = NULL,
	.readlock = rcu_bh_torture_read_lock,
	.read_delay = rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock = rcu_bh_torture_read_unlock,
	.completed = rcu_bh_torture_completed,
	.deferred_free = rcu_bh_torture_deferred_free,
	.sync = rcu_bh_torture_synchronize,
	.cb_barrier = rcu_barrier_bh,
	.fqs = rcu_bh_force_quiescent_state,
	.stats = NULL,
	.irq_capable = 1,
	.name = "rcu_bh"
};

static struct rcu_torture_ops rcu_bh_sync_ops = {
	.init = rcu_sync_torture_init,
	.cleanup = NULL,
	.readlock = rcu_bh_torture_read_lock,
	.read_delay = rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock = rcu_bh_torture_read_unlock,
	.completed = rcu_bh_torture_completed,
	.deferred_free = rcu_sync_torture_deferred_free,
	.sync = rcu_bh_torture_synchronize,
	.cb_barrier = NULL,
	.fqs = rcu_bh_force_quiescent_state,
	.stats = NULL,
	.irq_capable = 1,
	.name = "rcu_bh_sync"
};

/*
 * Definitions for srcu torture testing.
 */

static struct srcu_struct srcu_ctl;

static void srcu_torture_init(void)
{
	init_srcu_struct(&srcu_ctl);
	rcu_sync_torture_init();
}

static void srcu_torture_cleanup(void)
{
	synchronize_srcu(&srcu_ctl);
	cleanup_srcu_struct(&srcu_ctl);
}

static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
{
	return srcu_read_lock(&srcu_ctl);
}

static void srcu_read_delay(struct rcu_random_state *rrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
	if (!delay)
		schedule_timeout_interruptible(longdelay);
}

static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
{
	srcu_read_unlock(&srcu_ctl, idx);
}

static int srcu_torture_completed(void)
{
	return srcu_batches_completed(&srcu_ctl);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(&srcu_ctl);
}

static int srcu_torture_stats(char *page)
{
	int cnt = 0;
	int cpu;
	int idx = srcu_ctl.completed & 0x1;

	cnt += sprintf(&page[cnt], "%s%s per-CPU(idx=%d):",
		       torture_type, TORTURE_FLAG, idx);
	for_each_possible_cpu(cpu) {
		cnt += sprintf(&page[cnt], " %d(%d,%d)", cpu,
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
	}
	cnt += sprintf(&page[cnt], "\n");
	return cnt;
}

static struct rcu_torture_ops srcu_ops = {
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.read_delay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.completed = srcu_torture_completed,
	.deferred_free = rcu_sync_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.cb_barrier = NULL,
	.stats = srcu_torture_stats,
	.name = "srcu"
};

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(&srcu_ctl);
}

static struct rcu_torture_ops srcu_expedited_ops = {
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.read_delay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.completed = srcu_torture_completed,
	.deferred_free = rcu_sync_torture_deferred_free,
	.sync = srcu_torture_synchronize_expedited,
	.cb_barrier = NULL,
	.stats = srcu_torture_stats,
	.name = "srcu_expedited"
};

/*
 * Definitions for sched torture testing.
 */

static int sched_torture_read_lock(void)
{
	preempt_disable();
	return 0;
}

static void sched_torture_read_unlock(int idx)
{
	preempt_enable();
}

static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
}

static void sched_torture_synchronize(void)
{
	synchronize_sched();
}

static struct rcu_torture_ops sched_ops = {
	.init = rcu_sync_torture_init,
	.cleanup = NULL,
	.readlock = sched_torture_read_lock,
	.read_delay = rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock = sched_torture_read_unlock,
	.completed = rcu_no_completed,
	.deferred_free = rcu_sched_torture_deferred_free,
	.sync = sched_torture_synchronize,
	.cb_barrier = rcu_barrier_sched,
	.fqs = rcu_sched_force_quiescent_state,
	.stats = NULL,
	.irq_capable = 1,
	.name = "sched"
};

static struct rcu_torture_ops sched_sync_ops = {
	.init = rcu_sync_torture_init,
	.cleanup = NULL,
	.readlock = sched_torture_read_lock,
	.read_delay = rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock = sched_torture_read_unlock,
	.completed = rcu_no_completed,
	.deferred_free = rcu_sync_torture_deferred_free,
	.sync = sched_torture_synchronize,
	.cb_barrier = NULL,
	.fqs = rcu_sched_force_quiescent_state,
	.stats = NULL,
	.name = "sched_sync"
};

static struct rcu_torture_ops sched_expedited_ops = {
	.init = rcu_sync_torture_init,
	.cleanup = NULL,
	.readlock = sched_torture_read_lock,
	.read_delay = rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock = sched_torture_read_unlock,
	.completed = rcu_no_completed,
	.deferred_free = rcu_sync_torture_deferred_free,
	.sync = synchronize_sched_expedited,
	.cb_barrier = NULL,
	.fqs = rcu_sched_force_quiescent_state,
	.stats = NULL,
	.irq_capable = 1,
	.name = "sched_expedited"
};

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
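/*
 * Each pass waits until fqs_stutter seconds have elapsed, then makes
 * roughly fqs_duration / fqs_holdoff calls to cur_ops->fqs(), spaced
 * fqs_holdoff microseconds apart, so a burst lasts about fqs_duration
 * microseconds.  (Note that a nonzero fqs_duration combined with a zero
 * fqs_holdoff would never leave the burst loop, so both should be set
 * when this kthread is in use.)
 */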
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_PRINTK_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (jiffies - fqs_resume_time > LONG_MAX) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		rcu_stutter_wait("rcu_torture_fqs");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_PRINTK_STRING("rcu_torture_fqs task stopping");
	rcutorture_shutdown_absorb("rcu_torture_fqs");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	int i;
	long oldbatch = rcu_batches_completed();
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		udelay(rcu_random(&rand) & 0x3ff);
		old_rp = rcu_torture_current;
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			cur_ops->deferred_free(old_rp);
		}
		rcu_torture_current_version++;
		oldbatch = cur_ops->completed();
		rcu_stutter_wait("rcu_torture_writer");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
	rcutorture_shutdown_absorb("rcu_torture_writer");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
		udelay(rcu_random(&rand) & 0x3ff);
		cur_ops->sync();
		rcu_stutter_wait("rcu_torture_fakewriter");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);

	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
	rcutorture_shutdown_absorb("rcu_torture_fakewriter");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(unsigned long unused)
{
	int idx;
	int completed;
	static DEFINE_RCU_RANDOM(rand);
	static DEFINE_SPINLOCK(rand_lock);
	struct rcu_torture *p;
	int pipe_count;

	idx = cur_ops->readlock();
	completed = cur_ops->completed();
	p = rcu_dereference_check(rcu_torture_current,
				  rcu_read_lock_held() ||
				  rcu_read_lock_bh_held() ||
				  rcu_read_lock_sched_held() ||
				  srcu_read_lock_held(&srcu_ctl));
	if (p == NULL) {
		/* Leave because rcu_torture_writer is not yet underway */
		cur_ops->readunlock(idx);
		return;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	spin_lock(&rand_lock);
	cur_ops->read_delay(&rand);
	n_rcu_torture_timers++;
	spin_unlock(&rand_lock);
	preempt_disable();
	pipe_count = p->rtort_pipe_count;
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = cur_ops->completed() - completed;
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	cur_ops->readunlock(idx);
}

/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	int completed;
	int idx;
	DEFINE_RCU_RANDOM(rand);
	struct rcu_torture *p;
	int pipe_count;
	struct timer_list t;

	VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
	set_user_nice(current, 19);
	if (irqreader && cur_ops->irq_capable)
		setup_timer_on_stack(&t, rcu_torture_timer, 0);

	do {
		if (irqreader && cur_ops->irq_capable) {
			if (!timer_pending(&t))
				mod_timer(&t, jiffies + 1);
		}
		idx = cur_ops->readlock();
		completed = cur_ops->completed();
		p = rcu_dereference_check(rcu_torture_current,
					  rcu_read_lock_held() ||
					  rcu_read_lock_bh_held() ||
					  rcu_read_lock_sched_held() ||
					  srcu_read_lock_held(&srcu_ctl));
		if (p == NULL) {
			/* Wait for rcu_torture_writer to get underway */
			cur_ops->readunlock(idx);
			schedule_timeout_interruptible(HZ);
			continue;
		}
		if (p->rtort_mbtest == 0)
			atomic_inc(&n_rcu_torture_mberror);
		cur_ops->read_delay(&rand);
		preempt_disable();
		pipe_count = p->rtort_pipe_count;
		if (pipe_count > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			pipe_count = RCU_TORTURE_PIPE_LEN;
		}
		__this_cpu_inc(rcu_torture_count[pipe_count]);
		completed = cur_ops->completed() - completed;
		if (completed > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			completed = RCU_TORTURE_PIPE_LEN;
		}
		__this_cpu_inc(rcu_torture_batch[completed]);
		preempt_enable();
		cur_ops->readunlock(idx);
		schedule();
		rcu_stutter_wait("rcu_torture_reader");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
	rcutorture_shutdown_absorb("rcu_torture_reader");
	if (irqreader && cur_ops->irq_capable)
		del_timer_sync(&t);
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * Create an RCU-torture statistics message in the specified buffer.
 */
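/*
 * In the first output line, "rtc" is the current rcu_torture pointer,
 * "ver" the number of writer substitutions, "tfle" whether the free list
 * is empty, "rta"/"rtaf"/"rtf" the allocation, allocation-failure, and
 * free counts, "rtmbe" the number of rtort_mbtest failures, and "nt" the
 * number of timer-handler reads.  "Reader Pipe" and "Reader Batch" then
 * histogram reader observations by pipeline stage and by grace periods
 * elapsed, and "Free-Block Circulation" roughly counts, per stage, how
 * many elements have been pushed further down the pipeline.
 */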
static int
rcu_torture_printk(char *page)
{
	int cnt = 0;
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}
	cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt],
		       "rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d "
		       "rtmbe: %d nt: %ld",
		       rcu_torture_current,
		       rcu_torture_current_version,
		       list_empty(&rcu_torture_freelist),
		       atomic_read(&n_rcu_torture_alloc),
		       atomic_read(&n_rcu_torture_alloc_fail),
		       atomic_read(&n_rcu_torture_free),
		       atomic_read(&n_rcu_torture_mberror),
		       n_rcu_torture_timers);
	if (atomic_read(&n_rcu_torture_mberror) != 0)
		cnt += sprintf(&page[cnt], " !!!");
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	if (i > 1) {
		cnt += sprintf(&page[cnt], "!!! ");
		atomic_inc(&n_rcu_torture_error);
		WARN_ON_ONCE(1);
	}
	cnt += sprintf(&page[cnt], "Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		cnt += sprintf(&page[cnt], " %d",
			       atomic_read(&rcu_torture_wcount[i]));
	}
	cnt += sprintf(&page[cnt], "\n");
	if (cur_ops->stats)
		cnt += cur_ops->stats(&page[cnt]);
	return cnt;
}

/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cnt;

	cnt = rcu_torture_printk(printk_buf);
	printk(KERN_ALERT "%s", printk_buf);
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
		rcutorture_shutdown_absorb("rcu_torture_stats");
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
	return 0;
}

static int rcu_idle_cpu;	/* Force all torture tasks off this CPU */

/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case
 * is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs.
 */
static void rcu_torture_shuffle_tasks(void)
{
	int i;

	cpumask_setall(shuffle_tmp_mask);
	get_online_cpus();

	/* No point in shuffling if there is only one online CPU (ex: UP) */
	if (num_online_cpus() == 1) {
		put_online_cpus();
		return;
	}

	if (rcu_idle_cpu != -1)
		cpumask_clear_cpu(rcu_idle_cpu, shuffle_tmp_mask);

	set_cpus_allowed_ptr(current, shuffle_tmp_mask);

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			if (reader_tasks[i])
				set_cpus_allowed_ptr(reader_tasks[i],
						     shuffle_tmp_mask);
	}

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++)
			if (fakewriter_tasks[i])
				set_cpus_allowed_ptr(fakewriter_tasks[i],
						     shuffle_tmp_mask);
	}

	if (writer_task)
		set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask);

	if (stats_task)
		set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask);

	if (rcu_idle_cpu == -1)
		rcu_idle_cpu = num_online_cpus() - 1;
	else
		rcu_idle_cpu--;

	put_online_cpus();
}

/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
 * system to become idle at a time and cut off its timer ticks. This is meant
 * to test the support for such tickless idle CPU in RCU.
 */
static int
rcu_torture_shuffle(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
	do {
		schedule_timeout_interruptible(shuffle_interval * HZ);
		rcu_torture_shuffle_tasks();
		rcutorture_shutdown_absorb("rcu_torture_shuffle");
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
	return 0;
}

/* Cause the rcutorture test to "stutter", starting and stopping all
 * threads periodically.
 */
static int
rcu_torture_stutter(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_stutter task started");
	do {
		schedule_timeout_interruptible(stutter * HZ);
		stutter_pause_test = 1;
		if (!kthread_should_stop())
			schedule_timeout_interruptible(stutter * HZ);
		stutter_pause_test = 0;
		rcutorture_shutdown_absorb("rcu_torture_stutter");
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping");
	return 0;
}

static inline void
rcu_torture_print_module_parms(char *tag)
{
	printk(KERN_ALERT "%s" TORTURE_FLAG
	       "--- %s: nreaders=%d nfakewriters=%d "
	       "stat_interval=%d verbose=%d test_no_idle_hz=%d "
	       "shuffle_interval=%d stutter=%d irqreader=%d "
	       "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d\n",
	       torture_type, tag, nrealreaders, nfakewriters,
	       stat_interval, verbose, test_no_idle_hz, shuffle_interval,
	       stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter);
}

static struct notifier_block rcutorture_nb = {
	.notifier_call = rcutorture_shutdown_notify,
};

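/*
 * Tear everything down at "rmmod" time (or after a failed init): mark
 * fullstop as FULLSTOP_RMMOD, unregister the reboot notifier, stop the
 * stutter, shuffler, writer, reader, fakewriter, stats, and fqs kthreads
 * in that order, wait for any outstanding callbacks via ->cb_barrier,
 * print the final statistics, and declare SUCCESS or FAILURE depending
 * on n_rcu_torture_error.
 */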
static void
rcu_torture_cleanup(void)
{
	int i;

	mutex_lock(&fullstop_mutex);
	if (fullstop == FULLSTOP_SHUTDOWN) {
		printk(KERN_WARNING /* but going down anyway, so... */
		       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
		mutex_unlock(&fullstop_mutex);
		schedule_timeout_uninterruptible(10);
		if (cur_ops->cb_barrier != NULL)
			cur_ops->cb_barrier();
		return;
	}
	fullstop = FULLSTOP_RMMOD;
	mutex_unlock(&fullstop_mutex);
	unregister_reboot_notifier(&rcutorture_nb);
	if (stutter_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task");
		kthread_stop(stutter_task);
	}
	stutter_task = NULL;
	if (shuffler_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
		kthread_stop(shuffler_task);
		free_cpumask_var(shuffle_tmp_mask);
	}
	shuffler_task = NULL;

	if (writer_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
		kthread_stop(writer_task);
	}
	writer_task = NULL;

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++) {
			if (reader_tasks[i]) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_reader task");
				kthread_stop(reader_tasks[i]);
			}
			reader_tasks[i] = NULL;
		}
		kfree(reader_tasks);
		reader_tasks = NULL;
	}
	rcu_torture_current = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++) {
			if (fakewriter_tasks[i]) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_fakewriter task");
				kthread_stop(fakewriter_tasks[i]);
			}
			fakewriter_tasks[i] = NULL;
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	if (stats_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
		kthread_stop(stats_task);
	}
	stats_task = NULL;

	if (fqs_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_fqs task");
		kthread_stop(fqs_task);
	}
	fqs_task = NULL;

	/* Wait for all RCU callbacks to fire. */

	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();

	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (cur_ops->cleanup)
		cur_ops->cleanup();
	if (atomic_read(&n_rcu_torture_error))
		rcu_torture_print_module_parms("End of test: FAILURE");
	else
		rcu_torture_print_module_parms("End of test: SUCCESS");
}

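/*
 * Module initialization: select the ops structure matching torture_type,
 * run its ->init hook, populate the freelist and zero the statistics,
 * then spawn the writer, fakewriter, reader, and (as configured) stats,
 * shuffle, stutter, and fqs kthreads.  Any failure unwinds through
 * rcu_torture_cleanup().  All of this runs under fullstop_mutex so that
 * a concurrent shutdown cannot race with kthread creation.
 */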
static int __init
rcu_torture_init(void)
{
	int i;
	int cpu;
	int firsterr = 0;
	static struct rcu_torture_ops *torture_ops[] =
		{ &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops,
		  &rcu_bh_ops, &rcu_bh_sync_ops,
		  &srcu_ops, &srcu_expedited_ops,
		  &sched_ops, &sched_sync_ops, &sched_expedited_ops, };

	mutex_lock(&fullstop_mutex);

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		printk(KERN_ALERT "rcu-torture: invalid torture type: \"%s\"\n",
		       torture_type);
		printk(KERN_ALERT "rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			printk(KERN_ALERT " %s", torture_ops[i]->name);
		printk(KERN_ALERT "\n");
		mutex_unlock(&fullstop_mutex);
		return -EINVAL;
	}
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		printk(KERN_ALERT "rcu-torture: ->fqs NULL and non-zero "
				  "fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (cur_ops->init)
		cur_ops->init(); /* no "goto unwind" prior to this point!!! */

	if (nreaders >= 0)
		nrealreaders = nreaders;
	else
		nrealreaders = 2 * num_online_cpus();
	rcu_torture_print_module_parms("Start of test");
	fullstop = FULLSTOP_DONTSTOP;

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_error, 0);
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}

	/* Start up the kthreads. */

	VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
	writer_task = kthread_run(rcu_torture_writer, NULL,
				  "rcu_torture_writer");
	if (IS_ERR(writer_task)) {
		firsterr = PTR_ERR(writer_task);
		VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
		writer_task = NULL;
		goto unwind;
	}
	fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
				   GFP_KERNEL);
	if (fakewriter_tasks == NULL) {
		VERBOSE_PRINTK_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nfakewriters; i++) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task");
		fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
						  "rcu_torture_fakewriter");
		if (IS_ERR(fakewriter_tasks[i])) {
			firsterr = PTR_ERR(fakewriter_tasks[i]);
			VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter");
			fakewriter_tasks[i] = NULL;
			goto unwind;
		}
	}
	reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_PRINTK_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
		reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
					      "rcu_torture_reader");
		if (IS_ERR(reader_tasks[i])) {
			firsterr = PTR_ERR(reader_tasks[i]);
			VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
			reader_tasks[i] = NULL;
			goto unwind;
		}
	}
	if (stat_interval > 0) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
		stats_task = kthread_run(rcu_torture_stats, NULL,
					 "rcu_torture_stats");
		if (IS_ERR(stats_task)) {
			firsterr = PTR_ERR(stats_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
			stats_task = NULL;
			goto unwind;
		}
	}
	if (test_no_idle_hz) {
		rcu_idle_cpu = num_online_cpus() - 1;

		if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
			firsterr = -ENOMEM;
			VERBOSE_PRINTK_ERRSTRING("Failed to alloc mask");
			goto unwind;
		}

		/* Create the shuffler thread */
		shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
					    "rcu_torture_shuffle");
		if (IS_ERR(shuffler_task)) {
			free_cpumask_var(shuffle_tmp_mask);
			firsterr = PTR_ERR(shuffler_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
			shuffler_task = NULL;
			goto unwind;
		}
	}
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		/* Create the stutter thread */
		stutter_task = kthread_run(rcu_torture_stutter, NULL,
					   "rcu_torture_stutter");
		if (IS_ERR(stutter_task)) {
			firsterr = PTR_ERR(stutter_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create stutter");
			stutter_task = NULL;
			goto unwind;
		}
	}
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_duration) {
		/* Create the fqs thread */
		fqs_task = kthread_run(rcu_torture_fqs, NULL,
				       "rcu_torture_fqs");
		if (IS_ERR(fqs_task)) {
			firsterr = PTR_ERR(fqs_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create fqs");
			fqs_task = NULL;
			goto unwind;
		}
	}
	register_reboot_notifier(&rcutorture_nb);
	mutex_unlock(&fullstop_mutex);
	return 0;

unwind:
	mutex_unlock(&fullstop_mutex);
	rcu_torture_cleanup();
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);