/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *	    Josh Triplett <josh@freedesktop.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
	      "Josh Triplett <josh@freedesktop.org>");

static int nreaders = -1;	/* # reader threads, defaults to 2*ncpus */
static int nfakewriters = 4;	/* # fake writer threads */
static int stat_interval;	/* Interval between stats, in seconds. */
				/*  Defaults to "only at end of test". */
static bool verbose;		/* Print more debug info. */
static bool test_no_idle_hz;	/* Test RCU's support for tickless idle CPUs. */
static int shuffle_interval = 3; /* Interval between shuffles (in sec) */
static int stutter = 5;		/* Start/stop testing interval (in sec) */
static int irqreader = 1;	/* RCU readers from irq (timers). */
static int fqs_duration;	/* Duration of bursts (us), 0 to disable. */
static int fqs_holdoff;		/* Hold time within burst (us). */
static int fqs_stutter = 3;	/* Wait time between bursts (s). */
static int onoff_interval;	/* Wait time between CPU hotplugs, 0=disable. */
static int shutdown_secs;	/* Shutdown time (s).  <=0 for no shutdown. */
static int test_boost = 1;	/* Test RCU prio boost: 0=no, 1=maybe, 2=yes. */
static int test_boost_interval = 7; /* Interval between boost tests, seconds. */
static int test_boost_duration = 4; /* Duration of each boost test, seconds. */
static char *torture_type = "rcu"; /* What RCU implementation to torture. */

module_param(nreaders, int, 0444);
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
module_param(nfakewriters, int, 0444);
MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
module_param(stat_interval, int, 0644);
MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
module_param(verbose, bool, 0444);
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
module_param(test_no_idle_hz, bool, 0444);
MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
module_param(shuffle_interval, int, 0444);
MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
module_param(stutter, int, 0444);
MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test");
module_param(irqreader, int, 0444);
MODULE_PARM_DESC(irqreader, "Allow RCU readers from irq handlers");
module_param(fqs_duration, int, 0444);
MODULE_PARM_DESC(fqs_duration, "Duration of fqs bursts (us)");
module_param(fqs_holdoff, int, 0444);
MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)");
module_param(fqs_stutter, int, 0444);
MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)");
module_param(onoff_interval, int, 0444);
MODULE_PARM_DESC(onoff_interval, "Time between CPU hotplugs (s), 0=disable");
module_param(shutdown_secs, int, 0444);
MODULE_PARM_DESC(shutdown_secs, "Shutdown time (s), zero to disable.");
module_param(test_boost, int, 0444);
MODULE_PARM_DESC(test_boost, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
module_param(test_boost_interval, int, 0444);
MODULE_PARM_DESC(test_boost_interval, "Interval between boost tests, seconds.");
module_param(test_boost_duration, int, 0444);
MODULE_PARM_DESC(test_boost_duration, "Duration of each boost test, seconds.");
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");
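
/*
 * Example invocation (the parameter values here are illustrative only):
 *
 *	modprobe rcutorture torture_type=srcu verbose=1 stat_interval=30
 *
 * tortures SRCU with verbose logging and a statistics line every
 * 30 seconds.
 */
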
#define TORTURE_FLAG "-torture:"
#define PRINTK_STRING(s) \
	do { printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_STRING(s) \
	do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_ERRSTRING(s) \
	do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)

static char printk_buf[4096];

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *shuffler_task;
static struct task_struct *stutter_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *shutdown_task;
#ifdef CONFIG_HOTPLUG_CPU
static struct task_struct *onoff_task;
#endif /* #ifdef CONFIG_HOTPLUG_CPU */

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};

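/*
 * Lifecycle of a torture element: freelist -> rcu_torture_current
 * (visible to readers) -> the grace-period "pipeline", with
 * ->rtort_pipe_count counting the grace periods the element has
 * survived; after RCU_TORTURE_PIPE_LEN of them it returns to the
 * freelist.  ->rtort_mbtest is set just before the element is
 * published and cleared when it is recycled, so a reader observing
 * zero has caught a memory-ordering failure.
 */
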
static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
	{ 0 };
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
	{ 0 };
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static long n_rcu_torture_timers;
static long n_offline_attempts;
static long n_offline_successes;
static long n_online_attempts;
static long n_online_successes;
static struct list_head rcu_torture_removed;
static cpumask_var_t shuffle_tmp_mask;

static int stutter_pause_test;

#if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE)
#define RCUTORTURE_RUNNABLE_INIT 1
#else
#define RCUTORTURE_RUNNABLE_INIT 0
#endif
int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
module_param(rcutorture_runnable, int, 0444);
MODULE_PARM_DESC(rcutorture_runnable, "Start rcutorture at boot");

#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */

static unsigned long shutdown_time;	/* jiffies to system shutdown. */
static unsigned long boost_starttime;	/* jiffies of next boost test start. */
DEFINE_MUTEX(boost_mutex);		/* protect setting boost_starttime */
					/*  and boost task create/destroy. */

/* Mediate rmmod and system shutdown.  Concurrent rmmod & shutdown illegal! */

#define FULLSTOP_DONTSTOP 0	/* Normal operation. */
#define FULLSTOP_SHUTDOWN 1	/* System shutdown with rcutorture running. */
#define FULLSTOP_RMMOD    2	/* Normal rmmod of rcutorture. */
static int fullstop = FULLSTOP_RMMOD;

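/*
 * fullstop transitions, all made under fullstop_mutex: the initial
 * FULLSTOP_RMMOD becomes FULLSTOP_DONTSTOP once rcu_torture_init()
 * commits to running, reverts to FULLSTOP_RMMOD in
 * rcu_torture_cleanup(), and becomes FULLSTOP_SHUTDOWN if the reboot
 * notifier fires while the test is still running.
 */
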
/*
 * Protect fullstop transitions and spawning of kthreads.
 */
static DEFINE_MUTEX(fullstop_mutex);

/* Forward reference. */
static void rcu_torture_cleanup(void);

/*
 * Detect and respond to a system shutdown.
 */
static int
rcutorture_shutdown_notify(struct notifier_block *unused1,
			   unsigned long unused2, void *unused3)
{
	mutex_lock(&fullstop_mutex);
	if (fullstop == FULLSTOP_DONTSTOP)
		fullstop = FULLSTOP_SHUTDOWN;
	else
		printk(KERN_WARNING /* but going down anyway, so... */
		       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
	mutex_unlock(&fullstop_mutex);
	return NOTIFY_DONE;
}

/*
 * Absorb kthreads into a kernel function that won't return, so that
 * they won't ever access module text or data again.
 */
static void rcutorture_shutdown_absorb(char *title)
{
	if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
		printk(KERN_NOTICE
		       "rcutorture thread %s parking due to system shutdown\n",
		       title);
		schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
	}
}

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

struct rcu_random_state {
	unsigned long rrs_state;
	long rrs_count;
};

#define RCU_RANDOM_MULT 39916801  /* prime */
#define RCU_RANDOM_ADD	479001701 /* prime */
#define RCU_RANDOM_REFRESH 10000

#define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from cpu_clock().
 */
static unsigned long
rcu_random(struct rcu_random_state *rrsp)
{
	if (--rrsp->rrs_count < 0) {
		rrsp->rrs_state += (unsigned long)local_clock();
		rrsp->rrs_count = RCU_RANDOM_REFRESH;
	}
	rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
	return swahw32(rrsp->rrs_state);
}

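/*
 * Typical use, as seen throughout this file:
 *
 *	DEFINE_RCU_RANDOM(rand);
 *	...
 *	udelay(rcu_random(&rand) & 0x3ff);	(a 0-1023 microsecond delay)
 *
 * The swahw32() halfword swap moves the better-mixed high-order bits of
 * the LCG state into the low-order bits of the result, which is what
 * callers reducing it modulo a small bound actually consume.
 */
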
static void
rcu_stutter_wait(char *title)
{
	while (stutter_pause_test || !rcutorture_runnable) {
		if (rcutorture_runnable)
			schedule_timeout_interruptible(1);
		else
			schedule_timeout_interruptible(round_jiffies_relative(HZ));
		rcutorture_shutdown_absorb(title);
	}
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct rcu_random_state *rrsp);
	void (*readunlock)(int idx);
	int (*completed)(void);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*cb_barrier)(void);
	void (*fqs)(void);
	int (*stats)(char *page);
	int irq_capable;
	int can_boost;
	char *name;
};

static struct rcu_torture_ops *cur_ops;

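/*
 * cur_ops is set exactly once, at init time, by matching the
 * torture_type module parameter against the .name fields of the ops
 * vectors below; every torture kthread then calls through cur_ops,
 * so identical test logic drives whichever RCU flavor was selected.
 */
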
/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_read_delay(struct rcu_random_state *rrsp)
{
	const unsigned long shortdelay_us = 200;
	const unsigned long longdelay_ms = 50;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!(rcu_random(rrsp) % (nrealreaders * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(rcu_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
		udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
	if (!preempt_count() && !(rcu_random(rrsp) % (nrealreaders * 20000)))
		preempt_schedule();  /* No QS if preempt_disable() in effect */
#endif
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static int rcu_torture_completed(void)
{
	return rcu_batches_completed();
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	int i;
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (fullstop != FULLSTOP_DONTSTOP) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		rcu_torture_free(rp);
	} else
		cur_ops->deferred_free(rp);
}

static int rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops rcu_ops = {
	.init		= NULL,
	.cleanup	= NULL,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.completed	= rcu_torture_completed,
	.deferred_free	= rcu_torture_deferred_free,
	.sync		= synchronize_rcu,
	.cb_barrier	= rcu_barrier,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.name		= "rcu"
};

static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
{
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	cur_ops->sync();
	list_add(&p->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		i = rp->rtort_pipe_count;
		if (i > RCU_TORTURE_PIPE_LEN)
			i = RCU_TORTURE_PIPE_LEN;
		atomic_inc(&rcu_torture_wcount[i]);
		if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
			rp->rtort_mbtest = 0;
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_sync_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.completed	= rcu_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_rcu,
	.cb_barrier	= NULL,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.name		= "rcu_sync"
};

static struct rcu_torture_ops rcu_expedited_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_rcu_expedited,
	.cb_barrier	= NULL,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.name		= "rcu_expedited"
};

/*
 * Definitions for rcu_bh torture testing.
 */

static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
{
	rcu_read_lock_bh();
	return 0;
}

static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
{
	rcu_read_unlock_bh();
}

static int rcu_bh_torture_completed(void)
{
	return rcu_batches_completed_bh();
}

static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops rcu_bh_ops = {
	.init		= NULL,
	.cleanup	= NULL,
	.readlock	= rcu_bh_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_bh_torture_read_unlock,
	.completed	= rcu_bh_torture_completed,
	.deferred_free	= rcu_bh_torture_deferred_free,
	.sync		= synchronize_rcu_bh,
	.cb_barrier	= rcu_barrier_bh,
	.fqs		= rcu_bh_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "rcu_bh"
};

static struct rcu_torture_ops rcu_bh_sync_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= rcu_bh_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_bh_torture_read_unlock,
	.completed	= rcu_bh_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_rcu_bh,
	.cb_barrier	= NULL,
	.fqs		= rcu_bh_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "rcu_bh_sync"
};

static struct rcu_torture_ops rcu_bh_expedited_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= rcu_bh_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_bh_torture_read_unlock,
	.completed	= rcu_bh_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_rcu_bh_expedited,
	.cb_barrier	= NULL,
	.fqs		= rcu_bh_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "rcu_bh_expedited"
};

/*
 * Definitions for srcu torture testing.
 */

static struct srcu_struct srcu_ctl;

static void srcu_torture_init(void)
{
	init_srcu_struct(&srcu_ctl);
	rcu_sync_torture_init();
}

static void srcu_torture_cleanup(void)
{
	synchronize_srcu(&srcu_ctl);
	cleanup_srcu_struct(&srcu_ctl);
}

static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
{
	return srcu_read_lock(&srcu_ctl);
}

static void srcu_read_delay(struct rcu_random_state *rrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
	if (!delay)
		schedule_timeout_interruptible(longdelay);
	else
		rcu_read_delay(rrsp);
}

static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
{
	srcu_read_unlock(&srcu_ctl, idx);
}

static int srcu_torture_completed(void)
{
	return srcu_batches_completed(&srcu_ctl);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(&srcu_ctl);
}

static int srcu_torture_stats(char *page)
{
	int cnt = 0;
	int cpu;
	int idx = srcu_ctl.completed & 0x1;

	cnt += sprintf(&page[cnt], "%s%s per-CPU(idx=%d):",
		       torture_type, TORTURE_FLAG, idx);
	for_each_possible_cpu(cpu) {
		cnt += sprintf(&page[cnt], " %d(%d,%d)", cpu,
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
	}
	cnt += sprintf(&page[cnt], "\n");
	return cnt;
}

static struct rcu_torture_ops srcu_ops = {
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.completed	= srcu_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.cb_barrier	= NULL,
	.stats		= srcu_torture_stats,
	.name		= "srcu"
};

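/*
 * Note that srcu_ops supplies no .cb_barrier and reuses the synchronous
 * rcu_sync_torture_deferred_free(): SRCU as exercised here offers no
 * asynchronous callback primitive, so deferred frees must ride on
 * synchronize_srcu() instead.
 */
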
static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(&srcu_ctl);
}

static struct rcu_torture_ops srcu_expedited_ops = {
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.completed	= srcu_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= srcu_torture_synchronize_expedited,
	.cb_barrier	= NULL,
	.stats		= srcu_torture_stats,
	.name		= "srcu_expedited"
};

/*
 * Definitions for sched torture testing.
 */

static int sched_torture_read_lock(void)
{
	preempt_disable();
	return 0;
}

static void sched_torture_read_unlock(int idx)
{
	preempt_enable();
}

static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops sched_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= sched_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= sched_torture_read_unlock,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_sched_torture_deferred_free,
	.sync		= synchronize_sched,
	.cb_barrier	= rcu_barrier_sched,
	.fqs		= rcu_sched_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "sched"
};

static struct rcu_torture_ops sched_sync_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= sched_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= sched_torture_read_unlock,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_sched,
	.cb_barrier	= NULL,
	.fqs		= rcu_sched_force_quiescent_state,
	.stats		= NULL,
	.name		= "sched_sync"
};

static struct rcu_torture_ops sched_expedited_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= sched_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= sched_torture_read_unlock,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_sched_expedited,
	.cb_barrier	= NULL,
	.fqs		= rcu_sched_force_quiescent_state,
	.stats		= NULL,
	.name		= "sched_expedited"
};

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	smp_mb(); /* Ensure RCU-core accesses precede clearing ->inflight */
	rbip->inflight = 0;
}

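/*
 * The failure criterion used below: a callback posted by the RT-priority
 * kthread is expected to be invoked within test_boost_duration seconds
 * (less half a second of slack); any callback outliving that window is
 * charged to n_rcu_torture_boost_failure as a presumed case of priority
 * inversion that RCU priority boosting should have prevented.
 */
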
static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };
	struct sched_param sp;

	VERBOSE_PRINTK_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sp.sched_priority = 1;
	if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
		VERBOSE_PRINTK_STRING("rcu_torture_boost RT prio failed!");
		n_rcu_torture_boost_rterror++;
	}

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (ULONG_CMP_LT(jiffies, oldstarttime)) {
			schedule_timeout_uninterruptible(1);
			rcu_stutter_wait("rcu_torture_boost");
			if (kthread_should_stop() ||
			    fullstop != FULLSTOP_DONTSTOP)
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		while (ULONG_CMP_LT(jiffies, endtime)) {
			/* If we don't have a callback in flight, post one. */
			if (!rbi.inflight) {
				smp_mb(); /* RCU core before ->inflight = 1. */
				rbi.inflight = 1;
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				if (jiffies - call_rcu_time >
					 test_boost_duration * HZ - HZ / 2) {
					VERBOSE_PRINTK_STRING("rcu_torture_boost boosting failed");
					n_rcu_torture_boost_failure++;
				}
				call_rcu_time = jiffies;
			}
			cond_resched();
			rcu_stutter_wait("rcu_torture_boost");
			if (kthread_should_stop() ||
			    fullstop != FULLSTOP_DONTSTOP)
				goto checkwait;
		}

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime &&
		       !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	rcu_stutter_wait("rcu_torture_boost");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);

	/* Clean up and exit. */
	VERBOSE_PRINTK_STRING("rcu_torture_boost task stopping");
	rcutorture_shutdown_absorb("rcu_torture_boost");
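	/*
	 * rbi.rcu lives on this kthread's stack, so do not return until
	 * the last callback posted against it has run: spin until a stop
	 * has been requested *and* ->inflight has been cleared by
	 * rcu_torture_boost_cb().
	 */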
	while (!kthread_should_stop() || rbi.inflight)
		schedule_timeout_uninterruptible(1);
	smp_mb(); /* order accesses to ->inflight before stack-frame death. */
	destroy_rcu_head_on_stack(&rbi.rcu);
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
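/*
 * Each burst lasts roughly fqs_duration microseconds: the remaining-burst
 * counter is debited by fqs_holdoff after every cur_ops->fqs() call, and
 * successive bursts are separated by fqs_stutter seconds of sleep.
 */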
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_PRINTK_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		rcu_stutter_wait("rcu_torture_fqs");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_PRINTK_STRING("rcu_torture_fqs task stopping");
	rcutorture_shutdown_absorb("rcu_torture_fqs");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	int i;
	long oldbatch = rcu_batches_completed();
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		udelay(rcu_random(&rand) & 0x3ff);
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			cur_ops->deferred_free(old_rp);
		}
		rcutorture_record_progress(++rcu_torture_current_version);
		oldbatch = cur_ops->completed();
		rcu_stutter_wait("rcu_torture_writer");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
	rcutorture_shutdown_absorb("rcu_torture_writer");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
		udelay(rcu_random(&rand) & 0x3ff);
		cur_ops->sync();
		rcu_stutter_wait("rcu_torture_fakewriter");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);

	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
	rcutorture_shutdown_absorb("rcu_torture_fakewriter");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

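/*
 * Dump the ftrace buffer at most once per test run.  The atomic_xchg()
 * in rcutorture_trace_dump() closes the race when several readers
 * detect an error at the same time; only the first one in dumps.
 */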
void rcutorture_trace_dump(void)
{
	static atomic_t beenhere = ATOMIC_INIT(0);

	if (atomic_read(&beenhere))
		return;
	if (atomic_xchg(&beenhere, 1) != 0)
		return;
	do_trace_rcu_torture_read(cur_ops->name, (struct rcu_head *)~0UL);
	ftrace_dump(DUMP_ALL);
}

/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(unsigned long unused)
{
	int idx;
	int completed;
	static DEFINE_RCU_RANDOM(rand);
	static DEFINE_SPINLOCK(rand_lock);
	struct rcu_torture *p;
	int pipe_count;

	idx = cur_ops->readlock();
	completed = cur_ops->completed();
	p = rcu_dereference_check(rcu_torture_current,
				  rcu_read_lock_bh_held() ||
				  rcu_read_lock_sched_held() ||
				  srcu_read_lock_held(&srcu_ctl));
	do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
	if (p == NULL) {
		/* Leave because rcu_torture_writer is not yet underway */
		cur_ops->readunlock(idx);
		return;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	spin_lock(&rand_lock);
	cur_ops->read_delay(&rand);
	n_rcu_torture_timers++;
	spin_unlock(&rand_lock);
	preempt_disable();
	pipe_count = p->rtort_pipe_count;
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	if (pipe_count > 1)
		rcutorture_trace_dump();
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = cur_ops->completed() - completed;
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	cur_ops->readunlock(idx);
}

/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	int completed;
	int idx;
	DEFINE_RCU_RANDOM(rand);
	struct rcu_torture *p;
	int pipe_count;
	struct timer_list t;

	VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
	set_user_nice(current, 19);
	if (irqreader && cur_ops->irq_capable)
		setup_timer_on_stack(&t, rcu_torture_timer, 0);

	do {
		if (irqreader && cur_ops->irq_capable) {
			if (!timer_pending(&t))
				mod_timer(&t, jiffies + 1);
		}
		idx = cur_ops->readlock();
		completed = cur_ops->completed();
		p = rcu_dereference_check(rcu_torture_current,
					  rcu_read_lock_bh_held() ||
					  rcu_read_lock_sched_held() ||
					  srcu_read_lock_held(&srcu_ctl));
		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
		if (p == NULL) {
			/* Wait for rcu_torture_writer to get underway */
			cur_ops->readunlock(idx);
			schedule_timeout_interruptible(HZ);
			continue;
		}
		if (p->rtort_mbtest == 0)
			atomic_inc(&n_rcu_torture_mberror);
		cur_ops->read_delay(&rand);
		preempt_disable();
		pipe_count = p->rtort_pipe_count;
		if (pipe_count > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			pipe_count = RCU_TORTURE_PIPE_LEN;
		}
		if (pipe_count > 1)
			rcutorture_trace_dump();
		__this_cpu_inc(rcu_torture_count[pipe_count]);
		completed = cur_ops->completed() - completed;
		if (completed > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			completed = RCU_TORTURE_PIPE_LEN;
		}
		__this_cpu_inc(rcu_torture_batch[completed]);
		preempt_enable();
		cur_ops->readunlock(idx);
		schedule();
		rcu_stutter_wait("rcu_torture_reader");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
	rcutorture_shutdown_absorb("rcu_torture_reader");
	if (irqreader && cur_ops->irq_capable)
		del_timer_sync(&t);
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * Create an RCU-torture statistics message in the specified buffer.
 */
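/*
 * Key to the one-line summary below: rtc = current element, ver = writer
 * version, tfle = freelist-empty flag, rta/rtaf/rtf = allocations,
 * allocation failures, and frees, rtmbe = memory-barrier errors,
 * rtbke/rtbre = boost-kthread and RT-priority errors, rtbf/rtb = boost
 * failures and boost tests, nt = timer-handler reads, and onoff =
 * online successes/attempts : offline successes/attempts.
 */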
static int
rcu_torture_printk(char *page)
{
	int cnt = 0;
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}
	cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt],
		       "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d "
		       "rtmbe: %d rtbke: %ld rtbre: %ld "
		       "rtbf: %ld rtb: %ld nt: %ld "
		       "onoff: %ld/%ld:%ld/%ld",
		       rcu_torture_current,
		       rcu_torture_current_version,
		       list_empty(&rcu_torture_freelist),
		       atomic_read(&n_rcu_torture_alloc),
		       atomic_read(&n_rcu_torture_alloc_fail),
		       atomic_read(&n_rcu_torture_free),
		       atomic_read(&n_rcu_torture_mberror),
		       n_rcu_torture_boost_ktrerror,
		       n_rcu_torture_boost_rterror,
		       n_rcu_torture_boost_failure,
		       n_rcu_torture_boosts,
		       n_rcu_torture_timers,
		       n_online_successes,
		       n_online_attempts,
		       n_offline_successes,
		       n_offline_attempts);
	if (atomic_read(&n_rcu_torture_mberror) != 0 ||
	    n_rcu_torture_boost_ktrerror != 0 ||
	    n_rcu_torture_boost_rterror != 0 ||
	    n_rcu_torture_boost_failure != 0)
		cnt += sprintf(&page[cnt], " !!!");
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	if (i > 1) {
		cnt += sprintf(&page[cnt], "!!! ");
		atomic_inc(&n_rcu_torture_error);
		WARN_ON_ONCE(1);
	}
	cnt += sprintf(&page[cnt], "Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		cnt += sprintf(&page[cnt], " %d",
			       atomic_read(&rcu_torture_wcount[i]));
	}
	cnt += sprintf(&page[cnt], "\n");
	if (cur_ops->stats)
		cnt += cur_ops->stats(&page[cnt]);
	return cnt;
}

/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cnt;

	cnt = rcu_torture_printk(printk_buf);
	printk(KERN_ALERT "%s", printk_buf);
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
		rcutorture_shutdown_absorb("rcu_torture_stats");
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
	return 0;
}

static int rcu_idle_cpu;	/* Force all torture tasks off this CPU */

/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case
 * is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs.
 */
static void rcu_torture_shuffle_tasks(void)
{
	int i;

	cpumask_setall(shuffle_tmp_mask);
	get_online_cpus();

	/* No point in shuffling if there is only one online CPU (ex: UP) */
	if (num_online_cpus() == 1) {
		put_online_cpus();
		return;
	}

	if (rcu_idle_cpu != -1)
		cpumask_clear_cpu(rcu_idle_cpu, shuffle_tmp_mask);

	set_cpus_allowed_ptr(current, shuffle_tmp_mask);

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			if (reader_tasks[i])
				set_cpus_allowed_ptr(reader_tasks[i],
						     shuffle_tmp_mask);
	}

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++)
			if (fakewriter_tasks[i])
				set_cpus_allowed_ptr(fakewriter_tasks[i],
						     shuffle_tmp_mask);
	}

	if (writer_task)
		set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask);

	if (stats_task)
		set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask);

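	/*
	 * Rotate the excluded CPU downward each shuffle: after reaching
	 * -1 (no CPU excluded), wrap back to the highest-numbered online
	 * CPU so that every CPU eventually gets a chance to go idle.
	 */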
	if (rcu_idle_cpu == -1)
		rcu_idle_cpu = num_online_cpus() - 1;
	else
		rcu_idle_cpu--;

	put_online_cpus();
}

/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
 * system to become idle at a time and cut off its timer ticks. This is meant
 * to test the support for such tickless idle CPU in RCU.
 */
static int
rcu_torture_shuffle(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
	do {
		schedule_timeout_interruptible(shuffle_interval * HZ);
		rcu_torture_shuffle_tasks();
		rcutorture_shutdown_absorb("rcu_torture_shuffle");
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
	return 0;
}

/* Cause the rcutorture test to "stutter", starting and stopping all
 * threads periodically.
 */
static int
rcu_torture_stutter(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_stutter task started");
	do {
		schedule_timeout_interruptible(stutter * HZ);
		stutter_pause_test = 1;
		if (!kthread_should_stop())
			schedule_timeout_interruptible(stutter * HZ);
		stutter_pause_test = 0;
		rcutorture_shutdown_absorb("rcu_torture_stutter");
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping");
	return 0;
}

static inline void
rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, char *tag)
{
	printk(KERN_ALERT "%s" TORTURE_FLAG
	       "--- %s: nreaders=%d nfakewriters=%d "
	       "stat_interval=%d verbose=%d test_no_idle_hz=%d "
	       "shuffle_interval=%d stutter=%d irqreader=%d "
	       "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
	       "test_boost=%d/%d test_boost_interval=%d "
	       "test_boost_duration=%d shutdown_secs=%d "
	       "onoff_interval=%d\n",
	       torture_type, tag, nrealreaders, nfakewriters,
	       stat_interval, verbose, test_no_idle_hz, shuffle_interval,
	       stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
	       test_boost, cur_ops->can_boost,
	       test_boost_interval, test_boost_duration, shutdown_secs,
	       onoff_interval);
}

static struct notifier_block rcutorture_shutdown_nb = {
	.notifier_call = rcutorture_shutdown_notify,
};

static void rcutorture_booster_cleanup(int cpu)
{
	struct task_struct *t;

	if (boost_tasks[cpu] == NULL)
		return;
	mutex_lock(&boost_mutex);
	VERBOSE_PRINTK_STRING("Stopping rcu_torture_boost task");
	t = boost_tasks[cpu];
	boost_tasks[cpu] = NULL;
	mutex_unlock(&boost_mutex);

	/* This must be outside of the mutex, otherwise deadlock! */
	kthread_stop(t);
}

static int rcutorture_booster_init(int cpu)
{
	int retval;

	if (boost_tasks[cpu] != NULL)
		return 0;  /* Already created, nothing more to do. */

	/* Don't allow time recalculation while creating a new task. */
	mutex_lock(&boost_mutex);
	VERBOSE_PRINTK_STRING("Creating rcu_torture_boost task");
	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
						  cpu_to_node(cpu),
						  "rcu_torture_boost");
	if (IS_ERR(boost_tasks[cpu])) {
		retval = PTR_ERR(boost_tasks[cpu]);
		VERBOSE_PRINTK_STRING("rcu_torture_boost task create failed");
		n_rcu_torture_boost_ktrerror++;
		boost_tasks[cpu] = NULL;
		mutex_unlock(&boost_mutex);
		return retval;
	}
	kthread_bind(boost_tasks[cpu], cpu);
	wake_up_process(boost_tasks[cpu]);
	mutex_unlock(&boost_mutex);
	return 0;
}

/*
 * Cause the rcutorture test to shutdown the system after the test has
 * run for the time specified by the shutdown_secs module parameter.
 */
static int
rcu_torture_shutdown(void *arg)
{
	long delta;
	unsigned long jiffies_snap;

	VERBOSE_PRINTK_STRING("rcu_torture_shutdown task started");
	jiffies_snap = ACCESS_ONCE(jiffies);
	while (ULONG_CMP_LT(jiffies_snap, shutdown_time) &&
	       !kthread_should_stop()) {
		delta = shutdown_time - jiffies_snap;
		if (verbose)
			printk(KERN_ALERT "%s" TORTURE_FLAG
			       "rcu_torture_shutdown task: %lu "
			       "jiffies remaining\n",
			       torture_type, delta);
		schedule_timeout_interruptible(delta);
		jiffies_snap = ACCESS_ONCE(jiffies);
	}
	if (kthread_should_stop()) {
		VERBOSE_PRINTK_STRING("rcu_torture_shutdown task stopping");
		return 0;
	}

	/* OK, shut down the system. */

	VERBOSE_PRINTK_STRING("rcu_torture_shutdown task shutting down system");
	shutdown_task = NULL;	/* Avoid self-kill deadlock. */
	rcu_torture_cleanup();	/* Get the success/failure message. */
	kernel_power_off();	/* Shut down the system. */
	return -EINVAL;
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Execute random CPU-hotplug operations at the interval specified
 * by the onoff_interval.
 */
static int
rcu_torture_onoff(void *arg)
{
	int cpu;
	int maxcpu = -1;
	DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_onoff task started");
	for_each_online_cpu(cpu)
		maxcpu = cpu;
	WARN_ON(maxcpu < 0);
	while (!kthread_should_stop()) {
		cpu = (rcu_random(&rand) >> 4) % (maxcpu + 1);
		if (cpu_online(cpu) && cpu_is_hotpluggable(cpu)) {
			if (verbose)
				printk(KERN_ALERT "%s" TORTURE_FLAG
				       "rcu_torture_onoff task: offlining %d\n",
				       torture_type, cpu);
			n_offline_attempts++;
			if (cpu_down(cpu) == 0) {
				if (verbose)
					printk(KERN_ALERT "%s" TORTURE_FLAG
					       "rcu_torture_onoff task: "
					       "offlined %d\n",
					       torture_type, cpu);
				n_offline_successes++;
			}
		} else if (cpu_is_hotpluggable(cpu)) {
			if (verbose)
				printk(KERN_ALERT "%s" TORTURE_FLAG
				       "rcu_torture_onoff task: onlining %d\n",
				       torture_type, cpu);
			n_online_attempts++;
			if (cpu_up(cpu) == 0) {
				if (verbose)
					printk(KERN_ALERT "%s" TORTURE_FLAG
					       "rcu_torture_onoff task: "
					       "onlined %d\n",
					       torture_type, cpu);
				n_online_successes++;
			}
		}
		schedule_timeout_interruptible(onoff_interval * HZ);
	}
	VERBOSE_PRINTK_STRING("rcu_torture_onoff task stopping");
	return 0;
}

static int
rcu_torture_onoff_init(void)
{
	int ret;

	if (onoff_interval <= 0)
		return 0;
	onoff_task = kthread_run(rcu_torture_onoff, NULL, "rcu_torture_onoff");
	if (IS_ERR(onoff_task)) {
		ret = PTR_ERR(onoff_task);	/* Grab error before clearing. */
		onoff_task = NULL;
		return ret;
	}
	return 0;
}

static void rcu_torture_onoff_cleanup(void)
{
	if (onoff_task == NULL)
		return;
	VERBOSE_PRINTK_STRING("Stopping rcu_torture_onoff task");
	kthread_stop(onoff_task);
	onoff_task = NULL;
}

#else /* #ifdef CONFIG_HOTPLUG_CPU */

static int
rcu_torture_onoff_init(void)
{
	return 0;
}

static void rcu_torture_onoff_cleanup(void)
{
}

#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */

static int rcutorture_cpu_notify(struct notifier_block *self,
				 unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		(void)rcutorture_booster_init(cpu);
		break;
	case CPU_DOWN_PREPARE:
		rcutorture_booster_cleanup(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block rcutorture_cpu_nb = {
	.notifier_call = rcutorture_cpu_notify,
};

static void
rcu_torture_cleanup(void)
{
	int i;

	mutex_lock(&fullstop_mutex);
	rcutorture_record_test_transition();
	if (fullstop == FULLSTOP_SHUTDOWN) {
		printk(KERN_WARNING /* but going down anyway, so... */
		       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
		mutex_unlock(&fullstop_mutex);
		schedule_timeout_uninterruptible(10);
		if (cur_ops->cb_barrier != NULL)
			cur_ops->cb_barrier();
		return;
	}
	fullstop = FULLSTOP_RMMOD;
	mutex_unlock(&fullstop_mutex);
	unregister_reboot_notifier(&rcutorture_shutdown_nb);
	if (stutter_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task");
		kthread_stop(stutter_task);
	}
	stutter_task = NULL;
	if (shuffler_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
		kthread_stop(shuffler_task);
		free_cpumask_var(shuffle_tmp_mask);
	}
	shuffler_task = NULL;

	if (writer_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
		kthread_stop(writer_task);
	}
	writer_task = NULL;

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++) {
			if (reader_tasks[i]) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_reader task");
				kthread_stop(reader_tasks[i]);
			}
			reader_tasks[i] = NULL;
		}
		kfree(reader_tasks);
		reader_tasks = NULL;
	}
	rcu_torture_current = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++) {
			if (fakewriter_tasks[i]) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_fakewriter task");
				kthread_stop(fakewriter_tasks[i]);
			}
			fakewriter_tasks[i] = NULL;
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	if (stats_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
		kthread_stop(stats_task);
	}
	stats_task = NULL;

	if (fqs_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_fqs task");
		kthread_stop(fqs_task);
	}
	fqs_task = NULL;
	if ((test_boost == 1 && cur_ops->can_boost) ||
	    test_boost == 2) {
		unregister_cpu_notifier(&rcutorture_cpu_nb);
		for_each_possible_cpu(i)
			rcutorture_booster_cleanup(i);
	}
	if (shutdown_task != NULL) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_shutdown task");
		kthread_stop(shutdown_task);
	}
	rcu_torture_onoff_cleanup();

	/* Wait for all RCU callbacks to fire. */

	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();

	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (cur_ops->cleanup)
		cur_ops->cleanup();
	if (atomic_read(&n_rcu_torture_error))
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
}

static int __init
rcu_torture_init(void)
{
	int i;
	int cpu;
	int firsterr = 0;
	static struct rcu_torture_ops *torture_ops[] =
		{ &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops,
		  &rcu_bh_ops, &rcu_bh_sync_ops, &rcu_bh_expedited_ops,
		  &srcu_ops, &srcu_expedited_ops,
		  &sched_ops, &sched_sync_ops, &sched_expedited_ops, };

	mutex_lock(&fullstop_mutex);

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		printk(KERN_ALERT "rcu-torture: invalid torture type: \"%s\"\n",
		       torture_type);
		printk(KERN_ALERT "rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			printk(KERN_ALERT " %s", torture_ops[i]->name);
		printk(KERN_ALERT "\n");
		mutex_unlock(&fullstop_mutex);
		return -EINVAL;
	}
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		printk(KERN_ALERT "rcu-torture: ->fqs NULL and non-zero "
		       "fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (cur_ops->init)
		cur_ops->init(); /* no "goto unwind" prior to this point!!! */

	if (nreaders >= 0)
		nrealreaders = nreaders;
	else
		nrealreaders = 2 * num_online_cpus();
	rcu_torture_print_module_parms(cur_ops, "Start of test");
	fullstop = FULLSTOP_DONTSTOP;

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_error, 0);
	n_rcu_torture_boost_ktrerror = 0;
	n_rcu_torture_boost_rterror = 0;
	n_rcu_torture_boost_failure = 0;
	n_rcu_torture_boosts = 0;
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}

	/* Start up the kthreads. */

	VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
	writer_task = kthread_run(rcu_torture_writer, NULL,
				  "rcu_torture_writer");
	if (IS_ERR(writer_task)) {
		firsterr = PTR_ERR(writer_task);
		VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
		writer_task = NULL;
		goto unwind;
	}
	fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
				   GFP_KERNEL);
	if (fakewriter_tasks == NULL) {
		VERBOSE_PRINTK_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nfakewriters; i++) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task");
		fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
						  "rcu_torture_fakewriter");
		if (IS_ERR(fakewriter_tasks[i])) {
			firsterr = PTR_ERR(fakewriter_tasks[i]);
			VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter");
			fakewriter_tasks[i] = NULL;
			goto unwind;
		}
	}
	reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_PRINTK_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
		reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
					      "rcu_torture_reader");
		if (IS_ERR(reader_tasks[i])) {
			firsterr = PTR_ERR(reader_tasks[i]);
			VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
			reader_tasks[i] = NULL;
			goto unwind;
		}
	}
	if (stat_interval > 0) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
		stats_task = kthread_run(rcu_torture_stats, NULL,
					 "rcu_torture_stats");
		if (IS_ERR(stats_task)) {
			firsterr = PTR_ERR(stats_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
			stats_task = NULL;
			goto unwind;
		}
	}
	if (test_no_idle_hz) {
		rcu_idle_cpu = num_online_cpus() - 1;

		if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
			firsterr = -ENOMEM;
			VERBOSE_PRINTK_ERRSTRING("Failed to alloc mask");
			goto unwind;
		}

		/* Create the shuffler thread */
		shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
					    "rcu_torture_shuffle");
		if (IS_ERR(shuffler_task)) {
			free_cpumask_var(shuffle_tmp_mask);
			firsterr = PTR_ERR(shuffler_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
			shuffler_task = NULL;
			goto unwind;
		}
	}
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		/* Create the stutter thread */
		stutter_task = kthread_run(rcu_torture_stutter, NULL,
					   "rcu_torture_stutter");
		if (IS_ERR(stutter_task)) {
			firsterr = PTR_ERR(stutter_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create stutter");
			stutter_task = NULL;
			goto unwind;
		}
	}
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_duration) {
		/* Create the fqs thread */
		fqs_task = kthread_run(rcu_torture_fqs, NULL,
				       "rcu_torture_fqs");
		if (IS_ERR(fqs_task)) {
			firsterr = PTR_ERR(fqs_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create fqs");
			fqs_task = NULL;
			goto unwind;
		}
	}
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if ((test_boost == 1 && cur_ops->can_boost) ||
	    test_boost == 2) {
		int retval;

		boost_starttime = jiffies + test_boost_interval * HZ;
		register_cpu_notifier(&rcutorture_cpu_nb);
		for_each_possible_cpu(i) {
			if (cpu_is_offline(i))
				continue;  /* Heuristic: CPU can go offline. */
			retval = rcutorture_booster_init(i);
			if (retval < 0) {
				firsterr = retval;
				goto unwind;
			}
		}
	}
	if (shutdown_secs > 0) {
		shutdown_time = jiffies + shutdown_secs * HZ;
		shutdown_task = kthread_run(rcu_torture_shutdown, NULL,
					    "rcu_torture_shutdown");
		if (IS_ERR(shutdown_task)) {
			firsterr = PTR_ERR(shutdown_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create shutdown");
			shutdown_task = NULL;
			goto unwind;
		}
	}
	rcu_torture_onoff_init();
	register_reboot_notifier(&rcutorture_shutdown_nb);
	rcutorture_record_test_transition();
	mutex_unlock(&fullstop_mutex);
	return 0;

unwind:
	mutex_unlock(&fullstop_mutex);
	rcu_torture_cleanup();
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);