Merge branches 'doc.2014.09.07a', 'fixes.2014.09.10a', 'nocb-nohz.2014.09.16b' and 'torture.2014.09.07a'
author Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tue, 16 Sep 2014 17:08:34 +0000 (10:08 -0700)
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tue, 16 Sep 2014 17:08:34 +0000 (10:08 -0700)
doc.2014.09.07a: Documentation updates.
fixes.2014.09.10a: Miscellaneous fixes.
nocb-nohz.2014.09.16b: No-CBs CPUs and NO_HZ_FULL updates.
torture.2014.09.07a: Torture-test updates.

29 files changed:
Documentation/kernel-parameters.txt
Documentation/memory-barriers.txt
include/linux/rcupdate.h
include/linux/torture.h
init/Kconfig
init/main.c
kernel/rcu/rcutorture.c
kernel/rcu/tree.c
kernel/rcu/tree.h
kernel/rcu/tree_plugin.h
kernel/torture.c
tools/testing/selftests/rcutorture/bin/config2frag.sh [changed mode: 0644->0755]
tools/testing/selftests/rcutorture/bin/configcheck.sh
tools/testing/selftests/rcutorture/bin/configinit.sh
tools/testing/selftests/rcutorture/bin/functions.sh
tools/testing/selftests/rcutorture/bin/kvm-build.sh
tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh
tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
tools/testing/selftests/rcutorture/bin/kvm.sh [changed mode: 0644->0755]
tools/testing/selftests/rcutorture/bin/parse-build.sh
tools/testing/selftests/rcutorture/bin/parse-console.sh
tools/testing/selftests/rcutorture/bin/parse-torture.sh
tools/testing/selftests/rcutorture/configs/rcu/TREE01
tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot
tools/testing/selftests/rcutorture/configs/rcu/TREE07
tools/testing/selftests/rcutorture/configs/rcu/TREE07.boot [new file with mode: 0644]
tools/testing/selftests/rcutorture/doc/initrd.txt

index 5ae8608ca9f58a6331cae454ae7a0c84ab2f6c47..0a104be4ad862ed26a293bcc46d6b92862f8b7ea 100644 (file)
@@ -2881,6 +2881,24 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        Lazy RCU callbacks are those which RCU can
                        prove do nothing more than free memory.
 
+       rcutorture.cbflood_inter_holdoff= [KNL]
+                       Set holdoff time (jiffies) between successive
+                       callback-flood tests.
+
+       rcutorture.cbflood_intra_holdoff= [KNL]
+                       Set holdoff time (jiffies) between successive
+                       bursts of callbacks within a given callback-flood
+                       test.
+
+       rcutorture.cbflood_n_burst= [KNL]
+                       Set the number of bursts making up a given
+                       callback-flood test.  Set this to zero to
+                       disable callback-flood testing.
+
+       rcutorture.cbflood_n_per_burst= [KNL]
+                       Set the number of callbacks to be registered
+                       in a given burst of a callback-flood test.
+
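+                       As an illustration (the values below are
+                       arbitrary examples, not defaults or
+                       recommendations), a gentler flood might be
+                       requested with:
+
+                               rcutorture.cbflood_inter_holdoff=500
+                               rcutorture.cbflood_n_burst=2
+                               rcutorture.cbflood_n_per_burst=10000
+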
        rcutorture.fqs_duration= [KNL]
                        Set duration of force_quiescent_state bursts.
 
index a4de88fb55f087451cf97955a6ab419b74935c4e..22a969cdd4766a2f4f3651d359cd81defdb3bd21 100644 (file)
@@ -574,30 +574,14 @@ However, stores are not speculated.  This means that ordering -is- provided
 in the following example:
 
        q = ACCESS_ONCE(a);
-       if (ACCESS_ONCE(q)) {
-               ACCESS_ONCE(b) = p;
-       }
-
-Please note that ACCESS_ONCE() is not optional!  Without the ACCESS_ONCE(),
-the compiler is within its rights to transform this example:
-
-       q = a;
        if (q) {
-               b = p;  /* BUG: Compiler can reorder!!! */
-               do_something();
-       } else {
-               b = p;  /* BUG: Compiler can reorder!!! */
-               do_something_else();
+               ACCESS_ONCE(b) = p;
        }
 
-into this, which of course defeats the ordering:
-
-       b = p;
-       q = a;
-       if (q)
-               do_something();
-       else
-               do_something_else();
+Please note that ACCESS_ONCE() is not optional!  Without the
+ACCESS_ONCE(), the compiler might combine the load from 'a' with
+other loads from 'a', and the store to 'b' with other stores to 'b',
+with possible highly counterintuitive effects on ordering.
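+
+For example, in the following illustrative fragment, where the compiler
+can see that do_something() touches neither 'a' nor 'b', it may reuse
+the first load of 'a' for the second, and drop the first of the two
+stores to 'b':
+
+	q = a;
+	do_something();
+	r = a;		/* May become "r = q": loads combined. */
+	b = p;
+	do_something();
+	b = p;		/* Earlier store may be dropped: stores combined. */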
 
 Worse yet, if the compiler is able to prove (say) that the value of
 variable 'a' is always non-zero, it would be well within its rights
@@ -605,11 +589,12 @@ to optimize the original example by eliminating the "if" statement
 as follows:
 
        q = a;
-       b = p;  /* BUG: Compiler can reorder!!! */
-       do_something();
+       b = p;  /* BUG: Compiler and CPU can both reorder!!! */
+
+So don't leave out the ACCESS_ONCE().
 
-The solution is again ACCESS_ONCE() and barrier(), which preserves the
-ordering between the load from variable 'a' and the store to variable 'b':
+It is tempting to try to enforce ordering on identical stores on both
+branches of the "if" statement as follows:
 
        q = ACCESS_ONCE(a);
        if (q) {
@@ -622,18 +607,11 @@ ordering between the load from variable 'a' and the store to variable 'b':
                do_something_else();
        }
 
-The initial ACCESS_ONCE() is required to prevent the compiler from
-proving the value of 'a', and the pair of barrier() invocations are
-required to prevent the compiler from pulling the two identical stores
-to 'b' out from the legs of the "if" statement.
-
-It is important to note that control dependencies absolutely require a
-a conditional.  For example, the following "optimized" version of
-the above example breaks ordering, which is why the barrier() invocations
-are absolutely required if you have identical stores in both legs of
-the "if" statement:
+Unfortunately, current compilers will transform this as follows at high
+optimization levels:
 
        q = ACCESS_ONCE(a);
+       barrier();
        ACCESS_ONCE(b) = p;  /* BUG: No ordering vs. load from a!!! */
        if (q) {
                /* ACCESS_ONCE(b) = p; -- moved up, BUG!!! */
@@ -643,21 +621,36 @@ the "if" statement:
                do_something_else();
        }
 
-It is of course legal for the prior load to be part of the conditional,
-for example, as follows:
+Now there is no conditional between the load from 'a' and the store to
+'b', which means that the CPU is within its rights to reorder them.
+The conditional is absolutely required, and must be present in the
+assembly code even after all compiler optimizations have been applied.
+Therefore, if you need ordering in this example, you need explicit
+memory barriers, for example, smp_store_release():
 
-       if (ACCESS_ONCE(a) > 0) {
-               barrier();
-               ACCESS_ONCE(b) = q / 2;
+       q = ACCESS_ONCE(a);
+       if (q) {
+               smp_store_release(&b, p);
                do_something();
        } else {
-               barrier();
-               ACCESS_ONCE(b) = q / 3;
+               smp_store_release(&b, p);
+               do_something_else();
+       }
+
+In contrast, without explicit memory barriers, two-legged-if control
+ordering is guaranteed only when the stores differ, for example:
+
+       q = ACCESS_ONCE(a);
+       if (q) {
+               ACCESS_ONCE(b) = p;
+               do_something();
+       } else {
+               ACCESS_ONCE(b) = r;
                do_something_else();
        }
 
-This will again ensure that the load from variable 'a' is ordered before the
-stores to variable 'b'.
+The initial ACCESS_ONCE() is still required to prevent the compiler from
+proving the value of 'a'.
 
 In addition, you need to be careful what you do with the local variable 'q',
 otherwise the compiler might be able to guess the value and again remove
@@ -665,12 +658,10 @@ the needed conditional.  For example:
 
        q = ACCESS_ONCE(a);
        if (q % MAX) {
-               barrier();
                ACCESS_ONCE(b) = p;
                do_something();
        } else {
-               barrier();
-               ACCESS_ONCE(b) = p;
+               ACCESS_ONCE(b) = r;
                do_something_else();
        }
 
@@ -682,9 +673,12 @@ transform the above code into the following:
        ACCESS_ONCE(b) = p;
        do_something_else();
 
-This transformation loses the ordering between the load from variable 'a'
-and the store to variable 'b'.  If you are relying on this ordering, you
-should do something like the following:
+Given this transformation, the CPU is not required to respect the ordering
+between the load from variable 'a' and the store to variable 'b'.  It is
+tempting to add a barrier(), but this does not help.  The conditional
+is gone, and the barrier won't bring it back.  Therefore, if you are
+relying on this ordering, you should make sure that MAX is greater than
+one, perhaps as follows:
 
        q = ACCESS_ONCE(a);
        BUILD_BUG_ON(MAX <= 1); /* Order load from a with store to b. */
@@ -692,35 +686,45 @@ should do something like the following:
                ACCESS_ONCE(b) = p;
                do_something();
        } else {
-               ACCESS_ONCE(b) = p;
+               ACCESS_ONCE(b) = r;
                do_something_else();
        }
 
+Please note once again that the stores to 'b' differ.  If they were
+identical, as noted earlier, the compiler could pull this store outside
+of the 'if' statement.
+
 Finally, control dependencies do -not- provide transitivity.  This is
-demonstrated by two related examples:
+demonstrated by two related examples, with the initial values of
+x and y both being zero:
 
        CPU 0                     CPU 1
        =====================     =====================
        r1 = ACCESS_ONCE(x);      r2 = ACCESS_ONCE(y);
-       if (r1 >= 0)              if (r2 >= 0)
+       if (r1 > 0)               if (r2 > 0)
          ACCESS_ONCE(y) = 1;       ACCESS_ONCE(x) = 1;
 
        assert(!(r1 == 1 && r2 == 1));
 
 The above two-CPU example will never trigger the assert().  However,
 if control dependencies guaranteed transitivity (which they do not),
-then adding the following two CPUs would guarantee a related assertion:
+then adding the following CPU would guarantee a related assertion:
 
-       CPU 2                     CPU 3
-       =====================     =====================
-       ACCESS_ONCE(x) = 2;       ACCESS_ONCE(y) = 2;
+       CPU 2
+       =====================
+       ACCESS_ONCE(x) = 2;
+
+       assert(!(r1 == 2 && r2 == 1 && x == 2)); /* FAILS!!! */
 
-       assert(!(r1 == 2 && r2 == 2 && x == 1 && y == 1)); /* FAILS!!! */
+But because control dependencies do -not- provide transitivity, the above
+assertion can fail after the combined three-CPU example completes.  If you
+need the three-CPU example to provide ordering, you will need smp_mb()
+between the loads and stores in the CPU 0 and CPU 1 code fragments,
+that is, just before or just after the "if" statements.
 
-But because control dependencies do -not- provide transitivity, the
-above assertion can fail after the combined four-CPU example completes.
-If you need the four-CPU example to provide ordering, you will need
-smp_mb() between the loads and stores in the CPU 0 and CPU 1 code fragments.
+These two examples are the LB and WWC litmus tests from this paper:
+http://www.cl.cam.ac.uk/users/pes20/ppc-supplemental/test6.pdf and this
+site: https://www.cl.cam.ac.uk/~pes20/ppcmem/index.html.
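+
+For concreteness, here is a sketch of that strengthened three-CPU
+version, with smp_mb() placed between each CPU's load and "if":
+
+	CPU 0                     CPU 1
+	=====================     =====================
+	r1 = ACCESS_ONCE(x);      r2 = ACCESS_ONCE(y);
+	smp_mb();                 smp_mb();
+	if (r1 > 0)               if (r2 > 0)
+	  ACCESS_ONCE(y) = 1;       ACCESS_ONCE(x) = 1;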
 
 In summary:
 
index 321ed0d4e6752a124481316c28e4972ab3e48539..334ff89aada0f1b3f6c052d2f56a53175ac46e84 100644 (file)
@@ -269,6 +269,14 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
                                         struct task_struct *next) { }
 #endif /* CONFIG_RCU_USER_QS */
 
+#ifdef CONFIG_RCU_NOCB_CPU
+void rcu_init_nohz(void);
+#else /* #ifdef CONFIG_RCU_NOCB_CPU */
+static inline void rcu_init_nohz(void)
+{
+}
+#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
+
 /**
  * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
  * @a: Code that RCU needs to pay attention to.
index 5ca58fcbaf1b3be68c9bca647993be7201974faa..fec46f8c08eb1c0f6eb42196c0c1d355f4dd0786 100644 (file)
@@ -51,7 +51,7 @@
 
 /* Definitions for online/offline exerciser. */
 int torture_onoff_init(long ooholdoff, long oointerval);
-char *torture_onoff_stats(char *page);
+void torture_onoff_stats(void);
 bool torture_onoff_failures(void);
 
 /* Low-rider random number generator. */
index e84c6423a2e5a2dbe80157b13f8b16d17c3e2d06..64ee4d9677867b8949a0b38f3530c38c0eed3d9f 100644 (file)
@@ -737,7 +737,7 @@ choice
 
 config RCU_NOCB_CPU_NONE
        bool "No build_forced no-CBs CPUs"
-       depends on RCU_NOCB_CPU && !NO_HZ_FULL_ALL
+       depends on RCU_NOCB_CPU
        help
          This option does not force any of the CPUs to be no-CBs CPUs.
          Only CPUs designated by the rcu_nocbs= boot parameter will be
@@ -751,7 +751,7 @@ config RCU_NOCB_CPU_NONE
 
 config RCU_NOCB_CPU_ZERO
        bool "CPU 0 is a build_forced no-CBs CPU"
-       depends on RCU_NOCB_CPU && !NO_HZ_FULL_ALL
+       depends on RCU_NOCB_CPU
        help
          This option forces CPU 0 to be a no-CBs CPU, so that its RCU
          callbacks are invoked by a per-CPU kthread whose name begins
index bb1aed928f21391b63493112c6bb1a1eae39077f..e3c4cdd94d5b62ea561da64644e5e95708116ce1 100644 (file)
@@ -578,6 +578,7 @@ asmlinkage __visible void __init start_kernel(void)
        idr_init_cache();
        rcu_init();
        tick_nohz_init();
+       rcu_init_nohz();
        context_tracking_init();
        radix_tree_init();
        /* init some links before init_ISA_irqs() */
index 948a7693748ed3d0be04fb022d5ac6a195903e3c..0bcd53adac73afb0f67c3d292ff556f9876e3ae8 100644 (file)
 #include <linux/trace_clock.h>
 #include <asm/byteorder.h>
 #include <linux/torture.h>
+#include <linux/vmalloc.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
 
 
+torture_param(int, cbflood_inter_holdoff, HZ,
+             "Holdoff between floods (jiffies)");
+torture_param(int, cbflood_intra_holdoff, 1,
+             "Holdoff between bursts (jiffies)");
+torture_param(int, cbflood_n_burst, 3, "# bursts in flood, zero to disable");
+torture_param(int, cbflood_n_per_burst, 20000,
+             "# callbacks per burst in flood");
 torture_param(int, fqs_duration, 0,
              "Duration of fqs bursts (us), 0 to disable");
 torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
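+/*
+ * For reference, torture_param() (from include/linux/torture.h) wraps
+ * the usual module-parameter boilerplate, approximately as follows:
+ *
+ *	#define torture_param(type, name, init, msg) \
+ *		static type name = init; \
+ *		module_param(name, type, 0444); \
+ *		MODULE_PARM_DESC(name, msg)
+ */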
@@ -96,10 +104,12 @@ module_param(torture_type, charp, 0444);
 MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, ...)");
 
 static int nrealreaders;
+static int ncbflooders;
 static struct task_struct *writer_task;
 static struct task_struct **fakewriter_tasks;
 static struct task_struct **reader_tasks;
 static struct task_struct *stats_task;
+static struct task_struct **cbflood_task;
 static struct task_struct *fqs_task;
 static struct task_struct *boost_tasks[NR_CPUS];
 static struct task_struct *stall_task;
@@ -138,6 +148,7 @@ static long n_rcu_torture_boosts;
 static long n_rcu_torture_timers;
 static long n_barrier_attempts;
 static long n_barrier_successes;
+static atomic_long_t n_cbfloods;
 static struct list_head rcu_torture_removed;
 
 static int rcu_torture_writer_state;
@@ -182,7 +193,7 @@ static u64 notrace rcu_trace_clock_local(void)
 #endif /* #else #ifdef CONFIG_RCU_TRACE */
 
 static unsigned long boost_starttime;  /* jiffies of next boost test start. */
-DEFINE_MUTEX(boost_mutex);             /* protect setting boost_starttime */
+static DEFINE_MUTEX(boost_mutex);      /* protect setting boost_starttime */
                                        /*  and boost task create/destroy. */
 static atomic_t barrier_cbs_count;     /* Barrier callbacks registered. */
 static bool barrier_phase;             /* Test phase. */
@@ -242,7 +253,7 @@ struct rcu_torture_ops {
        void (*call)(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
        void (*cb_barrier)(void);
        void (*fqs)(void);
-       void (*stats)(char *page);
+       void (*stats)(void);
        int irq_capable;
        int can_boost;
        const char *name;
@@ -525,21 +536,21 @@ static void srcu_torture_barrier(void)
        srcu_barrier(&srcu_ctl);
 }
 
-static void srcu_torture_stats(char *page)
+static void srcu_torture_stats(void)
 {
        int cpu;
        int idx = srcu_ctl.completed & 0x1;
 
-       page += sprintf(page, "%s%s per-CPU(idx=%d):",
-                      torture_type, TORTURE_FLAG, idx);
+       pr_alert("%s%s per-CPU(idx=%d):",
+                torture_type, TORTURE_FLAG, idx);
        for_each_possible_cpu(cpu) {
                long c0, c1;
 
                c0 = (long)per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx];
                c1 = (long)per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx];
-               page += sprintf(page, " %d(%ld,%ld)", cpu, c0, c1);
+               pr_cont(" %d(%ld,%ld)", cpu, c0, c1);
        }
-       sprintf(page, "\n");
+       pr_cont("\n");
 }
 
 static void srcu_torture_synchronize_expedited(void)
@@ -707,6 +718,58 @@ checkwait: stutter_wait("rcu_torture_boost");
        return 0;
 }
 
+static void rcu_torture_cbflood_cb(struct rcu_head *rhp)
+{
+}
+
+/*
+ * RCU torture callback-flood kthread.  Repeatedly induces bursts of calls
+ * to call_rcu() or analogous, increasing the probability of occurrence
+ * of callback-overflow corner cases.
+ */
+static int
+rcu_torture_cbflood(void *arg)
+{
+       int err = 1;
+       int i;
+       int j;
+       struct rcu_head *rhp;
+
+       if (cbflood_n_per_burst > 0 &&
+           cbflood_inter_holdoff > 0 &&
+           cbflood_intra_holdoff > 0 &&
+           cur_ops->call &&
+           cur_ops->cb_barrier) {
+               rhp = vmalloc(sizeof(*rhp) *
+                             cbflood_n_burst * cbflood_n_per_burst);
+               err = !rhp;
+       }
+       if (err) {
+               VERBOSE_TOROUT_STRING("rcu_torture_cbflood disabled: Bad args or OOM");
+               while (!torture_must_stop())
+                       schedule_timeout_interruptible(HZ);
+               return 0;
+       }
+       VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
+       do {
+               schedule_timeout_interruptible(cbflood_inter_holdoff);
+               atomic_long_inc(&n_cbfloods);
+               WARN_ON(signal_pending(current));
+               for (i = 0; i < cbflood_n_burst; i++) {
+                       for (j = 0; j < cbflood_n_per_burst; j++) {
+                               cur_ops->call(&rhp[i * cbflood_n_per_burst + j],
+                                             rcu_torture_cbflood_cb);
+                       }
+                       schedule_timeout_interruptible(cbflood_intra_holdoff);
+                       WARN_ON(signal_pending(current));
+               }
+               cur_ops->cb_barrier();
+               stutter_wait("rcu_torture_cbflood");
+       } while (!torture_must_stop());
+       torture_kthread_stopping("rcu_torture_cbflood");
+       return 0;
+}
+
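+/*
+ * Example usage (illustrative values): when rcutorture is built as a
+ * module, the flood parameters above can be set at load time, e.g.:
+ *
+ *	modprobe rcutorture cbflood_n_burst=5 cbflood_n_per_burst=50000
+ */
+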
 /*
  * RCU torture force-quiescent-state kthread.  Repeatedly induces
  * bursts of calls to force_quiescent_state(), increasing the probability
@@ -1031,10 +1094,15 @@ rcu_torture_reader(void *arg)
 }
 
 /*
- * Create an RCU-torture statistics message in the specified buffer.
+ * Print torture statistics.  Caller must ensure that there is only
+ * one call to this function at a given time!!!  This is normally
+ * accomplished by relying on the module system to only have one copy
+ * of the module loaded, and then by giving the rcu_torture_stats
+ * kthread full control (or the init/cleanup functions when rcu_torture_stats
+ * thread is not running).
  */
 static void
-rcu_torture_printk(char *page)
+rcu_torture_stats_print(void)
 {
        int cpu;
        int i;
@@ -1052,55 +1120,61 @@ rcu_torture_printk(char *page)
                if (pipesummary[i] != 0)
                        break;
        }
-       page += sprintf(page, "%s%s ", torture_type, TORTURE_FLAG);
-       page += sprintf(page,
-                      "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
-                      rcu_torture_current,
-                      rcu_torture_current_version,
-                      list_empty(&rcu_torture_freelist),
-                      atomic_read(&n_rcu_torture_alloc),
-                      atomic_read(&n_rcu_torture_alloc_fail),
-                      atomic_read(&n_rcu_torture_free));
-       page += sprintf(page, "rtmbe: %d rtbke: %ld rtbre: %ld ",
-                      atomic_read(&n_rcu_torture_mberror),
-                      n_rcu_torture_boost_ktrerror,
-                      n_rcu_torture_boost_rterror);
-       page += sprintf(page, "rtbf: %ld rtb: %ld nt: %ld ",
-                      n_rcu_torture_boost_failure,
-                      n_rcu_torture_boosts,
-                      n_rcu_torture_timers);
-       page = torture_onoff_stats(page);
-       page += sprintf(page, "barrier: %ld/%ld:%ld",
-                      n_barrier_successes,
-                      n_barrier_attempts,
-                      n_rcu_torture_barrier_error);
-       page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
+
+       pr_alert("%s%s ", torture_type, TORTURE_FLAG);
+       pr_cont("rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
+               rcu_torture_current,
+               rcu_torture_current_version,
+               list_empty(&rcu_torture_freelist),
+               atomic_read(&n_rcu_torture_alloc),
+               atomic_read(&n_rcu_torture_alloc_fail),
+               atomic_read(&n_rcu_torture_free));
+       pr_cont("rtmbe: %d rtbke: %ld rtbre: %ld ",
+               atomic_read(&n_rcu_torture_mberror),
+               n_rcu_torture_boost_ktrerror,
+               n_rcu_torture_boost_rterror);
+       pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
+               n_rcu_torture_boost_failure,
+               n_rcu_torture_boosts,
+               n_rcu_torture_timers);
+       torture_onoff_stats();
+       pr_cont("barrier: %ld/%ld:%ld ",
+               n_barrier_successes,
+               n_barrier_attempts,
+               n_rcu_torture_barrier_error);
+       pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods));
+
+       pr_alert("%s%s ", torture_type, TORTURE_FLAG);
        if (atomic_read(&n_rcu_torture_mberror) != 0 ||
            n_rcu_torture_barrier_error != 0 ||
            n_rcu_torture_boost_ktrerror != 0 ||
            n_rcu_torture_boost_rterror != 0 ||
            n_rcu_torture_boost_failure != 0 ||
            i > 1) {
-               page += sprintf(page, "!!! ");
+               pr_cont("%s", "!!! ");
                atomic_inc(&n_rcu_torture_error);
                WARN_ON_ONCE(1);
        }
-       page += sprintf(page, "Reader Pipe: ");
+       pr_cont("Reader Pipe: ");
        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
-               page += sprintf(page, " %ld", pipesummary[i]);
-       page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
-       page += sprintf(page, "Reader Batch: ");
+               pr_cont(" %ld", pipesummary[i]);
+       pr_cont("\n");
+
+       pr_alert("%s%s ", torture_type, TORTURE_FLAG);
+       pr_cont("Reader Batch: ");
        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
-               page += sprintf(page, " %ld", batchsummary[i]);
-       page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
-       page += sprintf(page, "Free-Block Circulation: ");
+               pr_cont(" %ld", batchsummary[i]);
+       pr_cont("\n");
+
+       pr_alert("%s%s ", torture_type, TORTURE_FLAG);
+       pr_cont("Free-Block Circulation: ");
        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
-               page += sprintf(page, " %d",
-                              atomic_read(&rcu_torture_wcount[i]));
+               pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
        }
-       page += sprintf(page, "\n");
+       pr_cont("\n");
+
        if (cur_ops->stats)
-               cur_ops->stats(page);
+               cur_ops->stats();
        if (rtcv_snap == rcu_torture_current_version &&
            rcu_torture_current != NULL) {
                int __maybe_unused flags;
@@ -1109,40 +1183,15 @@ rcu_torture_printk(char *page)
 
                rcutorture_get_gp_data(cur_ops->ttype,
                                       &flags, &gpnum, &completed);
-               page += sprintf(page,
-                               "??? Writer stall state %d g%lu c%lu f%#x\n",
-                               rcu_torture_writer_state,
-                               gpnum, completed, flags);
+               pr_alert("??? Writer stall state %d g%lu c%lu f%#x\n",
+                        rcu_torture_writer_state,
+                        gpnum, completed, flags);
                show_rcu_gp_kthreads();
                rcutorture_trace_dump();
        }
        rtcv_snap = rcu_torture_current_version;
 }
 
-/*
- * Print torture statistics.  Caller must ensure that there is only
- * one call to this function at a given time!!!  This is normally
- * accomplished by relying on the module system to only have one copy
- * of the module loaded, and then by giving the rcu_torture_stats
- * kthread full control (or the init/cleanup functions when rcu_torture_stats
- * thread is not running).
- */
-static void
-rcu_torture_stats_print(void)
-{
-       int size = nr_cpu_ids * 200 + 8192;
-       char *buf;
-
-       buf = kmalloc(size, GFP_KERNEL);
-       if (!buf) {
-               pr_err("rcu-torture: Out of memory, need: %d", size);
-               return;
-       }
-       rcu_torture_printk(buf);
-       pr_alert("%s", buf);
-       kfree(buf);
-}
-
 /*
  * Periodically prints torture statistics, if periodic statistics printing
  * was specified via the stat_interval module parameter.
@@ -1447,6 +1496,8 @@ rcu_torture_cleanup(void)
 
        torture_stop_kthread(rcu_torture_stats, stats_task);
        torture_stop_kthread(rcu_torture_fqs, fqs_task);
+       for (i = 0; i < ncbflooders; i++)
+               torture_stop_kthread(rcu_torture_cbflood, cbflood_task[i]);
        if ((test_boost == 1 && cur_ops->can_boost) ||
            test_boost == 2) {
                unregister_cpu_notifier(&rcutorture_cpu_nb);
@@ -1693,6 +1744,24 @@ rcu_torture_init(void)
                goto unwind;
        if (object_debug)
                rcu_test_debug_objects();
+       if (cbflood_n_burst > 0) {
+               /* Create the cbflood threads */
+               ncbflooders = (num_online_cpus() + 3) / 4;
+               cbflood_task = kcalloc(ncbflooders, sizeof(*cbflood_task),
+                                      GFP_KERNEL);
+               if (!cbflood_task) {
+                       VERBOSE_TOROUT_ERRSTRING("out of memory");
+                       firsterr = -ENOMEM;
+                       goto unwind;
+               }
+               for (i = 0; i < ncbflooders; i++) {
+                       firsterr = torture_create_kthread(rcu_torture_cbflood,
+                                                         NULL,
+                                                         cbflood_task[i]);
+                       if (firsterr)
+                               goto unwind;
+               }
+       }
        rcutorture_record_test_transition();
        torture_init_end();
        return 0;
index 3e002c1cb441397a454dff0d063150f94ca762a2..be0d0a1b71292fd0efc1b377c3d259b16fccdb19 100644 (file)
@@ -3451,6 +3451,7 @@ static int rcu_cpu_notify(struct notifier_block *self,
        case CPU_UP_PREPARE_FROZEN:
                rcu_prepare_cpu(cpu);
                rcu_prepare_kthreads(cpu);
+               rcu_spawn_all_nocb_kthreads(cpu);
                break;
        case CPU_ONLINE:
        case CPU_DOWN_FAILED:
@@ -3498,7 +3499,7 @@ static int rcu_pm_notify(struct notifier_block *self,
 }
 
 /*
- * Spawn the kthread that handles this RCU flavor's grace periods.
+ * Spawn the kthreads that handle each RCU flavor's grace periods.
  */
 static int __init rcu_spawn_gp_kthread(void)
 {
@@ -3507,6 +3508,7 @@ static int __init rcu_spawn_gp_kthread(void)
        struct rcu_state *rsp;
        struct task_struct *t;
 
+       rcu_scheduler_fully_active = 1;
        for_each_rcu_flavor(rsp) {
                t = kthread_run(rcu_gp_kthread, rsp, "%s", rsp->name);
                BUG_ON(IS_ERR(t));
@@ -3514,8 +3516,9 @@ static int __init rcu_spawn_gp_kthread(void)
                raw_spin_lock_irqsave(&rnp->lock, flags);
                rsp->gp_kthread = t;
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
-               rcu_spawn_nocb_kthreads(rsp);
        }
+       rcu_spawn_nocb_kthreads();
+       rcu_spawn_boost_kthreads();
        return 0;
 }
 early_initcall(rcu_spawn_gp_kthread);
index e33562f2a655cab0d50bda67564de65c03bd615f..ffedcb9d42dc12208742a57f47157ad91157c481 100644 (file)
@@ -577,6 +577,7 @@ static void rcu_preempt_do_callbacks(void);
 static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
                                                 struct rcu_node *rnp);
 #endif /* #ifdef CONFIG_RCU_BOOST */
+static void __init rcu_spawn_boost_kthreads(void);
 static void rcu_prepare_kthreads(int cpu);
 static void rcu_cleanup_after_idle(int cpu);
 static void rcu_prepare_for_idle(int cpu);
@@ -597,7 +598,11 @@ static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
 static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
 static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
 static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
-static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp);
+static void rcu_spawn_all_nocb_kthreads(int cpu);
+static void __init rcu_spawn_nocb_kthreads(void);
+#ifdef CONFIG_RCU_NOCB_CPU
+static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
+#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
 static bool init_nocb_callback_list(struct rcu_data *rdp);
 static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq);
index bbb0a0cd091b9a0df13a74ff5a13716f1af538b4..59318ea32bc8e67ab4cbacb55f3b46bd4546e776 100644 (file)
@@ -85,33 +85,6 @@ static void __init rcu_bootup_announce_oddness(void)
                pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
        if (nr_cpu_ids != NR_CPUS)
                pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
-#ifdef CONFIG_RCU_NOCB_CPU
-#ifndef CONFIG_RCU_NOCB_CPU_NONE
-       if (!have_rcu_nocb_mask) {
-               zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL);
-               have_rcu_nocb_mask = true;
-       }
-#ifdef CONFIG_RCU_NOCB_CPU_ZERO
-       pr_info("\tOffload RCU callbacks from CPU 0\n");
-       cpumask_set_cpu(0, rcu_nocb_mask);
-#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ZERO */
-#ifdef CONFIG_RCU_NOCB_CPU_ALL
-       pr_info("\tOffload RCU callbacks from all CPUs\n");
-       cpumask_copy(rcu_nocb_mask, cpu_possible_mask);
-#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ALL */
-#endif /* #ifndef CONFIG_RCU_NOCB_CPU_NONE */
-       if (have_rcu_nocb_mask) {
-               if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
-                       pr_info("\tNote: kernel parameter 'rcu_nocbs=' contains nonexistent CPUs.\n");
-                       cpumask_and(rcu_nocb_mask, cpu_possible_mask,
-                                   rcu_nocb_mask);
-               }
-               cpulist_scnprintf(nocb_buf, sizeof(nocb_buf), rcu_nocb_mask);
-               pr_info("\tOffload RCU callbacks from CPUs: %s.\n", nocb_buf);
-               if (rcu_nocb_poll)
-                       pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
-       }
-#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 }
 
 #ifdef CONFIG_TREE_PREEMPT_RCU
@@ -1463,14 +1436,13 @@ static struct smp_hotplug_thread rcu_cpu_thread_spec = {
 };
 
 /*
- * Spawn all kthreads -- called as soon as the scheduler is running.
+ * Spawn boost kthreads -- called as soon as the scheduler is running.
  */
-static int __init rcu_spawn_kthreads(void)
+static void __init rcu_spawn_boost_kthreads(void)
 {
        struct rcu_node *rnp;
        int cpu;
 
-       rcu_scheduler_fully_active = 1;
        for_each_possible_cpu(cpu)
                per_cpu(rcu_cpu_has_work, cpu) = 0;
        BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
@@ -1480,9 +1452,7 @@ static int __init rcu_spawn_kthreads(void)
                rcu_for_each_leaf_node(rcu_state_p, rnp)
                        (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
        }
-       return 0;
 }
-early_initcall(rcu_spawn_kthreads);
 
 static void rcu_prepare_kthreads(int cpu)
 {
@@ -1520,12 +1490,9 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 {
 }
 
-static int __init rcu_scheduler_really_started(void)
+static void __init rcu_spawn_boost_kthreads(void)
 {
-       rcu_scheduler_fully_active = 1;
-       return 0;
 }
-early_initcall(rcu_scheduler_really_started);
 
 static void rcu_prepare_kthreads(int cpu)
 {
@@ -2076,7 +2043,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
        if (!ACCESS_ONCE(rdp_leader->nocb_kthread))
                return;
        if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
-               /* Prior xchg orders against prior callback enqueue. */
+               /* Prior smp_mb__after_atomic() orders against prior enqueue. */
                ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
                wake_up(&rdp_leader->nocb_wq);
        }
@@ -2105,6 +2072,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
        ACCESS_ONCE(*old_rhpp) = rhp;
        atomic_long_add(rhcount, &rdp->nocb_q_count);
        atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
+       smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
 
        /* If we are not being polled and there is a kthread, awaken it ... */
        t = ACCESS_ONCE(rdp->nocb_kthread);
@@ -2158,7 +2126,7 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
 {
 
        if (!rcu_is_nocb_cpu(rdp->cpu))
-               return 0;
+               return false;
        __call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy, flags);
        if (__is_kfree_rcu_offset((unsigned long)rhp->func))
                trace_rcu_kfree_callback(rdp->rsp->name, rhp,
@@ -2169,7 +2137,18 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
                trace_rcu_callback(rdp->rsp->name, rhp,
                                   -atomic_long_read(&rdp->nocb_q_count_lazy),
                                   -atomic_long_read(&rdp->nocb_q_count));
-       return 1;
+
+       /*
+        * If called from an extended quiescent state with interrupts
+        * disabled, invoke the RCU core in order to allow the idle-entry
+        * deferred-wakeup check to function.
+        */
+       if (irqs_disabled_flags(flags) &&
+           !rcu_is_watching() &&
+           cpu_online(smp_processor_id()))
+               invoke_rcu_core();
+
+       return true;
 }
 
 /*
@@ -2185,7 +2164,7 @@ static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
 
        /* If this is not a no-CBs CPU, tell the caller to do it the old way. */
        if (!rcu_is_nocb_cpu(smp_processor_id()))
-               return 0;
+               return false;
        rsp->qlen = 0;
        rsp->qlen_lazy = 0;
 
@@ -2204,7 +2183,7 @@ static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
                rsp->orphan_nxtlist = NULL;
                rsp->orphan_nxttail = &rsp->orphan_nxtlist;
        }
-       return 1;
+       return true;
 }
 
 /*
@@ -2335,6 +2314,7 @@ wait_again:
                atomic_long_add(rdp->nocb_gp_count, &rdp->nocb_follower_count);
                atomic_long_add(rdp->nocb_gp_count_lazy,
                                &rdp->nocb_follower_count_lazy);
+               smp_mb__after_atomic(); /* Store *tail before wakeup. */
                if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
                        /*
                         * List was empty, wake up the follower.
@@ -2463,6 +2443,71 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
        trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
 }
 
+void __init rcu_init_nohz(void)
+{
+       int cpu;
+       bool need_rcu_nocb_mask = true;
+       struct rcu_state *rsp;
+
+#ifdef CONFIG_RCU_NOCB_CPU_NONE
+       need_rcu_nocb_mask = false;
+#endif /* #ifdef CONFIG_RCU_NOCB_CPU_NONE */
+
+#if defined(CONFIG_NO_HZ_FULL)
+       if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
+               need_rcu_nocb_mask = true;
+#endif /* #if defined(CONFIG_NO_HZ_FULL) */
+
+       if (!have_rcu_nocb_mask && need_rcu_nocb_mask) {
+               if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
+                       pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
+                       return;
+               }
+               have_rcu_nocb_mask = true;
+       }
+       if (!have_rcu_nocb_mask)
+               return;
+
+#ifdef CONFIG_RCU_NOCB_CPU_ZERO
+       pr_info("\tOffload RCU callbacks from CPU 0\n");
+       cpumask_set_cpu(0, rcu_nocb_mask);
+#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ZERO */
+#ifdef CONFIG_RCU_NOCB_CPU_ALL
+       pr_info("\tOffload RCU callbacks from all CPUs\n");
+       cpumask_copy(rcu_nocb_mask, cpu_possible_mask);
+#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ALL */
+#if defined(CONFIG_NO_HZ_FULL)
+       if (tick_nohz_full_running)
+               cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
+#endif /* #if defined(CONFIG_NO_HZ_FULL) */
+
+       if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
+               pr_info("\tNote: kernel parameter 'rcu_nocbs=' contains nonexistent CPUs.\n");
+               cpumask_and(rcu_nocb_mask, cpu_possible_mask,
+                           rcu_nocb_mask);
+       }
+       cpulist_scnprintf(nocb_buf, sizeof(nocb_buf), rcu_nocb_mask);
+       pr_info("\tOffload RCU callbacks from CPUs: %s.\n", nocb_buf);
+       if (rcu_nocb_poll)
+               pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
+
+       for_each_rcu_flavor(rsp) {
+               for_each_cpu(cpu, rcu_nocb_mask) {
+                       struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+
+                       /*
+                        * If there are early callbacks, they will need
+                        * to be moved to the nocb lists.
+                        */
+                       WARN_ON_ONCE(rdp->nxttail[RCU_NEXT_TAIL] !=
+                                    &rdp->nxtlist &&
+                                    rdp->nxttail[RCU_NEXT_TAIL] != NULL);
+                       init_nocb_callback_list(rdp);
+               }
+               rcu_organize_nocb_kthreads(rsp);
+       }
+}
+
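+/*
+ * Example (illustrative CPU ranges): under CONFIG_NO_HZ_FULL, booting
+ * with "nohz_full=2-9 rcu_nocbs=1-3" causes the cpumask_or() above to
+ * merge the two masks, so that callbacks are offloaded from CPUs 1-9.
+ */
+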
 /* Initialize per-rcu_data variables for no-CBs CPUs. */
 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
 {
@@ -2471,15 +2516,85 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
        rdp->nocb_follower_tail = &rdp->nocb_follower_head;
 }
 
+/*
+ * If the specified CPU is a no-CBs CPU that does not already have its
+ * rcuo kthread for the specified RCU flavor, spawn it.  If the CPUs are
+ * brought online out of order, this can require re-organizing the
+ * leader-follower relationships.
+ */
+static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
+{
+       struct rcu_data *rdp;
+       struct rcu_data *rdp_last;
+       struct rcu_data *rdp_old_leader;
+       struct rcu_data *rdp_spawn = per_cpu_ptr(rsp->rda, cpu);
+       struct task_struct *t;
+
+       /*
+        * If this isn't a no-CBs CPU or if it already has an rcuo kthread,
+        * then nothing to do.
+        */
+       if (!rcu_is_nocb_cpu(cpu) || rdp_spawn->nocb_kthread)
+               return;
+
+       /* If we didn't spawn the leader first, reorganize! */
+       rdp_old_leader = rdp_spawn->nocb_leader;
+       if (rdp_old_leader != rdp_spawn && !rdp_old_leader->nocb_kthread) {
+               rdp_last = NULL;
+               rdp = rdp_old_leader;
+               do {
+                       rdp->nocb_leader = rdp_spawn;
+                       if (rdp_last && rdp != rdp_spawn)
+                               rdp_last->nocb_next_follower = rdp;
+                       rdp_last = rdp;
+                       rdp = rdp->nocb_next_follower;
+                       rdp_last->nocb_next_follower = NULL;
+               } while (rdp);
+               rdp_spawn->nocb_next_follower = rdp_old_leader;
+       }
+
+       /* Spawn the kthread for this CPU and RCU flavor. */
+       t = kthread_run(rcu_nocb_kthread, rdp_spawn,
+                       "rcuo%c/%d", rsp->abbr, cpu);
+       BUG_ON(IS_ERR(t));
+       ACCESS_ONCE(rdp_spawn->nocb_kthread) = t;
+}
+
+/*
+ * If the specified CPU is a no-CBs CPU that does not already have its
+ * rcuo kthreads, spawn them.
+ */
+static void rcu_spawn_all_nocb_kthreads(int cpu)
+{
+       struct rcu_state *rsp;
+
+       if (rcu_scheduler_fully_active)
+               for_each_rcu_flavor(rsp)
+                       rcu_spawn_one_nocb_kthread(rsp, cpu);
+}
+
+/*
+ * Once the scheduler is running, spawn rcuo kthreads for all online
+ * no-CBs CPUs.  This assumes that the early_initcall()s happen before
+ * non-boot CPUs come online -- if this changes, we will need to add
+ * some mutual exclusion.
+ */
+static void __init rcu_spawn_nocb_kthreads(void)
+{
+       int cpu;
+
+       for_each_online_cpu(cpu)
+               rcu_spawn_all_nocb_kthreads(cpu);
+}
+
 /* How many follower CPU IDs per leader?  Default of -1 for sqrt(nr_cpu_ids). */
 static int rcu_nocb_leader_stride = -1;
 module_param(rcu_nocb_leader_stride, int, 0444);
 
 /*
- * Create a kthread for each RCU flavor for each no-CBs CPU.
- * Also initialize leader-follower relationships.
+ * Initialize leader-follower relationships for all no-CBs CPUs.
  */
-static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
+static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp)
 {
        int cpu;
        int ls = rcu_nocb_leader_stride;
@@ -2487,14 +2602,9 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
        struct rcu_data *rdp;
        struct rcu_data *rdp_leader = NULL;  /* Suppress misguided gcc warn. */
        struct rcu_data *rdp_prev = NULL;
-       struct task_struct *t;
 
-       if (rcu_nocb_mask == NULL)
+       if (!have_rcu_nocb_mask)
                return;
-#if defined(CONFIG_NO_HZ_FULL) && !defined(CONFIG_NO_HZ_FULL_ALL)
-       if (tick_nohz_full_running)
-               cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
-#endif /* #if defined(CONFIG_NO_HZ_FULL) && !defined(CONFIG_NO_HZ_FULL_ALL) */
        if (ls == -1) {
                ls = int_sqrt(nr_cpu_ids);
                rcu_nocb_leader_stride = ls;
@@ -2517,21 +2627,15 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
                        rdp_prev->nocb_next_follower = rdp;
                }
                rdp_prev = rdp;
-
-               /* Spawn the kthread for this CPU. */
-               t = kthread_run(rcu_nocb_kthread, rdp,
-                               "rcuo%c/%d", rsp->abbr, cpu);
-               BUG_ON(IS_ERR(t));
-               ACCESS_ONCE(rdp->nocb_kthread) = t;
        }
 }
 
 /* Prevent __call_rcu() from enqueuing callbacks on no-CBs CPUs */
 static bool init_nocb_callback_list(struct rcu_data *rdp)
 {
-       if (rcu_nocb_mask == NULL ||
-           !cpumask_test_cpu(rdp->cpu, rcu_nocb_mask))
+       if (!rcu_is_nocb_cpu(rdp->cpu))
                return false;
+
        rdp->nxttail[RCU_NEXT_TAIL] = NULL;
        return true;
 }
@@ -2553,14 +2657,14 @@ static void rcu_init_one_nocb(struct rcu_node *rnp)
 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
                            bool lazy, unsigned long flags)
 {
-       return 0;
+       return false;
 }
 
 static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
                                                     struct rcu_data *rdp,
                                                     unsigned long flags)
 {
-       return 0;
+       return false;
 }
 
 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
@@ -2576,7 +2680,11 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
 {
 }
 
-static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
+static void rcu_spawn_all_nocb_kthreads(int cpu)
+{
+}
+
+static void __init rcu_spawn_nocb_kthreads(void)
 {
 }
 
@@ -2607,16 +2715,6 @@ static void __maybe_unused rcu_kick_nohz_cpu(int cpu)
 
 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
 
-/*
- * Define RCU flavor that holds sysidle state.  This needs to be the
- * most active flavor of RCU.
- */
-#ifdef CONFIG_PREEMPT_RCU
-static struct rcu_state *rcu_sysidle_state = &rcu_preempt_state;
-#else /* #ifdef CONFIG_PREEMPT_RCU */
-static struct rcu_state *rcu_sysidle_state = &rcu_sched_state;
-#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
-
 static int full_sysidle_state;         /* Current system-idle state. */
 #define RCU_SYSIDLE_NOT                0       /* Some CPU is not idle. */
 #define RCU_SYSIDLE_SHORT      1       /* All CPUs idle for brief period. */
@@ -2634,6 +2732,10 @@ static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
 {
        unsigned long j;
 
+       /* If there are no nohz_full= CPUs, no need to track this. */
+       if (!tick_nohz_full_enabled())
+               return;
+
        /* Adjust nesting, check for fully idle. */
        if (irq) {
                rdtp->dynticks_idle_nesting--;
@@ -2699,6 +2801,10 @@ void rcu_sysidle_force_exit(void)
  */
 static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
 {
+       /* If there are no nohz_full= CPUs, no need to track this. */
+       if (!tick_nohz_full_enabled())
+               return;
+
        /* Adjust nesting, check for already non-idle. */
        if (irq) {
                rdtp->dynticks_idle_nesting++;
@@ -2753,12 +2859,16 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
        unsigned long j;
        struct rcu_dynticks *rdtp = rdp->dynticks;
 
+       /* If there are no nohz_full= CPUs, don't check system-wide idleness. */
+       if (!tick_nohz_full_enabled())
+               return;
+
        /*
         * If some other CPU has already reported non-idle, if this is
         * not the flavor of RCU that tracks sysidle state, or if this
         * is an offline or the timekeeping CPU, nothing to do.
         */
-       if (!*isidle || rdp->rsp != rcu_sysidle_state ||
+       if (!*isidle || rdp->rsp != rcu_state_p ||
            cpu_is_offline(rdp->cpu) || rdp->cpu == tick_do_timer_cpu)
                return;
        if (rcu_gp_in_progress(rdp->rsp))
@@ -2784,7 +2894,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
  */
 static bool is_sysidle_rcu_state(struct rcu_state *rsp)
 {
-       return rsp == rcu_sysidle_state;
+       return rsp == rcu_state_p;
 }
 
 /*
@@ -2862,7 +2972,7 @@ static void rcu_sysidle_cancel(void)
 static void rcu_sysidle_report(struct rcu_state *rsp, int isidle,
                               unsigned long maxj, bool gpkt)
 {
-       if (rsp != rcu_sysidle_state)
+       if (rsp != rcu_state_p)
                return;  /* Wrong flavor, ignore. */
        if (gpkt && nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL)
                return;  /* Running state machine from timekeeping CPU. */
@@ -2879,6 +2989,10 @@ static void rcu_sysidle_report(struct rcu_state *rsp, int isidle,
 static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
                                  unsigned long maxj)
 {
+       /* If there are no nohz_full= CPUs, no need to track this. */
+       if (!tick_nohz_full_enabled())
+               return;
+
        rcu_sysidle_report(rsp, isidle, maxj, true);
 }
 
@@ -2905,7 +3019,8 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
 
 /*
  * Check to see if the system is fully idle, other than the timekeeping CPU.
- * The caller must have disabled interrupts.
+ * The caller must have disabled interrupts.  This is not intended to be
+ * called unless tick_nohz_full_enabled().
  */
 bool rcu_sys_is_idle(void)
 {
@@ -2931,13 +3046,12 @@ bool rcu_sys_is_idle(void)
 
                        /* Scan all the CPUs looking for nonidle CPUs. */
                        for_each_possible_cpu(cpu) {
-                               rdp = per_cpu_ptr(rcu_sysidle_state->rda, cpu);
+                               rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
                                rcu_sysidle_check_cpu(rdp, &isidle, &maxj);
                                if (!isidle)
                                        break;
                        }
-                       rcu_sysidle_report(rcu_sysidle_state,
-                                          isidle, maxj, false);
+                       rcu_sysidle_report(rcu_state_p, isidle, maxj, false);
                        oldrss = rss;
                        rss = ACCESS_ONCE(full_sysidle_state);
                }
@@ -2964,7 +3078,7 @@ bool rcu_sys_is_idle(void)
         * provided by the memory allocator.
         */
        if (nr_cpu_ids > CONFIG_NO_HZ_FULL_SYSIDLE_SMALL &&
-           !rcu_gp_in_progress(rcu_sysidle_state) &&
+           !rcu_gp_in_progress(rcu_state_p) &&
            !rsh.inuse && xchg(&rsh.inuse, 1) == 0)
                call_rcu(&rsh.rh, rcu_sysidle_cb);
        return false;
index d600af21f0225dc97d51c925efd70b3bf89014b1..ede8b25ec1ae3e76a76e93798a2081b6309fef9a 100644 (file)
@@ -211,18 +211,16 @@ EXPORT_SYMBOL_GPL(torture_onoff_cleanup);
 /*
  * Print online/offline testing statistics.
  */
-char *torture_onoff_stats(char *page)
+void torture_onoff_stats(void)
 {
 #ifdef CONFIG_HOTPLUG_CPU
-       page += sprintf(page,
-                      "onoff: %ld/%ld:%ld/%ld %d,%d:%d,%d %lu:%lu (HZ=%d) ",
-                      n_online_successes, n_online_attempts,
-                      n_offline_successes, n_offline_attempts,
-                      min_online, max_online,
-                      min_offline, max_offline,
-                      sum_online, sum_offline, HZ);
+       pr_cont("onoff: %ld/%ld:%ld/%ld %d,%d:%d,%d %lu:%lu (HZ=%d) ",
+               n_online_successes, n_online_attempts,
+               n_offline_successes, n_offline_attempts,
+               min_online, max_online,
+               min_offline, max_offline,
+               sum_online, sum_offline, HZ);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
-       return page;
 }
 EXPORT_SYMBOL_GPL(torture_onoff_stats);
 
old mode 100644 (file)
new mode 100755 (executable)
index 9f9ffcd..56f51ae
@@ -1,5 +1,5 @@
-#!/bin/sh
-# Usage: sh config2frag.sh < .config > configfrag
+#!/bin/bash
+# Usage: config2frag.sh < .config > configfrag
 #
 # Converts the "# CONFIG_XXX is not set" to "CONFIG_XXX=n" so that the
 # resulting file becomes a legitimate Kconfig fragment.
index d686537dd55c6eb6e9ba36684e67d9cb609ea81b..eee31e261bf7de01c8c368745cf006daeaba2623 100755 (executable)
@@ -1,5 +1,5 @@
-#!/bin/sh
-# Usage: sh configcheck.sh .config .config-template
+#!/bin/bash
+# Usage: configcheck.sh .config .config-template
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
index 9c3f3d39b934609b67a48322f47a43d3b17cc653..15f1a17ca96e69695f5f266cc58198102ba1a0c9 100755 (executable)
@@ -1,6 +1,6 @@
-#!/bin/sh
+#!/bin/bash
 #
-# sh configinit.sh config-spec-file [ build output dir ]
+# Usage: configinit.sh config-spec-file [ build output dir ]
 #
 # Create a .config file from the spec file.  Run from the kernel source tree.
 # Exits with 0 if all went well, with 1 if all went well but the config
index d01b865bb1002fa00e9351e42a2648109f7e1968..b325470c01b3669e38dcdcc13e674b011f97c7b7 100644 (file)
@@ -64,6 +64,26 @@ configfrag_boot_params () {
        fi
 }
 
+# configfrag_boot_cpus bootparam-string config-fragment-file config-cpus
+#
+# Decreases the number of CPUs based on any maxcpus= boot parameter specified.
+configfrag_boot_cpus () {
+       local bootargs="`configfrag_boot_params "$1" "$2"`"
+       local maxcpus
+       if echo "${bootargs}" | grep -q 'maxcpus=[0-9]'
+       then
+               maxcpus="`echo "${bootargs}" | sed -e 's/^.*maxcpus=\([0-9]*\).*$/\1/'`"
+               if test "$3" -gt "$maxcpus"
+               then
+                       echo $maxcpus
+               else
+                       echo $3
+               fi
+       else
+               echo $3
+       fi
+}
+
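+# For example (this usage appears later in this series, in kvm.sh and
+# kvm-test-1-run.sh), a caller can clamp its CPU count as follows:
+#
+#	cpu_count=`configfrag_boot_cpus "$boot_args" "$config_template" "$cpu_count"`
+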
 # configfrag_hotplug_cpu config-fragment-file
 #
 # Returns 1 if the config fragment specifies hotplug CPU.
index 7c1e56b46de40a7c9e68eb09b54ed1550c7c6e26..00cb0db2643d4e539edb748e960541f76d82f10d 100755 (executable)
@@ -2,7 +2,7 @@
 #
 # Build a kvm-ready Linux kernel from the tree in the current directory.
 #
-# Usage: sh kvm-build.sh config-template build-dir more-configs
+# Usage: kvm-build.sh config-template build-dir more-configs
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
index 7f1ff1a8fc4b447e37eee1381a9525c685661708..43f764098e508fc048bb97aed3bb7247e11275bb 100755 (executable)
@@ -2,7 +2,7 @@
 #
 # Analyze a given results directory for locktorture progress.
 #
-# Usage: sh kvm-recheck-lock.sh resdir
+# Usage: kvm-recheck-lock.sh resdir
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
index 307c4b95f3257fa98212fba68e6983c983f768ff..d6cc07fc137fc35c78329586081a1135e5b5b850 100755 (executable)
@@ -2,7 +2,7 @@
 #
 # Analyze a given results directory for rcutorture progress.
 #
-# Usage: sh kvm-recheck-rcu.sh resdir
+# Usage: kvm-recheck-rcu.sh resdir
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
index 3f6c9b78d177fc1757a018e7a79487e48b3a2ee5..4f5b20f367a944f07f0443a480dd79d061031d87 100755 (executable)
@@ -4,7 +4,7 @@
 # check the build and console output for errors.  Given a directory
 # containing results directories, this recursively checks them all.
 #
-# Usage: sh kvm-recheck.sh resdir ...
+# Usage: kvm-recheck.sh resdir ...
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
index 0f69dcbf9def0c4af534771f400ed922346c4474..f6b2b4771b78796e8633f87b054aac64a7ddd424 100755 (executable)
@@ -6,7 +6,7 @@
 # Execute this in the source tree.  Do not run it as a background task
 # because qemu does not seem to like that much.
 #
-# Usage: sh kvm-test-1-run.sh config builddir resdir minutes qemu-args boot_args
+# Usage: kvm-test-1-run.sh config builddir resdir minutes qemu-args boot_args
 #
 # qemu-args defaults to "-nographic", along with arguments specifying the
 #                      number of CPUs and other options generated from
@@ -140,6 +140,7 @@ fi
 # Generate -smp qemu argument.
 qemu_args="-nographic $qemu_args"
 cpu_count=`configNR_CPUS.sh $config_template`
+cpu_count=`configfrag_boot_cpus "$boot_args" "$config_template" "$cpu_count"`
 vcpus=`identify_qemu_vcpus`
 if test $cpu_count -gt $vcpus
 then
@@ -214,7 +215,7 @@ then
                fi
                if test $kruntime -ge $((seconds + grace))
                then
-                       echo "!!! Hang at $kruntime vs. $seconds seconds" >> $resdir/Warnings 2>&1
+                       echo "!!! PID $qemu_pid hung at $kruntime vs. $seconds seconds" >> $resdir/Warnings 2>&1
                        kill -KILL $qemu_pid
                        break
                fi
old mode 100644 (file)
new mode 100755 (executable)
index 589e9c3..e527dc9
@@ -7,7 +7,7 @@
 # Edit the definitions below to set the locations of the various directories,
 # as well as the test duration.
 #
-# Usage: sh kvm.sh [ options ]
+# Usage: kvm.sh [ options ]
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -188,7 +188,9 @@ for CF in $configs
 do
        if test -f "$CONFIGFRAG/$kversion/$CF"
        then
-               echo $CF `configNR_CPUS.sh $CONFIGFRAG/$kversion/$CF` >> $T/cfgcpu
+               cpu_count=`configNR_CPUS.sh $CONFIGFRAG/$kversion/$CF`
+               cpu_count=`configfrag_boot_cpus "$TORTURE_BOOTARGS" "$CONFIGFRAG/$kversion/$CF" "$cpu_count"`
+               echo $CF $cpu_count >> $T/cfgcpu
        else
                echo "The --configs file $CF does not exist, terminating."
                exit 1
index 543230951c3852f35dbfb5b06f0dafe2bbc9c7e4..499d1e598e425c390e5ebac5293a72cebddb01b2 100755 (executable)
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 #
 # Check the build output from an rcutorture run for goodness.
 # The "file" is a pathname on the local system, and "title" is
@@ -6,8 +6,7 @@
 #
 # The file must contain kernel build output.
 #
-# Usage:
-#      sh parse-build.sh file title
+# Usage: parse-build.sh file title
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
index 4185d4cab32e7e23f34800732bf11719539b3ef9..f962ba4cf68b6a06121b1d6f2c11ce8ed8df63ee 100755 (executable)
@@ -1,11 +1,10 @@
-#!/bin/sh
+#!/bin/bash
 #
 # Check the console output from an rcutorture run for oopses.
 # The "file" is a pathname on the local system, and "title" is
 # a text string for error-message purposes.
 #
-# Usage:
-#      sh parse-console.sh file title
+# Usage: parse-console.sh file title
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -33,6 +32,10 @@ title="$2"
 
 . functions.sh
 
+if grep -Pq '\x00' < $file
+then
+       print_warning Console output contains nul bytes, old qemu still running?
+fi
 egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $T
 if test -s $T
 then
index 3455560ab4e4ef96db8ac142aeee972a633b36d3..e3c5f0705696d648cfd7d8d3d05bad143593cfab 100755 (executable)
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 #
 # Check the console output from a torture run for goodness.
 # The "file" is a pathname on the local system, and "title" is
@@ -7,8 +7,7 @@
 # The file must contain torture output, but can be interspersed
 # with other dmesg text, as in console-log output.
 #
-# Usage:
-#      sh parse-torture.sh file title
+# Usage: parse-torture.sh file title
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
index 063b7079c621294f06a29d2983f8e9af0b965102..38e3895759dd6ee2b05589fd75aaea92980337ae 100644 (file)
@@ -1,5 +1,4 @@
 CONFIG_SMP=y
-CONFIG_NR_CPUS=8
 CONFIG_PREEMPT_NONE=n
 CONFIG_PREEMPT_VOLUNTARY=n
 CONFIG_PREEMPT=y
@@ -10,8 +9,7 @@ CONFIG_NO_HZ_FULL=n
 CONFIG_RCU_FAST_NO_HZ=y
 CONFIG_RCU_TRACE=y
 CONFIG_HOTPLUG_CPU=y
-CONFIG_RCU_FANOUT=8
-CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_MAXSMP=y
 CONFIG_RCU_NOCB_CPU=y
 CONFIG_RCU_NOCB_CPU_ZERO=y
 CONFIG_DEBUG_LOCK_ALLOC=n
index 0fc8a3428938f8fcd3bbb5d0a80aa9ab2161e40d..adc3abc82fb8241308a75d8a05f1168b1ec2e417 100644 (file)
@@ -1 +1 @@
-rcutorture.torture_type=rcu_bh
+rcutorture.torture_type=rcu_bh maxcpus=8
index ab6225506909c46e8c28aee43a3ccb063d33eba0..8f1017666aa7698853f43456e07cd66db52e366c 100644 (file)
@@ -1,5 +1,6 @@
 CONFIG_SMP=y
 CONFIG_NR_CPUS=16
+CONFIG_CPUMASK_OFFSTACK=y
 CONFIG_PREEMPT_NONE=y
 CONFIG_PREEMPT_VOLUNTARY=n
 CONFIG_PREEMPT=n
@@ -7,7 +8,7 @@ CONFIG_PREEMPT=n
 CONFIG_HZ_PERIODIC=n
 CONFIG_NO_HZ_IDLE=n
 CONFIG_NO_HZ_FULL=y
-CONFIG_NO_HZ_FULL_ALL=y
+CONFIG_NO_HZ_FULL_ALL=n
 CONFIG_NO_HZ_FULL_SYSIDLE=y
 CONFIG_RCU_FAST_NO_HZ=n
 CONFIG_RCU_TRACE=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE07.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE07.boot
new file mode 100644 (file)
index 0000000..d446099
--- /dev/null
@@ -0,0 +1 @@
+nohz_full=2-9
index 49d134c25c04e684dfcf6a9aa278c742a9074843..4170e714f04410f0eb5b16b9c92d76add8d9e40f 100644 (file)
@@ -6,6 +6,7 @@ this case.  There are probably much better ways of doing this.
 That said, here are the commands:
 
 ------------------------------------------------------------------------
+cd tools/testing/selftests/rcutorture
 zcat /initrd.img > /tmp/initrd.img.zcat
 mkdir initrd
 cd initrd