rcu: Remove waitqueue usage for cpu, node, and boost kthreads
author    Peter Zijlstra <a.p.zijlstra@chello.nl>
          Fri, 20 May 2011 23:06:29 +0000 (16:06 -0700)
committer Ingo Molnar <mingo@elte.hu>
          Sat, 28 May 2011 15:41:52 +0000 (17:41 +0200)
It is not necessary to use waitqueues for the RCU kthreads because
we always know exactly which thread is to be awakened.  In addition,
wake_up() only issues an actual wakeup when a thread is already waiting
on the queue, which is why an extra explicit wake_up_process() was
needed to get each RCU kthread started in the first place.

Replacing the waitqueues (and wake_up()) with direct wake_up_process()
calls removes the need for that initial wake_up_process() and also
shrinks the data structures a bit.  The wakeup logic is placed in a
new rcu_wait() macro.
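
For illustration, here is the pattern in isolation, as a minimal
sketch: the rcu_wait() macro is quoted from the change below, while
example_work, example_task, example_kthread(), and example_queue_work()
are hypothetical stand-ins, not code from this patch.

    #include <linux/kthread.h>
    #include <linux/sched.h>

    /* The new rcu_wait() macro, as added to kernel/rcutree.h below. */
    #define rcu_wait(cond)                                          \
    do {                                                            \
            for (;;) {                                              \
                    set_current_state(TASK_INTERRUPTIBLE);          \
                    if (cond)                                       \
                            break;                                  \
                    schedule();                                     \
            }                                                       \
            __set_current_state(TASK_RUNNING);                      \
    } while (0)

    static int example_work;                 /* hypothetical work flag */
    static struct task_struct *example_task; /* hypothetical kthread */

    /* Sleeper: no waitqueue; sleep until there is work or a stop request. */
    static int example_kthread(void *unused)
    {
            for (;;) {
                    rcu_wait(example_work != 0 || kthread_should_stop());
                    if (kthread_should_stop())
                            break;
                    example_work = 0;
                    /* ... process the work ... */
            }
            return 0;
    }

    /* Waker: publish the condition first, then wake the one known task. */
    static void example_queue_work(void)
    {
            example_work = 1;
            wake_up_process(example_task);
    }

No wakeup can be lost here: set_current_state() implies a full memory
barrier before the condition is tested, so either the sleeper sees
example_work set and breaks out of the loop, or the waker's
wake_up_process() moves the task back to TASK_RUNNING and schedule()
returns immediately.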

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/rcutree.c
kernel/rcutree.h
kernel/rcutree_plugin.h

diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 5d96d68d20f8b666be15c6e2e6ac1c86f2d0af47..05e254e930e30827efcd25c35093dd7ff70b2be9 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -95,7 +95,6 @@ static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
 DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
-static DEFINE_PER_CPU(wait_queue_head_t, rcu_cpu_wq);
 DEFINE_PER_CPU(char, rcu_cpu_has_work);
 static char rcu_kthreads_spawnable;
 
@@ -1476,7 +1475,7 @@ static void invoke_rcu_cpu_kthread(void)
                local_irq_restore(flags);
                return;
        }
-       wake_up(&__get_cpu_var(rcu_cpu_wq));
+       wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
        local_irq_restore(flags);
 }
 
@@ -1596,14 +1595,12 @@ static int rcu_cpu_kthread(void *arg)
        unsigned long flags;
        int spincnt = 0;
        unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
-       wait_queue_head_t *wqp = &per_cpu(rcu_cpu_wq, cpu);
        char work;
        char *workp = &per_cpu(rcu_cpu_has_work, cpu);
 
        for (;;) {
                *statusp = RCU_KTHREAD_WAITING;
-               wait_event_interruptible(*wqp,
-                                        *workp != 0 || kthread_should_stop());
+               rcu_wait(*workp != 0 || kthread_should_stop());
                local_bh_disable();
                if (rcu_cpu_kthread_should_stop(cpu)) {
                        local_bh_enable();
@@ -1654,7 +1651,6 @@ static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
        per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
        WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
        per_cpu(rcu_cpu_kthread_task, cpu) = t;
-       wake_up_process(t);
        sp.sched_priority = RCU_KTHREAD_PRIO;
        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
        return 0;
@@ -1677,8 +1673,7 @@ static int rcu_node_kthread(void *arg)
 
        for (;;) {
                rnp->node_kthread_status = RCU_KTHREAD_WAITING;
-               wait_event_interruptible(rnp->node_wq,
-                                        atomic_read(&rnp->wakemask) != 0);
+               rcu_wait(atomic_read(&rnp->wakemask) != 0);
                rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
                raw_spin_lock_irqsave(&rnp->lock, flags);
                mask = atomic_xchg(&rnp->wakemask, 0);
@@ -1762,7 +1757,6 @@ static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
                raw_spin_lock_irqsave(&rnp->lock, flags);
                rnp->node_kthread_task = t;
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
-               wake_up_process(t);
                sp.sched_priority = 99;
                sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
        }
@@ -1779,21 +1773,16 @@ static int __init rcu_spawn_kthreads(void)
 
        rcu_kthreads_spawnable = 1;
        for_each_possible_cpu(cpu) {
-               init_waitqueue_head(&per_cpu(rcu_cpu_wq, cpu));
                per_cpu(rcu_cpu_has_work, cpu) = 0;
                if (cpu_online(cpu))
                        (void)rcu_spawn_one_cpu_kthread(cpu);
        }
        rnp = rcu_get_root(rcu_state);
-       init_waitqueue_head(&rnp->node_wq);
-       rcu_init_boost_waitqueue(rnp);
        (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
-       if (NUM_RCU_NODES > 1)
-               rcu_for_each_leaf_node(rcu_state, rnp) {
-                       init_waitqueue_head(&rnp->node_wq);
-                       rcu_init_boost_waitqueue(rnp);
+       if (NUM_RCU_NODES > 1) {
+               rcu_for_each_leaf_node(rcu_state, rnp)
                        (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
-               }
+       }
        return 0;
 }
 early_initcall(rcu_spawn_kthreads);
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 561dcb9a8d2c269e42e19b2263bdd82ee1a1b452..7b9a08b4aaea01d48eacb8781296a7be205fb898 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -159,9 +159,6 @@ struct rcu_node {
        struct task_struct *boost_kthread_task;
                                /* kthread that takes care of priority */
                                /*  boosting for this rcu_node structure. */
-       wait_queue_head_t boost_wq;
-                               /* Wait queue on which to park the boost */
-                               /*  kthread. */
        unsigned int boost_kthread_status;
                                /* State of boost_kthread_task for tracing. */
        unsigned long n_tasks_boosted;
@@ -188,9 +185,6 @@ struct rcu_node {
                                /* kthread that takes care of this rcu_node */
                                /*  structure, for example, awakening the */
                                /*  per-CPU kthreads as needed. */
-       wait_queue_head_t node_wq;
-                               /* Wait queue on which to park the per-node */
-                               /*  kthread. */
        unsigned int node_kthread_status;
                                /* State of node_kthread_task for tracing. */
 } ____cacheline_internodealigned_in_smp;
@@ -336,6 +330,16 @@ struct rcu_data {
                                                /*  scheduling clock irq */
                                                /*  before ratting on them. */
 
+#define rcu_wait(cond)                                                 \
+do {                                                                   \
+       for (;;) {                                                      \
+               set_current_state(TASK_INTERRUPTIBLE);                  \
+               if (cond)                                               \
+                       break;                                          \
+               schedule();                                             \
+       }                                                               \
+       __set_current_state(TASK_RUNNING);                              \
+} while (0)
 
 /*
  * RCU global state, including node hierarchy.  This hierarchy is
@@ -445,7 +449,6 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
 static void rcu_preempt_send_cbs_to_online(void);
 static void __init __rcu_init_preempt(void);
 static void rcu_needs_cpu_flush(void);
-static void __init rcu_init_boost_waitqueue(struct rcu_node *rnp);
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
                                          cpumask_var_t cm);
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index ed339702481dfc3a7746a10a38cc3309fa82e238..049f2787a9840cf0c944aeda59afeffbee6f57a5 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1196,8 +1196,7 @@ static int rcu_boost_kthread(void *arg)
 
        for (;;) {
                rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
-               wait_event_interruptible(rnp->boost_wq, rnp->boost_tasks ||
-                                                       rnp->exp_tasks);
+               rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
                rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
                more2boost = rcu_boost(rnp);
                if (more2boost)
@@ -1274,14 +1273,6 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
        rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
 }
 
-/*
- * Initialize the RCU-boost waitqueue.
- */
-static void __init rcu_init_boost_waitqueue(struct rcu_node *rnp)
-{
-       init_waitqueue_head(&rnp->boost_wq);
-}
-
 /*
  * Create an RCU-boost kthread for the specified node if one does not
  * already exist.  We only create this kthread for preemptible RCU.
@@ -1306,7 +1297,6 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
        raw_spin_lock_irqsave(&rnp->lock, flags);
        rnp->boost_kthread_task = t;
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
-       wake_up_process(t);
        sp.sched_priority = RCU_KTHREAD_PRIO;
        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
        return 0;
@@ -1328,10 +1318,6 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
 {
 }
 
-static void __init rcu_init_boost_waitqueue(struct rcu_node *rnp)
-{
-}
-
 static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
                                                 struct rcu_node *rnp,
                                                 int rnp_index)
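
A closing note on the wake_up_process() calls removed from the three
spawn paths: kthread_create() leaves a newly created thread sleeping
until its first wakeup, so under the new scheme the first work-triggered
wake_up_process() is what actually starts the kthread.  Continuing the
hypothetical sketch above:

    example_task = kthread_create(example_kthread, NULL, "example");
    /* No wake_up_process() here; the first example_queue_work() starts it. */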