rcu: Switch callers from rcu_process_gp_end() to note_gp_changes()
author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>
           Tue, 19 Mar 2013 18:32:11 +0000 (11:32 -0700)
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>
           Mon, 10 Jun 2013 20:39:43 +0000 (13:39 -0700)
Because note_gp_changes() now incorporates the functionality of
rcu_process_gp_end(), this commit switches callers to the former and
eliminates the latter.  In addition, this commit changes the remaining
external calls from __rcu_process_gp_end() to __note_gp_changes().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
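
For reference, the consolidated wrapper that the call sites below now use
looks roughly as follows.  This is a minimal sketch rather than the verbatim
source of this commit; the gpnum fast-path comparison is assumed from the
surrounding patch series, while the locking pattern mirrors the
rcu_process_gp_end() wrapper removed below.

    /*
     * Sketch only: check the rcu_node fields outside the lock, bail if
     * nothing has changed or the lock cannot be acquired (irqs are already
     * off, so the work will be retried later), otherwise record the
     * grace-period changes under rnp->lock.
     */
    static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
    {
            unsigned long flags;
            struct rcu_node *rnp;

            local_irq_save(flags);
            rnp = rdp->mynode;
            if ((rdp->gpnum == ACCESS_ONCE(rnp->gpnum) &&   /* assumed check */
                 rdp->completed == ACCESS_ONCE(rnp->completed)) || /* outside lock */
                !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
                    local_irq_restore(flags);
                    return;
            }
            __note_gp_changes(rsp, rnp, rdp);
            raw_spin_unlock_irqrestore(&rnp->lock, flags);
    }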
kernel/rcutree.c
kernel/rcutree_plugin.h

diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 7eb2bc95300a1c8b42f58d1846a0fc558c7e9706..b04f134ab8bcdee18a596f024f4928d3908eb63a 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1343,28 +1343,6 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
 
-/*
- * Advance this CPU's callbacks, but only if the current grace period
- * has ended.  This may be called only from the CPU to whom the rdp
- * belongs.
- */
-static void
-rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
-{
-       unsigned long flags;
-       struct rcu_node *rnp;
-
-       local_irq_save(flags);
-       rnp = rdp->mynode;
-       if (rdp->completed == ACCESS_ONCE(rnp->completed) || /* outside lock. */
-           !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
-               local_irq_restore(flags);
-               return;
-       }
-       __rcu_process_gp_end(rsp, rnp, rdp);
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
-}
-
 /*
  * Did someone else start a new RCU grace period since we last
  * checked?  Update local state appropriately if so.  Must be called
@@ -1393,9 +1371,6 @@ check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
 static void
 rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
 {
-       /* Prior grace period ended, so advance callbacks for current CPU. */
-       __rcu_process_gp_end(rsp, rnp, rdp);
-
        /* Set state so that this CPU will detect the next quiescent state. */
        __note_gp_changes(rsp, rnp, rdp);
 }
@@ -1531,7 +1506,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
                ACCESS_ONCE(rnp->completed) = rsp->gpnum;
                rdp = this_cpu_ptr(rsp->rda);
                if (rnp == rdp->mynode)
-                       __rcu_process_gp_end(rsp, rnp, rdp);
+                       __note_gp_changes(rsp, rnp, rdp);
                nocb += rcu_future_gp_cleanup(rsp, rnp);
                raw_spin_unlock_irq(&rnp->lock);
                cond_resched();
@@ -2276,7 +2251,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
        WARN_ON_ONCE(rdp->beenonline == 0);
 
        /* Handle the end of a grace period that some other CPU ended.  */
-       rcu_process_gp_end(rsp, rdp);
+       note_gp_changes(rsp, rdp);
 
        /* Update RCU state based on any recent quiescent states. */
        rcu_check_quiescent_state(rsp, rdp);
@@ -2362,7 +2337,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
        if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
 
                /* Are we ignoring a completed grace period? */
-               rcu_process_gp_end(rsp, rdp);
+               note_gp_changes(rsp, rdp);
                check_for_new_grace_period(rsp, rdp);
 
                /* Start a new grace period if one not already started. */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 207844ea02260ed19910057171fdc328da16da52..f279148a0168035ea40f1b3141f1572d9959eda2 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1628,7 +1628,7 @@ static bool rcu_try_advance_all_cbs(void)
                 */
                if (rdp->completed != rnp->completed &&
                    rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL])
-                       rcu_process_gp_end(rsp, rdp);
+                       note_gp_changes(rsp, rdp);
 
                if (cpu_has_callbacks_ready_to_invoke(rdp))
                        cbs_ready = true;