path: root/kernel/rcu/tree_exp.h
author     Boqun Feng <boqun.feng@gmail.com>              2018-06-15 12:06:31 -0700
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2018-07-12 12:36:06 -0700
commit     fcc63543650150629c8a873cbef3578770acecd9 (patch)
tree       0f71000b6ba8565a16ed3fc1a6af23d0cdf470e4 /kernel/rcu/tree_exp.h
parent     5257514d8885b2ebc11bf359ea1527282b47a5fd (diff)
download   linux-fcc63543650150629c8a873cbef3578770acecd9.tar.gz
rcu: Make expedited GPs handle CPU 0 being offline
Currently, the parallelized initialization of expedited grace periods uses the workqueue associated with each rcu_node structure's ->grplo field. This works fine unless that CPU is offline. This commit therefore queues the work on the lowest-numbered online CPU corresponding to this rcu_node structure, or on WORK_CPU_UNBOUND if none of that structure's CPUs are online.

Note that this patch uses cpu_is_offline() instead of the usual approach of checking bits in the rcu_node structure's ->qsmaskinitnext field. This is safe because preemption is disabled across both the cpu_is_offline() check and the call to queue_work_on().

Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
[ paulmck: Disable preemption to close offline race window. ]
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
[ paulmck: Apply Peter Zijlstra feedback on CPU selection. ]
Tested-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
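The idea can be illustrated with a minimal, self-contained sketch (not the patch itself): find the lowest-numbered online CPU at or above a leaf's ->grplo, fall back to WORK_CPU_UNBOUND when no CPU in the leaf's [grplo, grphi] range is online, and keep preemption disabled so the chosen CPU cannot go offline between the cpumask check and queue_work_on(). The helper name queue_on_first_online() is hypothetical.

#include <linux/cpumask.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>

/* Hypothetical helper sketching the commit's CPU-selection pattern. */
static void queue_on_first_online(struct workqueue_struct *wq,
				  struct work_struct *work,
				  int lo, int hi)
{
	int cpu;

	preempt_disable();
	/* Lowest-numbered online CPU with cpu >= lo. */
	cpu = cpumask_next(lo - 1, cpu_online_mask);
	/* No online CPU in [lo, hi]: let an unbound workqueue worker run it. */
	if (cpu > hi)
		cpu = WORK_CPU_UNBOUND;
	queue_work_on(cpu, wq, work);
	preempt_enable();
}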
Diffstat (limited to 'kernel/rcu/tree_exp.h')
-rw-r--r--	kernel/rcu/tree_exp.h	9
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index c6385ee1af65..b3df3b770afb 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -472,6 +472,7 @@ retry_ipi:
 static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
 				     smp_call_func_t func)
 {
+	int cpu;
 	struct rcu_node *rnp;
 
 	trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("reset"));
@@ -493,7 +494,13 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
 			continue;
 		}
 		INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
-		queue_work_on(rnp->grplo, rcu_par_gp_wq, &rnp->rew.rew_work);
+		preempt_disable();
+		cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask);
+		/* If all offline, queue the work on an unbound CPU. */
+		if (unlikely(cpu > rnp->grphi))
+			cpu = WORK_CPU_UNBOUND;
+		queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
+		preempt_enable();
 		rnp->exp_need_flush = true;
 	}
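Assuming the hypothetical queue_on_first_online() helper sketched above, the per-leaf loop in sync_rcu_exp_select_cpus() could then be written along these lines (early-exit checks omitted for brevity):

	/* Hypothetical usage: one work item per leaf rcu_node structure. */
	rcu_for_each_leaf_node(rsp, rnp) {
		INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
		queue_on_first_online(rcu_par_gp_wq, &rnp->rew.rew_work,
				      rnp->grplo, rnp->grphi);
		rnp->exp_need_flush = true;
	}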