[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20240117163511.88173-2-longman@redhat.com>
Date: Wed, 17 Jan 2024 11:35:04 -0500
From: Waiman Long <longman@...hat.com>
To: Tejun Heo <tj@...nel.org>,
Zefan Li <lizefan.x@...edance.com>,
Johannes Weiner <hannes@...xchg.org>,
Frederic Weisbecker <frederic@...nel.org>,
Jonathan Corbet <corbet@....net>,
"Paul E. McKenney" <paulmck@...nel.org>,
Neeraj Upadhyay <quic_neeraju@...cinc.com>,
Joel Fernandes <joel@...lfernandes.org>,
Josh Triplett <josh@...htriplett.org>,
Boqun Feng <boqun.feng@...il.com>,
Steven Rostedt <rostedt@...dmis.org>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
Lai Jiangshan <jiangshanlai@...il.com>,
Zqiang <qiang.zhang1211@...il.com>,
Davidlohr Bueso <dave@...olabs.net>,
Shuah Khan <shuah@...nel.org>
Cc: cgroups@...r.kernel.org,
linux-doc@...r.kernel.org,
linux-kernel@...r.kernel.org,
rcu@...r.kernel.org,
linux-kselftest@...r.kernel.org,
Mrunal Patel <mpatel@...hat.com>,
Ryan Phillips <rphillips@...hat.com>,
Brent Rowsell <browsell@...hat.com>,
Peter Hunt <pehunt@...hat.com>,
Cestmir Kalina <ckalina@...hat.com>,
Nicolas Saenz Julienne <nsaenz@...nel.org>,
Alex Gladkov <agladkov@...hat.com>,
Marcelo Tosatti <mtosatti@...hat.com>,
Phil Auld <pauld@...hat.com>,
Paul Gortmaker <paul.gortmaker@...driver.com>,
Daniel Bristot de Oliveira <bristot@...nel.org>,
Juri Lelli <juri.lelli@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Costa Shulyupin <cshulyup@...hat.com>,
Waiman Long <longman@...hat.com>
Subject: [RFC PATCH 1/8] rcu/nocb: Pass a cpumask instead of a single CPU to offload/deoffload
From: Frederic Weisbecker <frederic@...nel.org>
Currently the interface to toggle callbacks offloading state only takes
a single CPU per call. Now driving RCU NOCB through cpusets requires
the ability to change the offloading state of a whole set of CPUs.
To make it easier, extend the (de-)offloading interface to support a
cpumask.
Signed-off-by: Frederic Weisbecker <frederic@...nel.org>
Cc: Zefan Li <lizefan.x@...edance.com>
Cc: Tejun Heo <tj@...nel.org>
Cc: Johannes Weiner <hannes@...xchg.org>
Cc: Paul E. McKenney <paulmck@...nel.org>
Cc: Phil Auld <pauld@...hat.com>
Cc: Nicolas Saenz Julienne <nsaenz@...nel.org>
Cc: Marcelo Tosatti <mtosatti@...hat.com>
Cc: Paul Gortmaker <paul.gortmaker@...driver.com>
Cc: Waiman Long <longman@...hat.com>
Cc: Daniel Bristot de Oliveira <bristot@...nel.org>
Cc: Peter Zijlstra <peterz@...radead.org>
---
include/linux/rcupdate.h | 9 ++--
kernel/rcu/rcutorture.c | 4 +-
kernel/rcu/tree_nocb.h | 102 ++++++++++++++++++++++++++-------------
3 files changed, 76 insertions(+), 39 deletions(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 0746b1b0b663..b649344075d2 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -142,13 +142,14 @@ static inline void rcu_irq_work_resched(void) { }
#ifdef CONFIG_RCU_NOCB_CPU
void rcu_init_nohz(void);
-int rcu_nocb_cpu_offload(int cpu);
-int rcu_nocb_cpu_deoffload(int cpu);
+int rcu_nocb_cpumask_update(const struct cpumask *cpumask, bool offload);
void rcu_nocb_flush_deferred_wakeup(void);
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
static inline void rcu_init_nohz(void) { }
-static inline int rcu_nocb_cpu_offload(int cpu) { return -EINVAL; }
-static inline int rcu_nocb_cpu_deoffload(int cpu) { return 0; }
+static inline int rcu_nocb_cpumask_update(const struct cpumask *cpumask, bool offload)
+{
+ return -EINVAL;
+}
static inline void rcu_nocb_flush_deferred_wakeup(void) { }
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 7567ca8e743c..228a5488eb5e 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -2140,10 +2140,10 @@ static int rcu_nocb_toggle(void *arg)
r = torture_random(&rand);
cpu = (r >> 1) % (maxcpu + 1);
if (r & 0x1) {
- rcu_nocb_cpu_offload(cpu);
+ rcu_nocb_cpumask_update(cpumask_of(cpu), true);
atomic_long_inc(&n_nocb_offload);
} else {
- rcu_nocb_cpu_deoffload(cpu);
+ rcu_nocb_cpumask_update(cpumask_of(cpu), false);
atomic_long_inc(&n_nocb_deoffload);
}
toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval;
diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index 4efbf7333d4e..60b0a15ed6e2 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -1203,29 +1203,23 @@ static long rcu_nocb_rdp_deoffload(void *arg)
return 0;
}
-int rcu_nocb_cpu_deoffload(int cpu)
+static int rcu_nocb_cpu_deoffload(int cpu)
{
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
int ret = 0;
- cpus_read_lock();
- mutex_lock(&rcu_state.barrier_mutex);
- if (rcu_rdp_is_offloaded(rdp)) {
- if (cpu_online(cpu)) {
- ret = work_on_cpu(cpu, rcu_nocb_rdp_deoffload, rdp);
- if (!ret)
- cpumask_clear_cpu(cpu, rcu_nocb_mask);
- } else {
- pr_info("NOCB: Cannot CB-deoffload offline CPU %d\n", rdp->cpu);
- ret = -EINVAL;
- }
- }
- mutex_unlock(&rcu_state.barrier_mutex);
- cpus_read_unlock();
+ if (cpu_is_offline(cpu))
+ return -EINVAL;
+
+ if (!rcu_rdp_is_offloaded(rdp))
+ return 0;
+
+ ret = work_on_cpu(cpu, rcu_nocb_rdp_deoffload, rdp);
+ if (!ret)
+ cpumask_clear_cpu(cpu, rcu_nocb_mask);
return ret;
}
-EXPORT_SYMBOL_GPL(rcu_nocb_cpu_deoffload);
static long rcu_nocb_rdp_offload(void *arg)
{
@@ -1236,12 +1230,6 @@ static long rcu_nocb_rdp_offload(void *arg)
struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());
- /*
- * For now we only support re-offload, ie: the rdp must have been
- * offloaded on boot first.
- */
- if (!rdp->nocb_gp_rdp)
- return -EINVAL;
if (WARN_ON_ONCE(!rdp_gp->nocb_gp_kthread))
return -EINVAL;
@@ -1288,29 +1276,77 @@ static long rcu_nocb_rdp_offload(void *arg)
return 0;
}
-int rcu_nocb_cpu_offload(int cpu)
+static int rcu_nocb_cpu_offload(int cpu)
{
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
- int ret = 0;
+ int ret;
+
+ if (cpu_is_offline(cpu))
+ return -EINVAL;
+
+ if (rcu_rdp_is_offloaded(rdp))
+ return 0;
+
+ ret = work_on_cpu(cpu, rcu_nocb_rdp_offload, rdp);
+ if (!ret)
+ cpumask_set_cpu(cpu, rcu_nocb_mask);
+
+ return ret;
+}
+
+int rcu_nocb_cpumask_update(const struct cpumask *cpumask, bool offload)
+{
+ int cpu;
+ int err = 0;
+ int err_cpu;
+ cpumask_var_t saved_nocb_mask;
+
+ if (!alloc_cpumask_var(&saved_nocb_mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ cpumask_copy(saved_nocb_mask, rcu_nocb_mask);
cpus_read_lock();
mutex_lock(&rcu_state.barrier_mutex);
- if (!rcu_rdp_is_offloaded(rdp)) {
- if (cpu_online(cpu)) {
- ret = work_on_cpu(cpu, rcu_nocb_rdp_offload, rdp);
- if (!ret)
- cpumask_set_cpu(cpu, rcu_nocb_mask);
+ for_each_cpu(cpu, cpumask) {
+ if (offload) {
+ err = rcu_nocb_cpu_offload(cpu);
+ if (err < 0) {
+ err_cpu = cpu;
+ pr_err("NOCB: offload cpu %d failed (%d)\n", cpu, err);
+ break;
+ }
} else {
- pr_info("NOCB: Cannot CB-offload offline CPU %d\n", rdp->cpu);
- ret = -EINVAL;
+ err = rcu_nocb_cpu_deoffload(cpu);
+ if (err < 0) {
+ err_cpu = cpu;
+ pr_err("NOCB: deoffload cpu %d failed (%d)\n", cpu, err);
+ break;
+ }
+ }
+ }
+
+ /* Rollback in case of error */
+ if (err < 0) {
+ err_cpu = cpu;
+ for_each_cpu(cpu, cpumask) {
+ if (err_cpu == cpu)
+ break;
+ if (cpumask_test_cpu(cpu, saved_nocb_mask))
+ WARN_ON_ONCE(rcu_nocb_cpu_offload(cpu));
+ else
+ WARN_ON_ONCE(rcu_nocb_cpu_deoffload(cpu));
}
}
+
mutex_unlock(&rcu_state.barrier_mutex);
cpus_read_unlock();
- return ret;
+ free_cpumask_var(saved_nocb_mask);
+
+ return err;
}
-EXPORT_SYMBOL_GPL(rcu_nocb_cpu_offload);
+EXPORT_SYMBOL_GPL(rcu_nocb_cpumask_update);
#ifdef CONFIG_RCU_LAZY
static unsigned long
--
2.39.3
Powered by blists - more mailing lists