Message-Id: <9dba462b4d09a1a8a9fbb75740b74bf91a09a3e1.1623825725.git.viresh.kumar@linaro.org>
Date: Wed, 16 Jun 2021 12:18:08 +0530
From: Viresh Kumar <viresh.kumar@...aro.org>
To: Rafael Wysocki <rjw@...ysocki.net>,
Ionela Voinescu <ionela.voinescu@....com>,
Sudeep Holla <sudeep.holla@....com>,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
"Rafael J. Wysocki" <rafael@...nel.org>
Cc: Viresh Kumar <viresh.kumar@...aro.org>, linux-pm@...r.kernel.org,
Vincent Guittot <vincent.guittot@...aro.org>,
Qian Cai <quic_qiancai@...cinc.com>,
"Paul E . McKenney" <paulmck@...nel.org>,
linux-kernel@...r.kernel.org
Subject: [PATCH V2 2/3] arch_topology: Avoid use-after-free for scale_freq_data
Currently topology_scale_freq_tick() may end up using a pointer to
struct scale_freq_data which was previously cleared by
topology_clear_scale_freq_source(), as there is no protection in place
here. The users of topology_clear_scale_freq_source(), however, need a
guarantee that the previous scale_freq_data isn't used anymore once the
call returns.
Since topology_scale_freq_tick() is called from the scheduler tick, we
don't want to add locking there. Use the RCU update mechanism instead
(it is already used by the scheduler's utilization update path) to
guarantee race-free updates here.
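The pattern is the usual RCU publish/retract sequence. A minimal
userspace sketch of it, using liburcu (everything below, including the
names cur_data/reader and the build line, is made up for illustration
and is not part of the change):

/* rcu_sketch.c -- build (assumed): gcc rcu_sketch.c -lurcu -lpthread */
#include <pthread.h>
#include <stdlib.h>
#include <urcu.h>		/* liburcu: rcu_read_lock(), synchronize_rcu(), ... */

struct data { int val; };

static struct data *cur_data;	/* RCU-protected pointer, like sft_data */

static void *reader(void *arg)
{
	rcu_register_thread();
	for (int i = 0; i < 100000; i++) {
		rcu_read_lock();	/* the tick gets this from preemption being off */
		struct data *d = rcu_dereference(cur_data);
		if (d)
			(void)d->val;	/* never sees a freed pointer */
		rcu_read_unlock();
	}
	rcu_unregister_thread();
	return NULL;
}

int main(void)
{
	struct data *d = calloc(1, sizeof(*d));
	pthread_t t;

	rcu_register_thread();

	rcu_assign_pointer(cur_data, d);	/* publish: set_scale_freq_source() */
	pthread_create(&t, NULL, reader, NULL);

	rcu_assign_pointer(cur_data, NULL);	/* retract: clear_scale_freq_source() */
	synchronize_rcu();			/* wait for every reader to drop 'd' */
	free(d);				/* no use-after-free possible now */

	pthread_join(t, NULL);
	rcu_unregister_thread();
	return 0;
}

The only difference on the kernel side is that the tick-side reader
relies on preemption being disabled instead of an explicit
rcu_read_lock(), which is why topology_scale_freq_tick() uses
rcu_dereference_sched().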
Cc: Paul E. McKenney <paulmck@...nel.org>
Signed-off-by: Viresh Kumar <viresh.kumar@...aro.org>
---
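For reference, the kind of user the first paragraph above refers to
looks roughly like the hypothetical driver below; struct scale_freq_data,
the SCALE_FREQ_SOURCE_* values and the topology_*_scale_freq_source()
calls are the existing interface, everything named my_* is made up for
illustration:

#include <linux/arch_topology.h>
#include <linux/cpumask.h>

static void my_set_freq_scale(void)
{
	/* Read this CPU's HW counters and update the frequency scale. */
}

static struct scale_freq_data my_sfd = {
	.source		= SCALE_FREQ_SOURCE_ARCH,
	.set_freq_scale	= my_set_freq_scale,
};

static void my_counters_start(void)
{
	/* Publish: the tick on these CPUs may start using my_sfd right away. */
	topology_set_scale_freq_source(&my_sfd, cpu_present_mask);
}

static void my_counters_stop(void)
{
	/*
	 * Retract: with this patch, once this returns (synchronize_rcu()
	 * inside) no CPU can still be inside my_set_freq_scale() from
	 * topology_scale_freq_tick(), so my_sfd and the callback may go away.
	 */
	topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_ARCH, cpu_present_mask);
}

Without the synchronize_rcu() added here, my_counters_stop() could
return while some CPU is still executing my_set_freq_scale() from the
tick.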
drivers/base/arch_topology.c | 27 +++++++++++++++++++++------
1 file changed, 21 insertions(+), 6 deletions(-)
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index c1179edc0f3b..921312a8d957 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -18,10 +18,11 @@
 #include <linux/cpumask.h>
 #include <linux/init.h>
 #include <linux/percpu.h>
+#include <linux/rcupdate.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
 
-static DEFINE_PER_CPU(struct scale_freq_data *, sft_data);
+static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
 static struct cpumask scale_freq_counters_mask;
 static bool scale_freq_invariant;
 
@@ -66,16 +67,20 @@ void topology_set_scale_freq_source(struct scale_freq_data *data,
 	if (cpumask_empty(&scale_freq_counters_mask))
 		scale_freq_invariant = topology_scale_freq_invariant();
 
+	rcu_read_lock();
+
 	for_each_cpu(cpu, cpus) {
-		sfd = per_cpu(sft_data, cpu);
+		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));
 
 		/* Use ARCH provided counters whenever possible */
 		if (!sfd || sfd->source != SCALE_FREQ_SOURCE_ARCH) {
-			per_cpu(sft_data, cpu) = data;
+			rcu_assign_pointer(per_cpu(sft_data, cpu), data);
 			cpumask_set_cpu(cpu, &scale_freq_counters_mask);
 		}
 	}
 
+	rcu_read_unlock();
+
 	update_scale_freq_invariant(true);
 }
 EXPORT_SYMBOL_GPL(topology_set_scale_freq_source);
@@ -86,22 +91,32 @@ void topology_clear_scale_freq_source(enum scale_freq_source source,
 	struct scale_freq_data *sfd;
 	int cpu;
 
+	rcu_read_lock();
+
 	for_each_cpu(cpu, cpus) {
-		sfd = per_cpu(sft_data, cpu);
+		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));
 
 		if (sfd && sfd->source == source) {
-			per_cpu(sft_data, cpu) = NULL;
+			rcu_assign_pointer(per_cpu(sft_data, cpu), NULL);
 			cpumask_clear_cpu(cpu, &scale_freq_counters_mask);
 		}
 	}
 
+	rcu_read_unlock();
+
+	/*
+	 * Make sure all references to previous sft_data are dropped to avoid
+	 * use-after-free races.
+	 */
+	synchronize_rcu();
+
 	update_scale_freq_invariant(false);
 }
 EXPORT_SYMBOL_GPL(topology_clear_scale_freq_source);
 
 void topology_scale_freq_tick(void)
 {
-	struct scale_freq_data *sfd = *this_cpu_ptr(&sft_data);
+	struct scale_freq_data *sfd = rcu_dereference_sched(*this_cpu_ptr(&sft_data));
 
 	if (sfd)
 		sfd->set_freq_scale();
--
2.31.1.272.g89b43f80a514