Message-Id: <20251013041348.350886-1-luogengkun@huaweicloud.com>
Date: Mon, 13 Oct 2025 04:13:48 +0000
From: Luo Gengkun <luogengkun@...weicloud.com>
To: mingo@...hat.com
Cc: peterz@...radead.org,
juri.lelli@...hat.com,
vincent.guittot@...aro.org,
dietmar.eggemann@....com,
rostedt@...dmis.org,
bsegall@...gle.com,
mgorman@...e.de,
vschneid@...hat.com,
huang.ying.caritas@...il.com,
linux-kernel@...r.kernel.org
Subject: [PATCH] sched/topology: Fix memory leak in the error path of sched_init_numa

In sched_init_numa(), masks is used to store the allocated cpumasks, but
the error paths return directly without freeing the memory that has
already been allocated.

To fix this, extract the freeing logic from sched_reset_numa() into a new
helper function, free_masks(), and call it on the error paths.

Fixes: 0fb3978b0aac ("sched/numa: Fix NUMA topology for systems with CPU-less nodes")
Signed-off-by: Luo Gengkun <luogengkun@...weicloud.com>
---
kernel/sched/topology.c | 33 +++++++++++++++++++++------------
1 file changed, 21 insertions(+), 12 deletions(-)
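
Note for readers (not part of the change): the error-path reuse of
free_masks() works because masks and each masks[i] are allocated with
kzalloc(), so any slot that was never filled is NULL and kfree(NULL) is a
no-op, including for the offline node skipped by for_each_cpu_node_but().
A minimal userspace C sketch of the same cleanup pattern, with made-up
names and calloc()/free() standing in for kzalloc()/kfree():

#include <stdlib.h>

/*
 * Free a partially built rows x cols table of buffers. Safe to call at
 * any point during allocation: calloc() zeroes the pointer arrays and
 * free(NULL) is a no-op, so slots that were never filled are skipped
 * harmlessly.
 */
static void free_table(char ***table, int rows, int cols)
{
	int i, j;

	if (!table)
		return;
	for (i = 0; i < rows; i++) {
		if (!table[i])
			continue;
		for (j = 0; j < cols; j++)
			free(table[i][j]);
		free(table[i]);
	}
	free(table);
}

static char ***alloc_table(int rows, int cols, size_t len)
{
	char ***table = calloc(rows, sizeof(*table));
	int i, j;

	if (!table)
		return NULL;
	for (i = 0; i < rows; i++) {
		table[i] = calloc(cols, sizeof(*table[i]));
		if (!table[i]) {
			free_table(table, rows, cols);
			return NULL;
		}
		for (j = 0; j < cols; j++) {
			table[i][j] = calloc(1, len);
			if (!table[i][j]) {
				/* partially filled row is handled fine */
				free_table(table, rows, cols);
				return NULL;
			}
		}
	}
	return table;
}

int main(void)
{
	char ***t = alloc_table(4, 8, 64);

	free_table(t, 4, 8);
	return 0;
}
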
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 444bdfdab731..fd03bb6669f5 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1924,6 +1924,20 @@ static void init_numa_topology_type(int offline_node)
 
 #define NR_DISTANCE_VALUES (1 << DISTANCE_BITS)
 
+static void free_masks(struct cpumask ***masks, int nr_levels)
+{
+	int i, j;
+
+	for (i = 0; i < nr_levels && masks; i++) {
+		if (!masks[i])
+			continue;
+		for_each_node(j)
+			kfree(masks[i][j]);
+		kfree(masks[i]);
+	}
+	kfree(masks);
+}
+
 void sched_init_numa(int offline_node)
 {
 	struct sched_domain_topology_level *tl;
@@ -2003,15 +2017,19 @@ void sched_init_numa(int offline_node)
 	 */
 	for (i = 0; i < nr_levels; i++) {
 		masks[i] = kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
-		if (!masks[i])
+		if (!masks[i]) {
+			free_masks(masks, nr_levels);
 			return;
+		}
 
 		for_each_cpu_node_but(j, offline_node) {
 			struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
 			int k;
 
-			if (!mask)
+			if (!mask) {
+				free_masks(masks, nr_levels);
 				return;
+			}
 
 			masks[i][j] = mask;
 
@@ -2079,18 +2097,9 @@ static void sched_reset_numa(void)
 	masks = sched_domains_numa_masks;
 	rcu_assign_pointer(sched_domains_numa_masks, NULL);
 	if (distances || masks) {
-		int i, j;
-
 		synchronize_rcu();
 		kfree(distances);
-		for (i = 0; i < nr_levels && masks; i++) {
-			if (!masks[i])
-				continue;
-			for_each_node(j)
-				kfree(masks[i][j]);
-			kfree(masks[i]);
-		}
-		kfree(masks);
+		free_masks(masks, nr_levels);
 	}
 	if (sched_domain_topology_saved) {
 		kfree(sched_domain_topology);
--
2.34.1