Message-Id: <20240120025053.684838-9-yury.norov@gmail.com>
Date: Fri, 19 Jan 2024 18:50:52 -0800
From: Yury Norov <yury.norov@...il.com>
To: Andrew Morton <akpm@...ux-foundation.org>,
Thomas Gleixner <tglx@...utronix.de>,
Ming Lei <ming.lei@...hat.com>,
linux-kernel@...r.kernel.org
Cc: Yury Norov <yury.norov@...il.com>,
Andy Shevchenko <andriy.shevchenko@...ux.intel.com>,
Breno Leitao <leitao@...ian.org>,
Nathan Chancellor <nathan@...nel.org>,
Rasmus Villemoes <linux@...musvillemoes.dk>,
Zi Yan <ziy@...dia.com>
Subject: [PATCH 8/9] lib/group_cpus: rework group_cpus_evenly()

Leverage the cleanup machinery and drop most of the housekeeping code.
In particular, drop the unneeded 'ret' variable, which was erroneously
initialized to -ENOMEM. (A minimal sketch of the cleanup pattern this
relies on follows the diffstat below.)

Signed-off-by: Yury Norov <yury.norov@...il.com>
---
lib/group_cpus.c | 79 +++++++++++++++---------------------------------
1 file changed, 25 insertions(+), 54 deletions(-)
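
For readers unfamiliar with <linux/cleanup.h>, here is a minimal sketch
of the scope-based cleanup pattern the function is converted to below;
demo_build() is an illustrative name, not part of this patch or the
kernel tree:

#include <linux/cleanup.h>
#include <linux/slab.h>

static int *demo_build(size_t n)
{
	/* kfree() runs automatically once 'buf' goes out of scope ... */
	int *buf __free(kfree) = kcalloc(n, sizeof(*buf), GFP_KERNEL);

	if (!buf)
		return NULL;

	buf[0] = 42;

	/* ... unless ownership is transferred with return_ptr() */
	return_ptr(buf);
}

The DEFINE_FREE() added in the first hunk wires free_node_to_cpumask()
into the same machinery, so each early 'return NULL' in the reworked
group_cpus_evenly() releases whatever was already allocated, with no
goto-based unwinding.
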
diff --git a/lib/group_cpus.c b/lib/group_cpus.c
index b8c0c3ae2bbd..4c09df9eb886 100644
--- a/lib/group_cpus.c
+++ b/lib/group_cpus.c
@@ -76,6 +76,8 @@ static void free_node_to_cpumask(cpumask_var_t *masks)
kfree(masks);
}

+DEFINE_FREE(free_node_to_cpumask, cpumask_var_t *, if (_T) free_node_to_cpumask(_T));
+
static void build_node_to_cpumask(cpumask_var_t *masks)
{
int cpu;
@@ -345,26 +347,16 @@ static int __group_cpus_evenly(unsigned int startgrp, unsigned int numgrps,
*/
struct cpumask *group_cpus_evenly(unsigned int numgrps)
{
- unsigned int curgrp = 0, nr_present = 0, nr_others = 0;
- cpumask_var_t *node_to_cpumask;
- cpumask_var_t nmsk, npresmsk;
- int ret = -ENOMEM;
- struct cpumask *masks = NULL;
-
- if (!alloc_cpumask_var(&nmsk, GFP_KERNEL))
+ cpumask_var_t *node_to_cpumask __free(free_node_to_cpumask) = alloc_node_to_cpumask();
+ struct cpumask *masks __free(kfree) = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
+ cpumask_var_t npresmsk __free(free_cpumask_var) = CPUMASK_NULL;
+ cpumask_var_t nmsk __free(free_cpumask_var) = CPUMASK_NULL;
+ int curgrp, nr_present, nr_others;
+
+ if (!masks || !node_to_cpumask || !alloc_cpumask_var(&nmsk, GFP_KERNEL)
+ || !alloc_cpumask_var(&npresmsk, GFP_KERNEL))
return NULL;
- if (!alloc_cpumask_var(&npresmsk, GFP_KERNEL))
- goto fail_nmsk;
-
- node_to_cpumask = alloc_node_to_cpumask();
- if (!node_to_cpumask)
- goto fail_npresmsk;
-
- masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
- if (!masks)
- goto fail_node_to_cpumask;
-
build_node_to_cpumask(node_to_cpumask);

/*
@@ -382,11 +374,15 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps)
cpumask_copy(npresmsk, data_race(cpu_present_mask));

/* grouping present CPUs first */
- ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
- npresmsk, nmsk, masks);
- if (ret < 0)
- goto fail_build_affinity;
- nr_present = ret;
+ nr_present = __group_cpus_evenly(0, numgrps, node_to_cpumask, npresmsk, nmsk, masks);
+ if (nr_present < 0)
+ return NULL;
+
+ /* If npresmsk is empty */
+ if (!cpumask_andnot(npresmsk, cpu_possible_mask, npresmsk))
+ return_ptr(masks);
+
+ curgrp = nr_present < numgrps ? nr_present : 0;

/*
* Allocate non present CPUs starting from the next group to be
@@ -394,38 +390,13 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps)
* group space, assign the non present CPUs to the already
* allocated out groups.
*/
- if (nr_present >= numgrps)
- curgrp = 0;
- else
- curgrp = nr_present;
-
- if (cpumask_andnot(npresmsk, cpu_possible_mask, npresmsk))
- /* If npresmsk is not empty */
- ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
- npresmsk, nmsk, masks);
- else
- ret = 0;
-
- if (ret >= 0)
- nr_others = ret;
-
- fail_build_affinity:
- if (ret >= 0)
- WARN_ON(nr_present + nr_others < numgrps);
-
- fail_node_to_cpumask:
- free_node_to_cpumask(node_to_cpumask);
-
- fail_npresmsk:
- free_cpumask_var(npresmsk);
-
- fail_nmsk:
- free_cpumask_var(nmsk);
- if (ret < 0) {
- kfree(masks);
+ nr_others = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
+ npresmsk, nmsk, masks);
+ if (nr_others < 0)
return NULL;
- }
- return masks;
+
+ WARN_ON(nr_present + nr_others < numgrps);
+ return_ptr(masks);
}
#else /* CONFIG_SMP */
struct cpumask *group_cpus_evenly(unsigned int numgrps)
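
A note on the cpumask_andnot() check above: the helper stores
'src1 & ~src2' in 'dst' and returns false when the result is empty.
Expanded into an equivalent two-step form (illustrative only, not from
the patch), the early return reads:

	/* npresmsk := possible & ~present, i.e. the non-present CPUs */
	cpumask_andnot(npresmsk, cpu_possible_mask, npresmsk);

	/* all possible CPUs are present: the groups are complete */
	if (cpumask_empty(npresmsk))
		return_ptr(masks);

That is why the old 'else ret = 0' arm disappears: the empty case now
hands the finished groups straight back to the caller.
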
--
2.40.1