Message-ID: <167397809771.4906.11909600068519788572.tip-bot2@tip-bot2>
Date: Tue, 17 Jan 2023 17:54:57 -0000
From: "tip-bot2 for Ming Lei" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: Ming Lei <ming.lei@...hat.com>,
Thomas Gleixner <tglx@...utronix.de>,
Christoph Hellwig <hch@....de>,
John Garry <john.g.garry@...cle.com>,
Jens Axboe <axboe@...nel.dk>, x86@...nel.org,
linux-kernel@...r.kernel.org, maz@...nel.org
Subject: [tip: irq/core] genirq/affinity: Don't pass irq_affinity_desc array
to irq_build_affinity_masks

The following commit has been merged into the irq/core branch of tip:

Commit-ID:     e7bdd7f0cbd1c001bb9b4d3313edc5ee094bc3f8
Gitweb:        https://git.kernel.org/tip/e7bdd7f0cbd1c001bb9b4d3313edc5ee094bc3f8
Author:        Ming Lei <ming.lei@...hat.com>
AuthorDate:    Tue, 27 Dec 2022 10:29:02 +08:00
Committer:     Thomas Gleixner <tglx@...utronix.de>
CommitterDate: Tue, 17 Jan 2023 18:50:06 +01:00

genirq/affinity: Don't pass irq_affinity_desc array to irq_build_affinity_masks

Prepare for abstracting irq_build_affinity_masks() into a public function
for assigning all CPUs evenly into several groups.

Instead of passing an irq_affinity_desc array to irq_build_affinity_masks(),
return a cpumask array, with each assigned group stored in one element of
the array.

This makes it possible to provide a generic interface for grouping all CPUs
evenly from a NUMA and CPU locality viewpoint. The cost is one extra
allocation in irq_build_affinity_masks(), which should be fine since the
allocation uses GFP_KERNEL and irq_build_affinity_masks() is a slow path
anyway.

Signed-off-by: Ming Lei <ming.lei@...hat.com>
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Reviewed-by: Christoph Hellwig <hch@....de>
Reviewed-by: John Garry <john.g.garry@...cle.com>
Reviewed-by: Jens Axboe <axboe@...nel.dk>
Link: https://lore.kernel.org/r/20221227022905.352674-4-ming.lei@redhat.com
---
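
A minimal userspace sketch of the new contract (illustrative only; the
types and names below are stand-ins, not the kernel API): the builder
allocates and returns the cpumask array, and the caller copies each group
into its own descriptors and frees the temporary array, mirroring the
irq_create_affinity_masks() hunk further down.

#include <stdio.h>
#include <stdlib.h>

typedef unsigned long cpumask_t;          /* stand-in for struct cpumask */

struct affinity_desc { cpumask_t mask; }; /* stand-in for irq_affinity_desc */

/* Stand-in for irq_build_affinity_masks(): returns numvecs masks or NULL. */
static cpumask_t *build_masks(unsigned int numvecs)
{
	/* one allocation, analogous to kcalloc(..., GFP_KERNEL) */
	cpumask_t *masks = calloc(numvecs, sizeof(*masks));

	if (!masks)
		return NULL;
	for (unsigned int i = 0; i < numvecs; i++)
		masks[i] = 1UL << i;	/* pretend CPU i lands in group i */
	return masks;
}

int main(void)
{
	enum { NVECS = 4 };
	struct affinity_desc descs[NVECS];
	cpumask_t *result = build_masks(NVECS);

	if (!result)
		return 1;
	/* mirror the diff: copy each group out, then free the array */
	for (unsigned int j = 0; j < NVECS; j++)
		descs[j].mask = result[j];
	free(result);

	for (unsigned int j = 0; j < NVECS; j++)
		printf("vector %u -> mask %#lx\n", j, descs[j].mask);
	return 0;
}

The extra copy plus the GFP_KERNEL allocation is the price of decoupling
the builder from irq_affinity_desc, which is what allows it to be lifted
into a generic CPU-grouping helper later in the series.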
 kernel/irq/affinity.c | 34 ++++++++++++++++++++++++----------
 1 file changed, 24 insertions(+), 10 deletions(-)

diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index da6379c..00bba10 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -249,7 +249,7 @@ static int __irq_build_affinity_masks(unsigned int startvec,
 				      cpumask_var_t *node_to_cpumask,
 				      const struct cpumask *cpu_mask,
 				      struct cpumask *nmsk,
-				      struct irq_affinity_desc *masks)
+				      struct cpumask *masks)
 {
 	unsigned int i, n, nodes, cpus_per_vec, extra_vecs, done = 0;
 	unsigned int last_affv = numvecs;
@@ -270,7 +270,7 @@ static int __irq_build_affinity_masks(unsigned int startvec,
 		for_each_node_mask(n, nodemsk) {
 			/* Ensure that only CPUs which are in both masks are set */
 			cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
-			cpumask_or(&masks[curvec].mask, &masks[curvec].mask, nmsk);
+			cpumask_or(&masks[curvec], &masks[curvec], nmsk);
 			if (++curvec == last_affv)
 				curvec = 0;
 		}
@@ -321,7 +321,7 @@ static int __irq_build_affinity_masks(unsigned int startvec,
 			 */
 			if (curvec >= last_affv)
 				curvec = 0;
-			irq_spread_init_one(&masks[curvec].mask, nmsk,
+			irq_spread_init_one(&masks[curvec], nmsk,
 					    cpus_per_vec);
 		}
 		done += nv->nvectors;
@@ -335,16 +335,16 @@ static int __irq_build_affinity_masks(unsigned int startvec,
  *	1) spread present CPU on these vectors
  *	2) spread other possible CPUs on these vectors
  */
-static int irq_build_affinity_masks(unsigned int numvecs,
-				    struct irq_affinity_desc *masks)
+static struct cpumask *irq_build_affinity_masks(unsigned int numvecs)
 {
 	unsigned int curvec = 0, nr_present = 0, nr_others = 0;
 	cpumask_var_t *node_to_cpumask;
 	cpumask_var_t nmsk, npresmsk;
 	int ret = -ENOMEM;
+	struct cpumask *masks = NULL;
 
 	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
-		return ret;
+		return NULL;
 
 	if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL))
 		goto fail_nmsk;
@@ -353,6 +353,10 @@ static int irq_build_affinity_masks(unsigned int numvecs,
 	if (!node_to_cpumask)
 		goto fail_npresmsk;
 
+	masks = kcalloc(numvecs, sizeof(*masks), GFP_KERNEL);
+	if (!masks)
+		goto fail_node_to_cpumask;
+
 	/* Stabilize the cpumasks */
 	cpus_read_lock();
 	build_node_to_cpumask(node_to_cpumask);
@@ -386,6 +390,7 @@ static int irq_build_affinity_masks(unsigned int numvecs,
 	if (ret >= 0)
 		WARN_ON(nr_present + nr_others < numvecs);
 
+ fail_node_to_cpumask:
 	free_node_to_cpumask(node_to_cpumask);
 
  fail_npresmsk:
@@ -393,7 +398,11 @@ static int irq_build_affinity_masks(unsigned int numvecs,
 
  fail_nmsk:
 	free_cpumask_var(nmsk);
-	return ret < 0 ? ret : 0;
+	if (ret < 0) {
+		kfree(masks);
+		return NULL;
+	}
+	return masks;
 }
 
 static void default_calc_sets(struct irq_affinity *affd, unsigned int affvecs)
@@ -457,13 +466,18 @@ irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
 	 */
 	for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) {
 		unsigned int this_vecs = affd->set_size[i];
-		int ret;
+		int j;
+		struct cpumask *result = irq_build_affinity_masks(this_vecs);
 
-		ret = irq_build_affinity_masks(this_vecs, &masks[curvec]);
-		if (ret) {
+		if (!result) {
 			kfree(masks);
 			return NULL;
 		}
+
+		for (j = 0; j < this_vecs; j++)
+			cpumask_copy(&masks[curvec + j].mask, &result[j]);
+		kfree(result);
+
 		curvec += this_vecs;
 		usedvecs += this_vecs;
 	}
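
For completeness, a standalone sketch (illustrative only) of the even-spread
rounding behind cpus_per_vec and extra_vecs in the hunks above: the
remainder CPUs are handed out one extra per group until exhausted.

#include <stdio.h>

int main(void)
{
	unsigned int ncpus = 10, nvecs = 4;
	unsigned int cpus_per_vec = ncpus / nvecs;	/* 2 */
	unsigned int extra_vecs = ncpus % nvecs;	/* 2 */

	for (unsigned int v = 0; v < nvecs; v++) {
		/* the first extra_vecs groups each take one extra CPU */
		unsigned int n = cpus_per_vec + (v < extra_vecs ? 1 : 0);

		printf("group %u gets %u CPUs\n", v, n);
	}
	/* prints 3, 3, 2, 2: all 10 CPUs placed, as evenly as possible */
	return 0;
}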