Message-ID: <tip-b3e6aaa8d94d618e685c4df08bef991a4fb43923@git.kernel.org>
Date: Fri, 6 Apr 2018 14:53:20 -0700
From: tip-bot for Ming Lei <tipbot@...or.com>
To: linux-tip-commits@...r.kernel.org
Cc: mingo@...nel.org, hch@....de, loberman@...hat.com,
linux-kernel@...r.kernel.org, hpa@...or.com, ming.lei@...hat.com,
axboe@...nel.dk, tglx@...utronix.de, hch@...radead.org
Subject: [tip:irq/core] genirq/affinity: Move actual irq vector spreading
into a helper function

Commit-ID:  b3e6aaa8d94d618e685c4df08bef991a4fb43923
Gitweb: https://git.kernel.org/tip/b3e6aaa8d94d618e685c4df08bef991a4fb43923
Author: Ming Lei <ming.lei@...hat.com>
AuthorDate: Thu, 8 Mar 2018 18:53:56 +0800
Committer: Thomas Gleixner <tglx@...utronix.de>
CommitDate: Fri, 6 Apr 2018 12:19:51 +0200

genirq/affinity: Move actual irq vector spreading into a helper function

No functional change, just preparation for converting to 2-stage irq
vector spreading.

Signed-off-by: Ming Lei <ming.lei@...hat.com>
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Reviewed-by: Christoph Hellwig <hch@....de>
Cc: Jens Axboe <axboe@...nel.dk>
Cc: linux-block@...r.kernel.org
Cc: Laurence Oberman <loberman@...hat.com>
Cc: Christoph Hellwig <hch@...radead.org>
Link: https://lkml.kernel.org/r/20180308105358.1506-3-ming.lei@redhat.com
---
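
[ Note, illustration only and not part of the commit: the point of the
  refactoring is that the helper now takes the cpumask to spread across
  as an argument instead of hardcoding cpu_possible_mask, which is what
  a later 2-stage conversion needs. A rough sketch of that intended
  future usage, assuming a hypothetical extra start-vector parameter
  and a hypothetical "npresmsk" scratch mask, neither of which exists
  in this patch:

	/* Stage 1: spread vectors across the CPUs present right now */
	curvec += irq_build_affinity_masks(nvecs, affd, node_to_cpumask,
					   cpu_present_mask, nmsk, masks);

	/* Stage 2: spread the rest across CPUs that may be hotplugged */
	cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
	curvec += irq_build_affinity_masks(nvecs, affd, node_to_cpumask,
					   npresmsk, nmsk, masks);

  As written here both calls would start filling masks at the same
  offset, so the real conversion would also have to teach the helper
  where to start. ]
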
kernel/irq/affinity.c | 97 +++++++++++++++++++++++++++++----------------------
1 file changed, 55 insertions(+), 42 deletions(-)
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index 272c968d9ef1..a9c36904500c 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -94,50 +94,19 @@ static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask,
 	return nodes;
 }
 
-/**
- * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
- * @nvecs:	The total number of vectors
- * @affd:	Description of the affinity requirements
- *
- * Returns the masks pointer or NULL if allocation failed.
- */
-struct cpumask *
-irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
+static int irq_build_affinity_masks(int nvecs, const struct irq_affinity *affd,
+				    cpumask_var_t *node_to_cpumask,
+				    const struct cpumask *cpu_mask,
+				    struct cpumask *nmsk,
+				    struct cpumask *masks)
 {
-	int n, nodes, cpus_per_vec, extra_vecs, curvec;
 	int affv = nvecs - affd->pre_vectors - affd->post_vectors;
 	int last_affv = affv + affd->pre_vectors;
+	int curvec = affd->pre_vectors;
 	nodemask_t nodemsk = NODE_MASK_NONE;
-	struct cpumask *masks = NULL;
-	cpumask_var_t nmsk, *node_to_cpumask;
+	int n, nodes, cpus_per_vec, extra_vecs;
 
-	/*
-	 * If there aren't any vectors left after applying the pre/post
-	 * vectors don't bother with assigning affinity.
-	 */
-	if (!affv)
-		return NULL;
-
-	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
-		return NULL;
-
-	node_to_cpumask = alloc_node_to_cpumask();
-	if (!node_to_cpumask)
-		goto outcpumsk;
-
-	masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
-	if (!masks)
-		goto outnodemsk;
-
-	/* Fill out vectors at the beginning that don't need affinity */
-	for (curvec = 0; curvec < affd->pre_vectors; curvec++)
-		cpumask_copy(masks + curvec, irq_default_affinity);
-
-	/* Stabilize the cpumasks */
-	get_online_cpus();
-	build_node_to_cpumask(node_to_cpumask);
-	nodes = get_nodes_in_cpumask(node_to_cpumask, cpu_possible_mask,
-				     &nodemsk);
+	nodes = get_nodes_in_cpumask(node_to_cpumask, cpu_mask, &nodemsk);
 
 	/*
 	 * If the number of nodes in the mask is greater than or equal the
@@ -150,7 +119,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 			if (++curvec == last_affv)
 				break;
 		}
-		goto done;
+		goto out;
 	}
 
 	for_each_node_mask(n, nodemsk) {
@@ -160,7 +129,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 		vecs_per_node = (affv - (curvec - affd->pre_vectors)) / nodes;
 
 		/* Get the cpus on this node which are in the mask */
-		cpumask_and(nmsk, cpu_possible_mask, node_to_cpumask[n]);
+		cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
 
 		/* Calculate the number of cpus per vector */
 		ncpus = cpumask_weight(nmsk);
@@ -186,7 +155,51 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 		--nodes;
 	}
 
-done:
+out:
+	return curvec - affd->pre_vectors;
+}
+
+/**
+ * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
+ * @nvecs:	The total number of vectors
+ * @affd:	Description of the affinity requirements
+ *
+ * Returns the masks pointer or NULL if allocation failed.
+ */
+struct cpumask *
+irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
+{
+	cpumask_var_t nmsk, *node_to_cpumask;
+	struct cpumask *masks = NULL;
+	int curvec;
+
+	/*
+	 * If there aren't any vectors left after applying the pre/post
+	 * vectors don't bother with assigning affinity.
+	 */
+	if (nvecs == affd->pre_vectors + affd->post_vectors)
+		return NULL;
+
+	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
+		return NULL;
+
+	node_to_cpumask = alloc_node_to_cpumask();
+	if (!node_to_cpumask)
+		goto outcpumsk;
+
+	masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
+	if (!masks)
+		goto outnodemsk;
+
+	/* Fill out vectors at the beginning that don't need affinity */
+	for (curvec = 0; curvec < affd->pre_vectors; curvec++)
+		cpumask_copy(masks + curvec, irq_default_affinity);
+
+	/* Stabilize the cpumasks */
+	get_online_cpus();
+	build_node_to_cpumask(node_to_cpumask);
+	curvec += irq_build_affinity_masks(nvecs, affd, node_to_cpumask,
+					   cpu_possible_mask, nmsk, masks);
 	put_online_cpus();
 
 	/* Fill out vectors at the end that don't need affinity */
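
[ Note, illustration only and not from the patch: per node,
  irq_build_affinity_masks() divides the node's CPUs evenly across the
  vectors assigned to that node and hands the integer-division
  remainder out one extra CPU at a time (the cpus_per_vec/extra_vecs
  variables above; the loop itself sits in elided hunk context). A
  standalone userspace demo of that arithmetic:

#include <stdio.h>

/*
 * Split ncpus CPUs across nvecs vectors the way kernel/irq/affinity.c
 * does: integer division for the base share, with the remainder
 * distributed as one extra CPU each to the first "extra" vectors.
 */
static void spread(int ncpus, int nvecs)
{
	int extra = ncpus - nvecs * (ncpus / nvecs);	/* rounding error */
	int v;

	for (v = 0; v < nvecs; v++) {
		int cpus_per_vec = ncpus / nvecs;

		/* Compensate for the rounding error */
		if (extra) {
			cpus_per_vec++;
			extra--;
		}
		printf("vector %d -> %d cpus\n", v, cpus_per_vec);
	}
}

int main(void)
{
	spread(10, 4);	/* prints 3, 3, 2, 2 */
	return 0;
}
]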