Message-Id: <20250702-isolcpus-io-queues-v7-1-557aa7eacce4@kernel.org>
Date: Wed, 02 Jul 2025 18:33:51 +0200
From: Daniel Wagner <wagi@...nel.org>
To: Jens Axboe <axboe@...nel.dk>, Keith Busch <kbusch@...nel.org>,
Christoph Hellwig <hch@....de>, Sagi Grimberg <sagi@...mberg.me>,
"Michael S. Tsirkin" <mst@...hat.com>
Cc: Aaron Tomlin <atomlin@...mlin.com>,
"Martin K. Petersen" <martin.petersen@...cle.com>,
Thomas Gleixner <tglx@...utronix.de>,
Costa Shulyupin <costa.shul@...hat.com>, Juri Lelli <juri.lelli@...hat.com>,
Valentin Schneider <vschneid@...hat.com>, Waiman Long <llong@...hat.com>,
Ming Lei <ming.lei@...hat.com>, Frederic Weisbecker <frederic@...nel.org>,
Mel Gorman <mgorman@...e.de>, Hannes Reinecke <hare@...e.de>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
linux-kernel@...r.kernel.org, linux-block@...r.kernel.org,
linux-nvme@...ts.infradead.org, megaraidlinux.pdl@...adcom.com,
linux-scsi@...r.kernel.org, storagedev@...rochip.com,
virtualization@...ts.linux.dev, GR-QLogic-Storage-Upstream@...vell.com,
Daniel Wagner <wagi@...nel.org>
Subject: [PATCH v7 01/10] lib/group_cpus: Add group_mask_cpus_evenly()

group_mask_cpus_evenly() allows the caller to pass in a CPU mask whose
CPUs are then distributed evenly into groups. This new function is a
more generic version of the existing group_cpus_evenly(), which always
distributes all present CPUs into groups.
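
As an illustration only (not part of this patch), a caller could
restrict the grouping to the housekeeping CPUs roughly as sketched
below; foo_build_queue_masks() is a made-up helper and the use of
HK_TYPE_MANAGED_IRQ is just one plausible choice of mask:

  #include <linux/group_cpus.h>
  #include <linux/sched/isolation.h>

  /* Hypothetical driver helper: spread nr_queues over the housekeeping
   * CPUs only, so isolated CPUs end up in none of the groups.
   */
  static struct cpumask *foo_build_queue_masks(unsigned int nr_queues,
                                               unsigned int *nr_masks)
  {
          const struct cpumask *hk =
                  housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);

          return group_mask_cpus_evenly(nr_queues, hk, nr_masks);
  }
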
Signed-off-by: Daniel Wagner <wagi@...nel.org>
---
 include/linux/group_cpus.h |  3 +++
 lib/group_cpus.c           | 64 +++++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 66 insertions(+), 1 deletion(-)
diff --git a/include/linux/group_cpus.h b/include/linux/group_cpus.h
index 9d4e5ab6c314b31c09fda82c3f6ac18f77e9de36..d4604dce1316a08400e982039006331f34c18ee8 100644
--- a/include/linux/group_cpus.h
+++ b/include/linux/group_cpus.h
@@ -10,5 +10,8 @@
#include <linux/cpu.h>
struct cpumask *group_cpus_evenly(unsigned int numgrps, unsigned int *nummasks);
+struct cpumask *group_mask_cpus_evenly(unsigned int numgrps,
+ const struct cpumask *cpu_mask,
+ unsigned int *nummasks);
#endif
diff --git a/lib/group_cpus.c b/lib/group_cpus.c
index 6d08ac05f371bf880571507d935d9eb501616a84..00c9b7a10c8acd29239fe20d2a30fdae22ef74a5 100644
--- a/lib/group_cpus.c
+++ b/lib/group_cpus.c
@@ -8,6 +8,7 @@
#include <linux/cpu.h>
#include <linux/sort.h>
#include <linux/group_cpus.h>
+#include <linux/sched/isolation.h>
#ifdef CONFIG_SMP
@@ -425,6 +426,59 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps, unsigned int *nummasks)
*nummasks = min(nr_present + nr_others, numgrps);
return masks;
}
+EXPORT_SYMBOL_GPL(group_cpus_evenly);
+
+/**
+ * group_mask_cpus_evenly - Group CPUs in a mask evenly per NUMA/CPU locality
+ * @numgrps: number of groups
+ * @cpu_mask: CPUs to consider for the grouping
+ * @nummasks: number of initialized cpumasks
+ *
+ * Return: cpumask array if successful, NULL otherwise. Each element of
+ * the array holds the CPUs assigned to that group.
+ *
+ * Try to put CPUs that are close in terms of CPU and NUMA locality into
+ * the same group, and distribute the CPUs in @cpu_mask evenly across them.
+ */
+struct cpumask *group_mask_cpus_evenly(unsigned int numgrps,
+ const struct cpumask *cpu_mask,
+ unsigned int *nummasks)
+{
+ cpumask_var_t *node_to_cpumask;
+ cpumask_var_t nmsk;
+ int ret = -ENOMEM;
+ struct cpumask *masks = NULL;
+
+ if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
+ return NULL;
+
+ node_to_cpumask = alloc_node_to_cpumask();
+ if (!node_to_cpumask)
+ goto fail_nmsk;
+
+ masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
+ if (!masks)
+ goto fail_node_to_cpumask;
+
+ build_node_to_cpumask(node_to_cpumask);
+
+ ret = __group_cpus_evenly(0, numgrps, node_to_cpumask, cpu_mask, nmsk,
+ masks);
+
+fail_node_to_cpumask:
+ free_node_to_cpumask(node_to_cpumask);
+
+fail_nmsk:
+ free_cpumask_var(nmsk);
+ if (ret < 0) {
+ kfree(masks);
+ return NULL;
+ }
+ *nummasks = ret;
+ return masks;
+}
+EXPORT_SYMBOL_GPL(group_mask_cpus_evenly);
+
#else /* CONFIG_SMP */
struct cpumask *group_cpus_evenly(unsigned int numgrps, unsigned int *nummasks)
{
@@ -442,5 +496,13 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps, unsigned int *nummasks)
*nummasks = 1;
return masks;
}
-#endif /* CONFIG_SMP */
EXPORT_SYMBOL_GPL(group_cpus_evenly);
+
+struct cpumask *group_mask_cpus_evenly(unsigned int numgrps,
+ const struct cpumask *cpu_mask,
+ unsigned int *nummasks)
+{
+ return group_cpus_evenly(numgrps, nummasks);
+}
+EXPORT_SYMBOL_GPL(group_mask_cpus_evenly);
+#endif /* CONFIG_SMP */
--
2.50.0