Message-Id: <20240620085233.205690-3-xavier_qy@163.com>
Date: Thu, 20 Jun 2024 16:52:33 +0800
From: Xavier <xavier_qy@....com>
To: longman@...hat.com,
mkoutny@...e.com
Cc: lizefan.x@...edance.com,
tj@...nel.org,
hannes@...xchg.org,
cgroups@...r.kernel.org,
linux-kernel@...r.kernel.org,
Xavier <xavier_qy@....com>
Subject: [PATCH v4 2/2] cpuset: use Union-Find to optimize the merging of cpumasks

The current algorithm for finding the best partition in
generate_sched_domains() restarts from scratch every time two
partitions are merged, so the same pairs of cpusets are compared
again and again. Use Union-Find to merge overlapping cpusets
instead: with path compression and union by rank, the number of
lookups and merge comparisons is reduced substantially.

Signed-off-by: Xavier <xavier_qy@....com>
---
kernel/cgroup/cpuset.c | 95 +++++++++++++++---------------------------
1 file changed, 34 insertions(+), 61 deletions(-)
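
For readers unfamiliar with the data structure, below is a minimal
user-space sketch of the two union-find operations the patch relies on
(uf_find() with path compression, uf_union() with union by rank). It
mirrors the shape of the <linux/union_find.h> API but is illustrative
only, not the in-kernel implementation; the standalone main() stands in
for the overlap-merging loop in generate_sched_domains() below.

/* Illustrative user-space sketch only; the kernel's real
 * implementation lives behind <linux/union_find.h>. */
#include <stdio.h>

struct uf_node {
	struct uf_node *parent;	/* points to itself when the node is a root */
	unsigned int rank;	/* upper bound on the height of the subtree */
};

/* Find the set's root, halving the path on the way (path compression). */
static struct uf_node *uf_find(struct uf_node *node)
{
	struct uf_node *parent;

	while (node->parent != node) {
		parent = node->parent;
		node->parent = parent->parent;	/* skip a level */
		node = parent;
	}
	return node;
}

/* Merge two sets, hanging the lower-rank root under the higher one. */
static void uf_union(struct uf_node *a, struct uf_node *b)
{
	struct uf_node *ra = uf_find(a);
	struct uf_node *rb = uf_find(b);

	if (ra == rb)		/* already in the same set */
		return;
	if (ra->rank < rb->rank) {
		ra->parent = rb;
	} else if (ra->rank > rb->rank) {
		rb->parent = ra;
	} else {
		rb->parent = ra;
		ra->rank++;	/* equal ranks: the merged tree grows taller */
	}
}

int main(void)
{
	struct uf_node nodes[4];
	int i;

	/* Each "cpuset" starts in its own singleton set. */
	for (i = 0; i < 4; i++) {
		nodes[i].parent = &nodes[i];
		nodes[i].rank = 0;
	}

	uf_union(&nodes[0], &nodes[1]);	/* cpusets 0 and 1 overlap */
	uf_union(&nodes[2], &nodes[3]);	/* cpusets 2 and 3 overlap */

	/* Two roots remain -> two sched domains. */
	for (i = 0; i < 4; i++)
		printf("cpuset %d -> root %d\n", i,
		       (int)(uf_find(&nodes[i]) - nodes));
	return 0;
}

In this toy run, nodes 0/1 share one root and nodes 2/3 share another,
so two roots survive -- matching the two sched domains the counting
loop in the patch would report via ndoms.
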
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index fe76045aa5..7e527530f8 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -45,6 +45,7 @@
#include <linux/cgroup.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
+#include <linux/union_find.h>
DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
@@ -172,9 +173,6 @@ struct cpuset {
*/
int attach_in_progress;
- /* partition number for rebuild_sched_domains() */
- int pn;
-
/* for custom sched domain */
int relax_domain_level;
@@ -1007,7 +1005,7 @@ static int generate_sched_domains(cpumask_var_t **domains,
struct cpuset *cp; /* top-down scan of cpusets */
struct cpuset **csa; /* array of all cpuset ptrs */
int csn; /* how many cpuset ptrs in csa so far */
- int i, j, k; /* indices for partition finding loops */
+ int i, j; /* indices for partition finding loops */
cpumask_var_t *doms; /* resulting partition; i.e. sched domains */
struct sched_domain_attr *dattr; /* attributes for custom domains */
int ndoms = 0; /* number of sched domains in result */
@@ -1015,6 +1013,8 @@ static int generate_sched_domains(cpumask_var_t **domains,
struct cgroup_subsys_state *pos_css;
bool root_load_balance = is_sched_load_balance(&top_cpuset);
bool cgrpv2 = cgroup_subsys_on_dfl(cpuset_cgrp_subsys);
+ struct uf_node *nodes;
+ int nslot_update;
doms = NULL;
dattr = NULL;
@@ -1102,40 +1102,31 @@ static int generate_sched_domains(cpumask_var_t **domains,
if (root_load_balance && (csn == 1))
goto single_root_domain;
- for (i = 0; i < csn; i++)
- csa[i]->pn = i;
- ndoms = csn;
+ nodes = uf_nodes_alloc(csn);
+ if (!nodes)
+ goto done;
-restart:
- /* Find the best partition (set of sched domains) */
+ /* Merge overlapping cpusets */
for (i = 0; i < csn; i++) {
- struct cpuset *a = csa[i];
- int apn = a->pn;
-
- for (j = 0; j < csn; j++) {
- struct cpuset *b = csa[j];
- int bpn = b->pn;
-
- if (apn != bpn && cpusets_overlap(a, b)) {
- for (k = 0; k < csn; k++) {
- struct cpuset *c = csa[k];
-
- if (c->pn == bpn)
- c->pn = apn;
- }
- ndoms--; /* one less element */
- goto restart;
- }
+ for (j = i + 1; j < csn; j++) {
+ if (cpusets_overlap(csa[i], csa[j]))
+ uf_union(&nodes[i], &nodes[j]);
}
}
+ /* Count the total number of domains */
+ for (i = 0; i < csn; i++) {
+ if ((nodes[i].parent == &nodes[i]) || !nodes[i].parent)
+ ndoms++;
+ }
+
/*
* Now we know how many domains to create.
* Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
*/
doms = alloc_sched_domains(ndoms);
if (!doms)
- goto done;
+ goto free;
/*
* The rest of the code, including the scheduler, can deal with
@@ -1159,47 +1150,29 @@ static int generate_sched_domains(cpumask_var_t **domains,
}
for (nslot = 0, i = 0; i < csn; i++) {
- struct cpuset *a = csa[i];
- struct cpumask *dp;
- int apn = a->pn;
-
- if (apn < 0) {
- /* Skip completed partitions */
- continue;
- }
-
- dp = doms[nslot];
-
- if (nslot == ndoms) {
- static int warnings = 10;
- if (warnings) {
- pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n",
- nslot, ndoms, csn, i, apn);
- warnings--;
- }
- continue;
- }
-
- cpumask_clear(dp);
- if (dattr)
- *(dattr + nslot) = SD_ATTR_INIT;
+ nslot_update = 0;
for (j = i; j < csn; j++) {
- struct cpuset *b = csa[j];
-
- if (apn == b->pn) {
- cpumask_or(dp, dp, b->effective_cpus);
+ if (uf_find(&nodes[j]) == &nodes[i]) {
+ struct cpumask *dp = doms[nslot];
+
+ if (i == j) {
+ nslot_update = 1;
+ cpumask_clear(dp);
+ if (dattr)
+ *(dattr + nslot) = SD_ATTR_INIT;
+ }
+ cpumask_or(dp, dp, csa[j]->effective_cpus);
cpumask_and(dp, dp, housekeeping_cpumask(HK_TYPE_DOMAIN));
if (dattr)
- update_domain_attr_tree(dattr + nslot, b);
-
- /* Done with this partition */
- b->pn = -1;
+ update_domain_attr_tree(dattr + nslot, csa[j]);
}
}
- nslot++;
+ if (nslot_update)
+ nslot++;
}
BUG_ON(nslot != ndoms);
-
+free:
+ uf_nodes_free(nodes);
done:
kfree(csa);
--
2.45.2