Message-Id: <4f7926d0d392ae88ae57815cca6a0369c8cf7cb8.1686554037.git.yu.c.chen@intel.com>
Date: Tue, 13 Jun 2023 00:18:42 +0800
From: Chen Yu <yu.c.chen@...el.com>
To: Peter Zijlstra <peterz@...radead.org>,
Vincent Guittot <vincent.guittot@...aro.org>,
Ingo Molnar <mingo@...hat.com>,
Juri Lelli <juri.lelli@...hat.com>
Cc: Tim Chen <tim.c.chen@...el.com>,
Mel Gorman <mgorman@...hsingularity.net>,
Dietmar Eggemann <dietmar.eggemann@....com>,
K Prateek Nayak <kprateek.nayak@....com>,
Abel Wu <wuyun.abel@...edance.com>,
"Gautham R . Shenoy" <gautham.shenoy@....com>,
Len Brown <len.brown@...el.com>,
Chen Yu <yu.chen.surf@...il.com>,
Yicong Yang <yangyicong@...ilicon.com>,
linux-kernel@...r.kernel.org, Chen Yu <yu.c.chen@...el.com>
Subject: [RFC PATCH 2/4] sched/topology: Introduce nr_groups in sched_domain to indicate the number of groups
Record the number of sched groups within each sched domain. This is in
preparation for calculating the scan depth in newidle_balance().

Signed-off-by: Chen Yu <yu.c.chen@...el.com>
---
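Note (illustration only, not part of the patch): the two builders
changed below chain the groups of a domain into a circular
singly-linked list, closing it with last->next = first, and now also
record the group count while building, so that later consumers need
not re-walk the ring to learn its length. A minimal standalone
userspace model of that layout follows; every name in it is made up
for the sketch and is not kernel code:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct group {
	int id;
	struct group *next;
};

struct domain {
	struct group *groups;		/* first group in the circular list */
	unsigned int nr_groups;		/* recorded at build time */
};

/* Build a ring of n groups (n must be >= 1), counting as we go. */
static void build_groups(struct domain *d, int n)
{
	struct group *first = NULL, *last = NULL;
	int i, nr_groups = 0;

	for (i = 0; i < n; i++) {
		struct group *g = malloc(sizeof(*g));

		if (!g)
			exit(1);
		g->id = i;
		g->next = NULL;
		nr_groups++;

		if (!first)
			first = g;
		if (last)
			last->next = g;
		last = g;
	}
	last->next = first;		/* close the ring, as the builders do */
	d->groups = first;
	d->nr_groups = nr_groups;
}

int main(void)
{
	struct domain d;
	struct group *g;
	unsigned int walked = 0;

	build_groups(&d, 4);

	/* One full walk of the ring visits exactly nr_groups groups. */
	g = d.groups;
	do {
		walked++;
		g = g->next;
	} while (g != d.groups);

	assert(walked == d.nr_groups);
	printf("nr_groups = %u\n", d.nr_groups);
	return 0;
}
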
 include/linux/sched/topology.h |  1 +
 kernel/sched/topology.c        | 10 ++++++++--
 2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 816df6cc444e..1faececd5694 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -152,6 +152,7 @@ struct sched_domain {
 	struct sched_domain_shared *shared;
 
 	unsigned int span_weight;
+	unsigned int nr_groups;
 	/*
 	 * Span of all CPUs in this domain.
 	 *
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index ca4472281c28..255606e88956 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1023,7 +1023,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 	struct cpumask *covered = sched_domains_tmpmask;
 	struct sd_data *sdd = sd->private;
 	struct sched_domain *sibling;
-	int i;
+	int i, nr_groups = 0;
 
 	cpumask_clear(covered);
 
@@ -1087,6 +1087,8 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 		if (!sg)
 			goto fail;
 
+		nr_groups++;
+
 		sg_span = sched_group_span(sg);
 		cpumask_or(covered, covered, sg_span);
 
@@ -1100,6 +1102,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 		last->next = first;
 	}
 	sd->groups = first;
+	sd->nr_groups = nr_groups;
 
 	return 0;
 
@@ -1233,7 +1236,7 @@ build_sched_groups(struct sched_domain *sd, int cpu)
 	struct sd_data *sdd = sd->private;
 	const struct cpumask *span = sched_domain_span(sd);
 	struct cpumask *covered;
-	int i;
+	int i, nr_groups = 0;
 
 	lockdep_assert_held(&sched_domains_mutex);
 	covered = sched_domains_tmpmask;
@@ -1248,6 +1251,8 @@ build_sched_groups(struct sched_domain *sd, int cpu)
 
 		sg = get_group(i, sdd);
 
+		nr_groups++;
+
 		cpumask_or(covered, covered, sched_group_span(sg));
 
 		if (!first)
@@ -1258,6 +1263,7 @@ build_sched_groups(struct sched_domain *sd, int cpu)
 	}
 	last->next = first;
 	sd->groups = first;
+	sd->nr_groups = nr_groups;
 
 	return 0;
 }
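
Note (hypothetical, for context only): one plausible consumer of the
new field would cap the newidle_balance() scan depth by the number of
groups in the domain. The helper name and the clamp policy below are
assumptions made for illustration; they are not taken from a later
patch in this series:

/*
 * Hypothetical sketch: sd_newidle_scan_depth() is a made-up helper
 * showing one way sd->nr_groups could bound the scan. The actual
 * policy is introduced elsewhere in the series.
 */
static inline unsigned int sd_newidle_scan_depth(struct sched_domain *sd,
						 unsigned int wanted)
{
	/* A domain cannot yield more scan candidates than it has groups. */
	return min(wanted, sd->nr_groups);
}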
--
2.25.1