Message-Id: <d4bb3b7f8911ef38e60138ec7b0a629062ae7c34.1690273854.git.yu.c.chen@intel.com>
Date: Thu, 27 Jul 2023 22:34:36 +0800
From: Chen Yu <yu.c.chen@...el.com>
To: Peter Zijlstra <peterz@...radead.org>,
Vincent Guittot <vincent.guittot@...aro.org>
Cc: Ingo Molnar <mingo@...hat.com>, Juri Lelli <juri.lelli@...hat.com>,
Tim Chen <tim.c.chen@...el.com>,
Mel Gorman <mgorman@...hsingularity.net>,
Dietmar Eggemann <dietmar.eggemann@....com>,
K Prateek Nayak <kprateek.nayak@....com>,
"Gautham R . Shenoy" <gautham.shenoy@....com>,
Chen Yu <yu.chen.surf@...il.com>,
Aaron Lu <aaron.lu@...el.com>, linux-kernel@...r.kernel.org,
Chen Yu <yu.c.chen@...el.com>
Subject: [RFC PATCH 2/7] sched/topology: Introduce nr_groups in sched_domain to indicate the number of groups
Record the number of sched groups within each sched domain, in
preparation for the newidle_balance() scan depth calculation
introduced by ILB_UTIL.
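For illustration only, a scan-depth calculation built on top of this
counter could look like the sketch below. The ilb_scan_depth() helper,
its 'ratio' argument (assumed to be a utilization percentage in
[0, 100] provided by the ILB_UTIL logic) and the floor of one group
are made up for this example and are not part of this series:

  /*
   * Hypothetical sketch, not part of this patch: scale how many groups
   * a newly idle CPU scans by how busy the domain is. 'ratio' is
   * assumed to be a utilization percentage in [0, 100] provided by the
   * ILB_UTIL logic.
   */
  static int ilb_scan_depth(struct sched_domain *sd, int ratio)
  {
          int depth = sd->nr_groups * (100 - ratio) / 100;

          /* always scan at least one group */
          return depth > 0 ? depth : 1;
  }

Caching the group count when the domains are built means such a
calculation does not have to walk the sd->groups list in the
newidle_balance() path.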
Signed-off-by: Chen Yu <yu.c.chen@...el.com>
---
 include/linux/sched/topology.h |  1 +
 kernel/sched/topology.c        | 10 ++++++++--
 2 files changed, 9 insertions(+), 2 deletions(-)
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 67b573d5bf28..c07f2f00317a 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -152,6 +152,7 @@ struct sched_domain {
 	struct sched_domain_shared *shared;
 
 	unsigned int span_weight;
+	unsigned int nr_groups;
 	/*
 	 * Span of all CPUs in this domain.
 	 *
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 64212f514765..56dc564fc9a3 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1023,7 +1023,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 	struct cpumask *covered = sched_domains_tmpmask;
 	struct sd_data *sdd = sd->private;
 	struct sched_domain *sibling;
-	int i;
+	int i, nr_groups = 0;
 
 	cpumask_clear(covered);
@@ -1087,6 +1087,8 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 		if (!sg)
 			goto fail;
 
+		nr_groups++;
+
 		sg_span = sched_group_span(sg);
 		cpumask_or(covered, covered, sg_span);
@@ -1100,6 +1102,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 		last->next = first;
 	}
 	sd->groups = first;
+	sd->nr_groups = nr_groups;
 
 	return 0;
@@ -1233,7 +1236,7 @@ build_sched_groups(struct sched_domain *sd, int cpu)
 	struct sd_data *sdd = sd->private;
 	const struct cpumask *span = sched_domain_span(sd);
 	struct cpumask *covered;
-	int i;
+	int i, nr_groups = 0;
 
 	lockdep_assert_held(&sched_domains_mutex);
 	covered = sched_domains_tmpmask;
@@ -1248,6 +1251,8 @@ build_sched_groups(struct sched_domain *sd, int cpu)
 		sg = get_group(i, sdd);
 
+		nr_groups++;
+
 		cpumask_or(covered, covered, sched_group_span(sg));
 
 		if (!first)
@@ -1258,6 +1263,7 @@ build_sched_groups(struct sched_domain *sd, int cpu)
 	}
 	last->next = first;
 	sd->groups = first;
+	sd->nr_groups = nr_groups;
 
 	return 0;
 }
--
2.25.1