Message-Id: <0d71de8648889fe8b202be376e97d581ff3f12ed.1690273854.git.yu.c.chen@intel.com>
Date: Thu, 27 Jul 2023 22:34:50 +0800
From: Chen Yu <yu.c.chen@...el.com>
To: Peter Zijlstra <peterz@...radead.org>,
Vincent Guittot <vincent.guittot@...aro.org>
Cc: Ingo Molnar <mingo@...hat.com>, Juri Lelli <juri.lelli@...hat.com>,
Tim Chen <tim.c.chen@...el.com>,
Mel Gorman <mgorman@...hsingularity.net>,
Dietmar Eggemann <dietmar.eggemann@....com>,
K Prateek Nayak <kprateek.nayak@....com>,
"Gautham R . Shenoy" <gautham.shenoy@....com>,
Chen Yu <yu.chen.surf@...il.com>,
Aaron Lu <aaron.lu@...el.com>, linux-kernel@...r.kernel.org,
Chen Yu <yu.c.chen@...el.com>
Subject: [RFC PATCH 3/7] sched/fair: Save a snapshot of sched domain total_load and total_capacity

Save the total_load and total_capacity of the current sched domain during
each periodic load balance. These statistics can later be used by
CPU_NEWLY_IDLE load balance if it quits the scan early. Introduce a sched
feature ILB_SNAPSHOT to control this. Code can check whether
sd_share->total_capacity is non-zero to verify that the snapshot is valid.

In theory, once the system has reached a stable state, total_capacity and
total_load should not change dramatically.
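
As an illustration only (no consumer is added in this patch, and the
helper name below is hypothetical), a later CPU_NEWLY_IDLE path could
check the snapshot before deciding to cut its scan short:

static inline bool ilb_snapshot_valid(struct sched_domain_shared *sd_share)
{
        /*
         * total_capacity is only written by periodic load balance, so a
         * zero value means no snapshot has been taken for this domain yet.
         */
        return sd_share && READ_ONCE(sd_share->total_capacity);
}

Note that WRITE_ONCE()/READ_ONCE() only avoid store/load tearing here;
total_load and total_capacity are not updated atomically as a pair, so a
reader may observe values taken from two different balance rounds.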
Signed-off-by: Chen Yu <yu.c.chen@...el.com>
---
include/linux/sched/topology.h | 2 ++
kernel/sched/fair.c | 25 +++++++++++++++++++++++++
kernel/sched/features.h | 2 ++
3 files changed, 29 insertions(+)
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index c07f2f00317a..d6a64a2c92aa 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -82,6 +82,8 @@ struct sched_domain_shared {
 	atomic_t	nr_busy_cpus;
 	int		has_idle_cores;
 	int		nr_idle_scan;
+	unsigned long	total_load;
+	unsigned long	total_capacity;
 };
 
 struct sched_domain {
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b3e25be58e2b..edcfee9965cd 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10132,6 +10132,27 @@ static void update_idle_cpu_scan(struct lb_env *env,
 	WRITE_ONCE(sd_share->nr_idle_scan, (int)y);
 }
 
+static void ilb_save_stats(struct lb_env *env,
+			   struct sched_domain_shared *sd_share,
+			   struct sd_lb_stats *sds)
+{
+	if (!sched_feat(ILB_SNAPSHOT))
+		return;
+
+	if (!sd_share)
+		return;
+
+	/* newidle balance is too frequent */
+	if (env->idle == CPU_NEWLY_IDLE)
+		return;
+
+	if (sds->total_load != sd_share->total_load)
+		WRITE_ONCE(sd_share->total_load, sds->total_load);
+
+	if (sds->total_capacity != sd_share->total_capacity)
+		WRITE_ONCE(sd_share->total_capacity, sds->total_capacity);
+}
+
 /**
  * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
  * @env: The load balancing environment.
@@ -10140,6 +10161,7 @@ static void update_idle_cpu_scan(struct lb_env *env,
  */
 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
 {
+	struct sched_domain_shared *sd_share = env->sd->shared;
 	struct sched_group *sg = env->sd->groups;
 	struct sg_lb_stats *local = &sds->local_stat;
 	struct sg_lb_stats tmp_sgs;
@@ -10209,6 +10231,9 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 	}
 
 	update_idle_cpu_scan(env, sum_util);
+
+	/* save a snapshot of stats during periodic load balance */
+	ilb_save_stats(env, sd_share, sds);
 }
 
 /**
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index ee7f23c76bd3..3cb71c8cddc0 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -101,3 +101,5 @@ SCHED_FEAT(LATENCY_WARN, false)
 
 SCHED_FEAT(ALT_PERIOD, true)
 SCHED_FEAT(BASE_SLICE, true)
+
+SCHED_FEAT(ILB_SNAPSHOT, true)
--
2.25.1
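
As with the other entries in kernel/sched/features.h, ILB_SNAPSHOT should
be toggleable at runtime on CONFIG_SCHED_DEBUG kernels via the standard
sched_feat debugfs interface:

        echo NO_ILB_SNAPSHOT > /sys/kernel/debug/sched/features

disables the snapshot, and writing ILB_SNAPSHOT re-enables it.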