Message-Id: <49789cee643fcef7827d2602af35f1198e8a28d0.1686554037.git.yu.c.chen@intel.com>
Date:   Tue, 13 Jun 2023 00:18:19 +0800
From:   Chen Yu <yu.c.chen@...el.com>
To:     Peter Zijlstra <peterz@...radead.org>,
        Vincent Guittot <vincent.guittot@...aro.org>,
        Ingo Molnar <mingo@...hat.com>,
        Juri Lelli <juri.lelli@...hat.com>
Cc:     Tim Chen <tim.c.chen@...el.com>,
        Mel Gorman <mgorman@...hsingularity.net>,
        Dietmar Eggemann <dietmar.eggemann@....com>,
        K Prateek Nayak <kprateek.nayak@....com>,
        Abel Wu <wuyun.abel@...edance.com>,
        "Gautham R . Shenoy" <gautham.shenoy@....com>,
        Len Brown <len.brown@...el.com>,
        Chen Yu <yu.chen.surf@...il.com>,
        Yicong Yang <yangyicong@...ilicon.com>,
        linux-kernel@...r.kernel.org, Chen Yu <yu.c.chen@...el.com>
Subject: [RFC PATCH 1/4] sched/fair: Extract the function to get the sd_llc_shared

Introduce get_llc_shared() to retrieve the sd_llc_shared of dst_cpu when
the current sched domain spans the LLC. Let SIS_UTIL be its first user,
in preparation for later use by ILB_UTIL.

No functional change is intended.
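
As a rough, hypothetical sketch (not part of this patch), a later
ILB_UTIL user could reuse the helper's result instead of repeating the
per-CPU LLC size check at its own call site; the function name and the
consumed statistics below are illustrative only:

	/*
	 * Hypothetical ILB_UTIL consumer: takes the sd_llc_shared already
	 * looked up once by get_llc_shared() in update_sd_lb_stats().
	 */
	static void update_ilb_util(struct lb_env *env,
				    struct sched_domain_shared *sd_share)
	{
		if (!sd_share)
			return;
		/* ... consume/refresh ILB-related statistics in sd_share ... */
	}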

Signed-off-by: Chen Yu <yu.c.chen@...el.com>
---
 kernel/sched/fair.c | 25 +++++++++++++++++--------
 1 file changed, 17 insertions(+), 8 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6189d1a45635..b3a24aead848 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10043,10 +10043,21 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 	return idlest;
 }
 
+/* Get the LLC shared information of dst CPU if doing balance in LLC */
+static struct sched_domain_shared *get_llc_shared(struct lb_env *env)
+{
+	struct sched_domain_shared *sd_share = NULL;
+
+	if (per_cpu(sd_llc_size, env->dst_cpu) == env->sd->span_weight)
+		sd_share = rcu_dereference(per_cpu(sd_llc_shared, env->dst_cpu));
+
+	return sd_share;
+}
+
 static void update_idle_cpu_scan(struct lb_env *env,
-				 unsigned long sum_util)
+				 unsigned long sum_util,
+				 struct sched_domain_shared *sd_share)
 {
-	struct sched_domain_shared *sd_share;
 	int llc_weight, pct;
 	u64 x, y, tmp;
 	/*
@@ -10060,14 +10071,11 @@ static void update_idle_cpu_scan(struct lb_env *env,
 	if (!sched_feat(SIS_UTIL) || env->idle == CPU_NEWLY_IDLE)
 		return;
 
-	llc_weight = per_cpu(sd_llc_size, env->dst_cpu);
-	if (env->sd->span_weight != llc_weight)
-		return;
-
-	sd_share = rcu_dereference(per_cpu(sd_llc_shared, env->dst_cpu));
 	if (!sd_share)
 		return;
 
+	llc_weight = per_cpu(sd_llc_size, env->dst_cpu);
+
 	/*
 	 * The number of CPUs to search drops as sum_util increases, when
 	 * sum_util hits 85% or above, the scan stops.
@@ -10122,6 +10130,7 @@ static void update_idle_cpu_scan(struct lb_env *env,
 
 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
 {
+	struct sched_domain_shared *sd_share = get_llc_shared(env);
 	struct sched_group *sg = env->sd->groups;
 	struct sg_lb_stats *local = &sds->local_stat;
 	struct sg_lb_stats tmp_sgs;
@@ -10190,7 +10199,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 		trace_sched_overutilized_tp(rd, SG_OVERUTILIZED);
 	}
 
-	update_idle_cpu_scan(env, sum_util);
+	update_idle_cpu_scan(env, sum_util, sd_share);
 }
 
 /**
-- 
2.25.1
