Date:   Thu, 17 Feb 2022 23:43:59 +0800
From:   Abel Wu <wuyun.abel@...edance.com>
To:     Ingo Molnar <mingo@...hat.com>,
        Peter Zijlstra <peterz@...radead.org>,
        Juri Lelli <juri.lelli@...hat.com>,
        Vincent Guittot <vincent.guittot@...aro.org>,
        Dietmar Eggemann <dietmar.eggemann@....com>,
        Steven Rostedt <rostedt@...dmis.org>,
        Ben Segall <bsegall@...gle.com>, Mel Gorman <mgorman@...e.de>,
        Daniel Bristot de Oliveira <bristot@...hat.com>
Cc:     linux-kernel@...r.kernel.org
Subject: [RFC PATCH 3/5] sched/fair: add stats for sched-idle balancing

To better understand the behavior of sched-idle balancing, add
some statistics for it, as the other load-balancing mechanisms do.

Signed-off-by: Abel Wu <wuyun.abel@...edance.com>
---
 include/linux/sched/topology.h | 5 +++++
 kernel/sched/fair.c            | 6 +++++-
 kernel/sched/stats.c           | 5 +++--
 3 files changed, 13 insertions(+), 3 deletions(-)
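
Note (not part of the patch): with the show_schedstat() change below, the
three new counters are appended as the last three columns of each domainN
line in /proc/schedstat. A minimal userspace sketch of how one might read
them is included here for illustration only; it assumes CONFIG_SCHEDSTATS=y
and a kernel with this series applied, and simply takes the trailing three
fields of each domain line rather than hard-coding column positions.

	/*
	 * Hypothetical helper, not part of this series: print the
	 * sched-idle balancing counters (sib_peeked, sib_pulled,
	 * sib_failed) that this patch appends to each domain line
	 * of /proc/schedstat.
	 */
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		FILE *fp = fopen("/proc/schedstat", "r");
		char line[1024];

		if (!fp) {
			perror("fopen /proc/schedstat");
			return 1;
		}

		while (fgets(line, sizeof(line), fp)) {
			char *tok, *fields[64];
			int n = 0;

			/* only domainN lines carry sched_domain stats */
			if (strncmp(line, "domain", 6))
				continue;

			for (tok = strtok(line, " \t\n"); tok && n < 64;
			     tok = strtok(NULL, " \t\n"))
				fields[n++] = tok;

			/* sib_* are the last three columns with this patch */
			if (n >= 5)
				printf("%s: sib_peeked=%s sib_pulled=%s sib_failed=%s\n",
				       fields[0], fields[n - 3],
				       fields[n - 2], fields[n - 1]);
		}

		fclose(fp);
		return 0;
	}
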

diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 03c9c81dc886..4259963d3e5e 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -150,6 +150,11 @@ struct sched_domain {
 	unsigned int ttwu_wake_remote;
 	unsigned int ttwu_move_affine;
 	unsigned int ttwu_move_balance;
+
+	/* sched-idle balancing */
+	unsigned int sib_peeked;
+	unsigned int sib_pulled;
+	unsigned int sib_failed;
 #endif
 #ifdef CONFIG_SCHED_DEBUG
 	char *name;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 070a6fb1d2bf..c83c0864e429 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10330,8 +10330,10 @@ static void sched_idle_balance(struct rq *dst_rq)
 		if (cpu == dst_cpu)
 			continue;
 
-		if (!cfs_rq_overloaded(rq))
+		if (!cfs_rq_overloaded(rq)) {
+			schedstat_inc(sd->sib_peeked);
 			continue;
+		}
 
 		rq_lock_irqsave(rq, &rf);
 
@@ -10375,10 +10377,12 @@ static void sched_idle_balance(struct rq *dst_rq)
 		if (p) {
 			attach_one_task(dst_rq, p);
 			local_irq_restore(rf.flags);
+			schedstat_inc(sd->sib_pulled);
 			return;
 		}
 
 		local_irq_restore(rf.flags);
+		schedstat_inc(sd->sib_failed);
 	}
 }
 
diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
index 07dde2928c79..3ee476c72806 100644
--- a/kernel/sched/stats.c
+++ b/kernel/sched/stats.c
@@ -164,12 +164,13 @@ static int show_schedstat(struct seq_file *seq, void *v)
 				    sd->lb_nobusyg[itype]);
 			}
 			seq_printf(seq,
-				   " %u %u %u %u %u %u %u %u %u %u %u %u\n",
+				   " %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u\n",
 			    sd->alb_count, sd->alb_failed, sd->alb_pushed,
 			    sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
 			    sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
 			    sd->ttwu_wake_remote, sd->ttwu_move_affine,
-			    sd->ttwu_move_balance);
+			    sd->ttwu_move_balance, sd->sib_peeked,
+			    sd->sib_pulled, sd->sib_failed);
 		}
 		rcu_read_unlock();
 #endif
-- 
2.11.0
