Date:   Wed, 27 Nov 2019 17:31:10 +0530
From:   "Gautham R. Shenoy" <ego@...ux.vnet.ibm.com>
To:     Michael Ellerman <mpe@...erman.id.au>,
        Vaidyanathan Srinivasan <svaidy@...ux.vnet.ibm.com>,
        Nathan Lynch <nathanl@...ux.ibm.com>,
        Kamalesh Babulal <kamalesh@...ux.vnet.ibm.com>,
        "Naveen N. Rao" <naveen.n.rao@...ux.vnet.ibm.com>,
        Tyrel Datwyler <tyreld@...ux.ibm.com>
Cc:     linuxppc-dev@...ts.ozlabs.org, linux-kernel@...r.kernel.org,
        "Gautham R. Shenoy" <ego@...ux.vnet.ibm.com>
Subject: [PATCH 1/3] powerpc/pseries: Account for SPURR ticks on idle CPUs

From: "Gautham R. Shenoy" <ego@...ux.vnet.ibm.com>

On PSeries LPARs, to compute CPU utilization, tools such as lparstat
need to know the [S]PURR ticks accumulated while the CPUs were busy as
well as while they were idle.

In the pseries cpuidle driver, we already track the idle PURR ticks in
the VPA field "wait_state_cycles". This patch extends that accounting
to also record the idle SPURR ticks in a per-CPU variable.
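
For context only (not part of this patch): once both total and idle
[S]PURR ticks are available, a tool can estimate utilization over a
sampling interval roughly as busy = delta(total) - delta(idle). Below
is a minimal userspace-style sketch with made-up sample values; it makes
no claim about the exact formula lparstat uses.

#include <stdio.h>
#include <stdint.h>

/* Cumulative SPURR ticks sampled at one point in time (illustrative). */
struct spurr_sample {
	uint64_t total;		/* total SPURR ticks */
	uint64_t idle;		/* idle SPURR ticks */
};

/* Percentage of SPURR ticks spent busy between samples a and b. */
static double spurr_busy_percent(struct spurr_sample a, struct spurr_sample b)
{
	uint64_t total = b.total - a.total;
	uint64_t idle  = b.idle  - a.idle;

	if (!total)
		return 0.0;
	return 100.0 * (double)(total - idle) / (double)total;
}

int main(void)
{
	/* Example values only. */
	struct spurr_sample t0 = { .total = 1000000, .idle =  600000 };
	struct spurr_sample t1 = { .total = 2000000, .idle = 1100000 };

	printf("busy: %.1f%%\n", spurr_busy_percent(t0, t1));
	return 0;
}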

Signed-off-by: Gautham R. Shenoy <ego@...ux.vnet.ibm.com>
---
 arch/powerpc/kernel/idle.c        |  2 ++
 drivers/cpuidle/cpuidle-pseries.c | 28 +++++++++++++++++-----------
 2 files changed, 19 insertions(+), 11 deletions(-)

diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index a36fd05..708ec68 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -33,6 +33,8 @@
 unsigned long cpuidle_disable = IDLE_NO_OVERRIDE;
 EXPORT_SYMBOL(cpuidle_disable);
 
+DEFINE_PER_CPU(u64, idle_spurr_cycles);
+
 static int __init powersave_off(char *arg)
 {
 	ppc_md.power_save = NULL;
diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
index 74c2479..45e2be4 100644
--- a/drivers/cpuidle/cpuidle-pseries.c
+++ b/drivers/cpuidle/cpuidle-pseries.c
@@ -30,11 +30,14 @@ struct cpuidle_driver pseries_idle_driver = {
 static struct cpuidle_state *cpuidle_state_table __read_mostly;
 static u64 snooze_timeout __read_mostly;
 static bool snooze_timeout_en __read_mostly;
+DECLARE_PER_CPU(u64, idle_spurr_cycles);
 
-static inline void idle_loop_prolog(unsigned long *in_purr)
+static inline void idle_loop_prolog(unsigned long *in_purr,
+				    unsigned long *in_spurr)
 {
 	ppc64_runlatch_off();
 	*in_purr = mfspr(SPRN_PURR);
+	*in_spurr = mfspr(SPRN_SPURR);
 	/*
 	 * Indicate to the HV that we are idle. Now would be
 	 * a good time to find other work to dispatch.
@@ -42,13 +45,16 @@ static inline void idle_loop_prolog(unsigned long *in_purr)
 	get_lppaca()->idle = 1;
 }
 
-static inline void idle_loop_epilog(unsigned long in_purr)
+static inline void idle_loop_epilog(unsigned long in_purr,
+				    unsigned long in_spurr)
 {
 	u64 wait_cycles;
+	u64 *idle_spurr_cycles_ptr = this_cpu_ptr(&idle_spurr_cycles);
 
 	wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles);
 	wait_cycles += mfspr(SPRN_PURR) - in_purr;
 	get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
+	*idle_spurr_cycles_ptr += mfspr(SPRN_SPURR) - in_spurr;
 	get_lppaca()->idle = 0;
 
 	ppc64_runlatch_on();
@@ -58,12 +64,12 @@ static int snooze_loop(struct cpuidle_device *dev,
 			struct cpuidle_driver *drv,
 			int index)
 {
-	unsigned long in_purr;
+	unsigned long in_purr, in_spurr;
 	u64 snooze_exit_time;
 
 	set_thread_flag(TIF_POLLING_NRFLAG);
 
-	idle_loop_prolog(&in_purr);
+	idle_loop_prolog(&in_purr, &in_spurr);
 	local_irq_enable();
 	snooze_exit_time = get_tb() + snooze_timeout;
 
@@ -87,7 +93,7 @@ static int snooze_loop(struct cpuidle_device *dev,
 
 	local_irq_disable();
 
-	idle_loop_epilog(in_purr);
+	idle_loop_epilog(in_purr, in_spurr);
 
 	return index;
 }
@@ -113,9 +119,9 @@ static int dedicated_cede_loop(struct cpuidle_device *dev,
 				struct cpuidle_driver *drv,
 				int index)
 {
-	unsigned long in_purr;
+	unsigned long in_purr, in_spurr;
 
-	idle_loop_prolog(&in_purr);
+	idle_loop_prolog(&in_purr, &in_spurr);
 	get_lppaca()->donate_dedicated_cpu = 1;
 
 	HMT_medium();
@@ -124,7 +130,7 @@ static int dedicated_cede_loop(struct cpuidle_device *dev,
 	local_irq_disable();
 	get_lppaca()->donate_dedicated_cpu = 0;
 
-	idle_loop_epilog(in_purr);
+	idle_loop_epilog(in_purr, in_spurr);
 
 	return index;
 }
@@ -133,9 +139,9 @@ static int shared_cede_loop(struct cpuidle_device *dev,
 			struct cpuidle_driver *drv,
 			int index)
 {
-	unsigned long in_purr;
+	unsigned long in_purr, in_spurr;
 
-	idle_loop_prolog(&in_purr);
+	idle_loop_prolog(&in_purr, &in_spurr);
 
 	/*
 	 * Yield the processor to the hypervisor.  We return if
@@ -147,7 +153,7 @@ static int shared_cede_loop(struct cpuidle_device *dev,
 	check_and_cede_processor();
 
 	local_irq_disable();
-	idle_loop_epilog(in_purr);
+	idle_loop_epilog(in_purr, in_spurr);
 
 	return index;
 }
-- 
1.9.4
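
For illustration only (again, not part of this patch, and not a claim
about how the rest of this series consumes the value): a per-CPU counter
such as idle_spurr_cycles is typically read elsewhere in the kernel by
summing it across CPUs, along these lines:

/* Illustrative sketch: sum the idle SPURR ticks over all CPUs. */
#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>

DECLARE_PER_CPU(u64, idle_spurr_cycles);

static u64 total_idle_spurr_ticks(void)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu(idle_spurr_cycles, cpu);

	return sum;
}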
