Message-ID: <7958e729c6be0f682379bec81f115b8cd7cca1ad.1647498015.git.sandipan.das@amd.com>
Date:   Thu, 17 Mar 2022 11:58:34 +0530
From:   Sandipan Das <sandipan.das@....com>
To:     <linux-kernel@...r.kernel.org>, <linux-perf-users@...r.kernel.org>,
        <x86@...nel.org>
CC:     <peterz@...radead.org>, <bp@...en8.de>,
        <dave.hansen@...ux.intel.com>, <acme@...nel.org>,
        <mark.rutland@....com>, <alexander.shishkin@...ux.intel.com>,
        <namhyung@...nel.org>, <jolsa@...nel.org>, <tglx@...utronix.de>,
        <mingo@...hat.com>, <pbonzini@...hat.com>, <jmattson@...gle.com>,
        <like.xu.linux@...il.com>, <eranian@...gle.com>,
        <ananth.narayan@....com>, <ravi.bangoria@....com>,
        <santosh.shukla@....com>, <sandipan.das@....com>
Subject: [PATCH 5/7] perf/x86/amd/core: Add PerfMonV2 counter control

If AMD Performance Monitoring Version 2 (PerfMonV2) is
supported, use a new scheme to manage the Core PMCs using
the new global control and status registers. This will be
bypassed on unsupported hardware (x86_pmu.version < 2).

Currently, all PMCs have dedicated control (PERF_CTL) and
counter (PERF_CTR) registers. For a given PMC, the enable
(En) bit of its PERF_CTL register is used to start or stop
counting.
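
For illustration only (not part of this patch), enabling or
disabling a single PMC on the legacy path amounts to toggling
that En bit. The helper below is a hypothetical sketch which
assumes the family 15h and later MSR layout, where PERF_CTL
for PMC N lives at MSR_F15H_PERF_CTL + 2 * N:

  static void sketch_legacy_pmc_ctl(unsigned int idx, u64 config, bool enable)
  {
  	u64 ctl = config;

  	if (enable)
  		ctl |= ARCH_PERFMON_EVENTSEL_ENABLE;	/* the En bit */
  	else
  		ctl &= ~ARCH_PERFMON_EVENTSEL_ENABLE;

  	/* PERF_CTL for PMC 'idx': base MSR plus a stride of two */
  	wrmsrl(MSR_F15H_PERF_CTL + 2 * idx, ctl);
  }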

The Performance Counter Global Control (PerfCntrGlobalCtl)
register has enable (PerfCntrEn) bits for each PMC. For a
PMC to start counting, both PERF_CTL and PerfCntrGlobalCtl
enable bits must be set. If either of those is cleared,
the PMC stops counting.
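
As another purely illustrative sketch (the helper name is made
up; MSR_AMD64_PERF_CNTR_GLOBAL_CTL is the register added
earlier in this series), gating one PMC through the global
register looks roughly like this:

  static void sketch_global_gate_pmc(unsigned int idx, bool enable)
  {
  	u64 global;

  	rdmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, global);

  	if (enable)
  		global |= BIT_ULL(idx);		/* PerfCntrEn for PMC 'idx' */
  	else
  		global &= ~BIT_ULL(idx);

  	/* The PMC only counts if this bit and its PERF_CTL En bit are set */
  	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, global);
  }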

In x86_pmu_{en,dis}able_all(), the PERF_CTL registers of
all active PMCs are written to in a loop. Ideally, PMCs
counting the same event that were started and stopped at
the same time should record the same counts. Due to delays
between writes to the PERF_CTL registers across loop
iterations, the PMCs cannot be enabled or disabled at the
same instant and hence record slightly different counts.
This is fixed by enabling or disabling all active PMCs at
the same time with a single write to the PerfCntrGlobalCtl
register.
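
Conceptually, and again only as a hedged sketch (the sketch_*
names are hypothetical; the legacy body mirrors what
x86_pmu_enable_all() does, the V2 body mirrors the new
amd_pmu_global_enable_all() below), the two paths compare as
follows:

  /* Legacy: one MSR write per active PMC, skewed across loop iterations */
  static void sketch_enable_all_legacy(struct cpu_hw_events *cpuc)
  {
  	int idx;

  	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
  		if (!test_bit(idx, cpuc->active_mask))
  			continue;
  		__x86_pmu_enable_event(&cpuc->events[idx]->hw,
  				       ARCH_PERFMON_EVENTSEL_ENABLE);
  	}
  }

  /* PerfMonV2: one write to PerfCntrGlobalCtl flips all PerfCntrEn bits */
  static void sketch_enable_all_v2(void)
  {
  	amd_pmu_set_global_ctl(amd_pmu_global_cntr_mask);
  }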

Signed-off-by: Sandipan Das <sandipan.das@....com>
---
 arch/x86/events/amd/core.c | 58 ++++++++++++++++++++++++++++++++++++--
 1 file changed, 55 insertions(+), 3 deletions(-)

diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index 05d79afe5173..532e9bd76bf1 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -596,6 +596,11 @@ static void amd_pmu_cpu_dead(int cpu)
 	amd_pmu_cpu_reset(cpu);
 }
 
+static inline void amd_pmu_set_global_ctl(u64 ctl)
+{
+	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, ctl);
+}
+
 /*
  * When a PMC counter overflows, an NMI is used to process the event and
  * reset the counter. NMI latency can result in the counter being updated
@@ -625,12 +630,32 @@ static void amd_pmu_wait_on_overflow(int idx)
 	}
 }
 }
 
+static void amd_pmu_global_enable_all(int added)
+{
+	amd_pmu_set_global_ctl(amd_pmu_global_cntr_mask);
+}
+
+DEFINE_STATIC_CALL(amd_pmu_enable_all, x86_pmu_enable_all);
+
+static void amd_pmu_enable_all(int added)
+{
+	static_call(amd_pmu_enable_all)(added);
+}
+
+static void amd_pmu_global_disable_all(void)
+{
+	/* Disable all PMCs */
+	amd_pmu_set_global_ctl(0);
+}
+
+DEFINE_STATIC_CALL(amd_pmu_disable_all, x86_pmu_disable_all);
+
 static void amd_pmu_disable_all(void)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	int idx;
 
-	x86_pmu_disable_all();
+	static_call(amd_pmu_disable_all)();
 
 	/*
 	 * This shouldn't be called from NMI context, but add a safeguard here
@@ -671,6 +696,28 @@ static void amd_pmu_disable_event(struct perf_event *event)
 	amd_pmu_wait_on_overflow(event->hw.idx);
 }
 
+static void amd_pmu_global_enable_event(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	/*
+	 * Testing cpu_hw_events.enabled should be skipped in this case unlike
+	 * in x86_pmu_enable_event().
+	 *
+	 * Since cpu_hw_events.enabled is set only after returning from
+	 * x86_pmu_start(), the PMCs must be programmed and kept ready.
+	 * Counting starts only after x86_pmu_enable_all() is called.
+	 */
+	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
+}
+
+DEFINE_STATIC_CALL(amd_pmu_enable_event, x86_pmu_enable_event);
+
+static void amd_pmu_enable_event(struct perf_event *event)
+{
+	static_call(amd_pmu_enable_event)(event);
+}
+
 /*
  * Because of NMI latency, if multiple PMC counters are active or other sources
  * of NMIs are received, the perf NMI handler can handle one or more overflowed
@@ -929,8 +976,8 @@ static __initconst const struct x86_pmu amd_pmu = {
 	.name			= "AMD",
 	.handle_irq		= amd_pmu_handle_irq,
 	.disable_all		= amd_pmu_disable_all,
-	.enable_all		= x86_pmu_enable_all,
-	.enable			= x86_pmu_enable_event,
+	.enable_all		= amd_pmu_enable_all,
+	.enable			= amd_pmu_enable_event,
 	.disable		= amd_pmu_disable_event,
 	.hw_config		= amd_pmu_hw_config,
 	.schedule_events	= x86_schedule_events,
@@ -989,6 +1036,11 @@ static int __init amd_core_pmu_init(void)
 		x86_pmu.num_counters = EXT_PERFMON_DEBUG_NUM_CORE_PMC(ebx);
 
 		amd_pmu_global_cntr_mask = (1ULL << x86_pmu.num_counters) - 1;
+
+		/* Update PMC handling functions */
+		static_call_update(amd_pmu_enable_all, amd_pmu_global_enable_all);
+		static_call_update(amd_pmu_disable_all, amd_pmu_global_disable_all);
+		static_call_update(amd_pmu_enable_event, amd_pmu_global_enable_event);
 	}
 
 	/*
-- 
2.32.0
