Message-ID: <a143f95aeec5040525530d6292537218aa64d565.1689748843.git.sandipan.das@amd.com>
Date:   Wed, 19 Jul 2023 12:25:40 +0530
From:   Sandipan Das <sandipan.das@....com>
To:     <linux-kernel@...r.kernel.org>, <linux-perf-users@...r.kernel.org>,
        <x86@...nel.org>
CC:     <peterz@...radead.org>, <mingo@...hat.com>, <acme@...nel.org>,
        <mark.rutland@....com>, <alexander.shishkin@...ux.intel.com>,
        <jolsa@...nel.org>, <namhyung@...nel.org>, <irogers@...gle.com>,
        <adrian.hunter@...el.com>, <tglx@...utronix.de>, <bp@...en8.de>,
        <dave.hansen@...ux.intel.com>, <hpa@...or.com>,
        <eranian@...gle.com>, <ananth.narayan@....com>,
        <ravi.bangoria@....com>, <santosh.shukla@....com>,
        <sandipan.das@....com>
Subject: [PATCH 5/6] perf/x86/amd/uncore: Add memory controller support

Unified Memory Controller (UMC) events were introduced with Zen 4 as a
part of the Performance Monitoring Version 2 (PerfMonV2) enhancements.
An event is specified using the EventSelect bits, and the RdWrMask bits
can be used for additional filtering of read and write requests.
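
As a rough illustration (umc_encode_event() and the event number below
are hypothetical; real event encodings are listed in the PPR), the
resulting config layout places EventSelect in bits [7:0] and RdWrMask
in bits [9:8]:

  /*
   * Sketch of the UMC event encoding: EventSelect in config[7:0] and
   * RdWrMask in config[9:8]. Bits outside these fields are discarded
   * by the driver.
   */
  static inline u64 umc_encode_event(u8 event_select, u8 rdwr_mask)
  {
  	return ((u64)event_select & GENMASK_ULL(7, 0)) |
  	       (((u64)rdwr_mask << 8) & GENMASK_ULL(9, 8));
  }

From user space, such an event can then be requested with e.g.
"perf stat -e amd_umc_0/event=0x1,rdwrmask=0x3/ -a" once the PMUs
below are registered.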

As of now, a maximum of 12 channels of DDR5 are available on each socket
and each channel is controlled by a dedicated UMC. Each UMC, in turn,
has its own set of performance counters.

Since the MSR address space for the UMC PERF_CTL and PERF_CTR registers
is reused across sockets, uncore groups are created on the basis of
socket IDs. Hence, group exclusivity is mandatory while opening events
so that events for a UMC can only be opened on CPUs which are on the
same socket as the corresponding memory channel.
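
As a minimal sketch of this constraint (umc_event_cpu_valid() is a
hypothetical helper; the patch derives the group ID from
topology_die_id(), treating the die ID as the socket ID on these
parts):

  /* An event for a UMC PMU is valid only on CPUs of the owning socket */
  static bool umc_event_cpu_valid(unsigned int cpu, int pmu_group_id)
  {
  	return topology_die_id(cpu) == pmu_group_id;
  }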

For each socket, the total number of available UMC counters and the
number of active memory channels are determined from CPUID leaf
0x80000022 EBX and ECX respectively. Usually, on Zen 4, each UMC gets
4 counters.
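
Concretely, the counters are distributed evenly across the active
channels, as done later in amd_uncore_umc_init(); a minimal sketch
(counters_per_umc() is a hypothetical helper):

  /*
   * Sketch: divide the socket-wide UMC counter total (CPUID leaf
   * 0x80000022 EBX, num_umc_pmc) evenly across the active memory
   * channels reported as a bitmask in ECX.
   */
  static unsigned int counters_per_umc(unsigned int num_umc_pmc,
  				       u32 active_umc_mask)
  {
  	unsigned int num_active = hweight32(active_umc_mask);

  	return num_active ? num_umc_pmc / num_active : 0;
  }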

MSR assignments are determined on the basis of active UMCs. E.g. if
UMCs 1, 4 and 9 are active for a given socket, then

  * UMC 1 gets MSRs 0xc0010800 to 0xc0010807 as PERF_CTLs and PERF_CTRs
  * UMC 4 gets MSRs 0xc0010808 to 0xc001080f as PERF_CTLs and PERF_CTRs
  * UMC 9 gets MSRs 0xc0010810 to 0xc0010817 as PERF_CTLs and PERF_CTRs
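
The underlying arithmetic, as used for msr_base in
amd_uncore_umc_init(), can be sketched as follows (umc_perf_ctl_msr()
is a hypothetical helper; n is the zero-based position of a UMC among
the active ones, not the UMC number itself, and the CTL/CTR registers
are assumed to be interleaved as their base addresses suggest):

  /* MSR address of PERF_CTL i for the n-th active UMC on a socket */
  static u32 umc_perf_ctl_msr(unsigned int n, unsigned int num_counters,
  			      unsigned int i)
  {
  	/* the matching PERF_CTR is at the next address, i.e. +1 */
  	return 0xc0010800 + n * num_counters * 2 + 2 * i;
  }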

Memory channels are generally labelled using letters and the mapping of
UMCs to memory channels depends on the family and model. This
information can be found in the "UMC and DDR Phy Logical Mapping"
section of the AMD Processor Programming Reference (PPR).

If there are sockets without any online CPUs when the amd_uncore driver
is loaded, UMCs for such sockets will not be discoverable since the
mechanism relies on executing the CPUID instruction on an online CPU
from the socket.

Signed-off-by: Sandipan Das <sandipan.das@....com>
---
 arch/x86/events/amd/uncore.c      | 171 +++++++++++++++++++++++++++++-
 arch/x86/include/asm/msr-index.h  |   4 +
 arch/x86/include/asm/perf_event.h |   9 ++
 3 files changed, 182 insertions(+), 2 deletions(-)

diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index 6653e8e164bd..c3e1bddd4e1b 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -27,7 +27,12 @@
 
 #define COUNTER_SHIFT		16
 
-#define NUM_UNCORES_MAX		2	/* DF (or NB) and L3 (or L2) */
+/*
+ * While DF (or NB) and L3 (or L2) PMUs have a single instance, there may be
+ * multiple UMC PMUs, one for each active memory channel across all
+ * sockets.
+ */
+#define NUM_UNCORES_MAX		64
 #define UNCORE_NAME_LEN		16
 
 #undef pr_fmt
@@ -264,7 +269,7 @@ static struct device_attribute format_attr_##_var =			\
 DEFINE_UNCORE_FORMAT_ATTR(event12,	event,		"config:0-7,32-35");
 DEFINE_UNCORE_FORMAT_ATTR(event14,	event,		"config:0-7,32-35,59-60"); /* F17h+ DF */
 DEFINE_UNCORE_FORMAT_ATTR(event14v2,	event,		"config:0-7,32-37");	   /* PerfMonV2 DF */
-DEFINE_UNCORE_FORMAT_ATTR(event8,	event,		"config:0-7");		   /* F17h+ L3 */
+DEFINE_UNCORE_FORMAT_ATTR(event8,	event,		"config:0-7");		   /* F17h+ L3, PerfMonV2 UMC */
 DEFINE_UNCORE_FORMAT_ATTR(umask8,	umask,		"config:8-15");
 DEFINE_UNCORE_FORMAT_ATTR(umask12,	umask,		"config:8-15,24-27");	   /* PerfMonV2 DF */
 DEFINE_UNCORE_FORMAT_ATTR(coreid,	coreid,		"config:42-44");	   /* F19h L3 */
@@ -274,6 +279,7 @@ DEFINE_UNCORE_FORMAT_ATTR(threadmask2,	threadmask,	"config:56-57");	   /* F19h L
 DEFINE_UNCORE_FORMAT_ATTR(enallslices,	enallslices,	"config:46");		   /* F19h L3 */
 DEFINE_UNCORE_FORMAT_ATTR(enallcores,	enallcores,	"config:47");		   /* F19h L3 */
 DEFINE_UNCORE_FORMAT_ATTR(sliceid,	sliceid,	"config:48-50");	   /* F19h L3 */
+DEFINE_UNCORE_FORMAT_ATTR(rdwrmask,	rdwrmask,	"config:8-9");		   /* PerfMonV2 UMC */
 
 /* Common DF and NB attributes */
 static struct attribute *amd_uncore_df_format_attr[] = {
@@ -305,6 +311,13 @@ static struct attribute *amd_f19h_uncore_l3_format_attr[] = {
 	NULL,
 };
 
+/* Common UMC attributes */
+static struct attribute *amd_uncore_umc_format_attr[] = {
+	&format_attr_event8.attr,	/* event */
+	&format_attr_rdwrmask.attr,	/* rdwrmask */
+	NULL,
+};
+
 static struct attribute_group amd_uncore_df_format_group = {
 	.name = "format",
 	.attrs = amd_uncore_df_format_attr,
@@ -327,6 +340,11 @@ static struct attribute_group amd_f19h_uncore_l3_format_group = {
 	.is_visible = amd_f19h_uncore_is_visible,
 };
 
+static struct attribute_group amd_uncore_umc_format_group = {
+	.name = "format",
+	.attrs = amd_uncore_umc_format_attr,
+};
+
 static const struct attribute_group *amd_uncore_df_attr_groups[] = {
 	&amd_uncore_attr_group,
 	&amd_uncore_df_format_group,
@@ -345,6 +363,12 @@ static const struct attribute_group *amd_uncore_l3_attr_update[] = {
 	NULL,
 };
 
+static const struct attribute_group *amd_uncore_umc_attr_groups[] = {
+	&amd_uncore_attr_group,
+	&amd_uncore_umc_format_group,
+	NULL,
+};
+
 static int amd_uncore_cpu_up_prepare(unsigned int cpu)
 {
 	struct amd_uncore *uncore;
@@ -757,6 +781,145 @@ static int amd_uncore_l3_init(void)
 	return 0;
 }
 
+static int amd_uncore_umc_id(unsigned int cpu)
+{
+	return topology_die_id(cpu);
+}
+
+static int amd_uncore_umc_event_init(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	int ret = amd_uncore_event_init(event);
+
+	if (ret)
+		return ret;
+
+	hwc->config = event->attr.config & AMD64_PERFMON_V2_RAW_EVENT_MASK_UMC;
+
+	return 0;
+}
+
+static void amd_uncore_umc_start(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (flags & PERF_EF_RELOAD)
+		wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));
+
+	hwc->state = 0;
+	wrmsrl(hwc->config_base, (hwc->config | AMD64_PERFMON_V2_ENABLE_UMC));
+	perf_event_update_userpage(event);
+}
+
+static int amd_uncore_umc_init(void)
+{
+	unsigned int cpu, eax, ecx, edx;
+	union cpuid_0x80000022_ebx ebx;
+	struct amd_uncore *uncore;
+	int umc_idx = 0, group_id, group_num_umc, group_umc_idx, ret, i;
+	DECLARE_BITMAP(group_mask, NUM_UNCORES_MAX) = { 0 };
+
+	if (pmu_version < 2)
+		return 0;
+
+	/*
+	 * Each group of memory controllers can have a unique configuration
+	 * based on the DIMM population scheme. If all CPUs associated with a
+	 * group of memory channels are offline, the corresponding UMC PMUs
+	 * will not be initialized since they are only discoverable via CPUID.
+	 */
+	for_each_online_cpu(cpu) {
+		group_id = amd_uncore_umc_id(cpu);
+
+		/* Check if this group has already been discovered */
+		if (test_bit(group_id, group_mask))
+			continue;
+
+		__set_bit(group_id, group_mask);
+		ret = cpuid_on_cpu(cpu, EXT_PERFMON_DEBUG_FEATURES, &eax,
+				   &ebx.full, &ecx, &edx);
+		if (ret)
+			goto fail;
+
+		group_umc_idx = 0;
+		group_num_umc = hweight32(ecx);
+
+		/*
+		 * If there are more PMUs than anticipated, NUM_UNCORES_MAX
+		 * needs to be increased to accommodate them.
+		 */
+		if ((num_uncores + umc_idx + group_num_umc) > NUM_UNCORES_MAX) {
+			WARN(1, "some uncore PMUs cannot be initialized");
+			break;
+		}
+
+		/* Create PMUs for active UMCs in the current group */
+		for (i = 0; i < 32; i++) {
+			if (!(ecx & BIT(i)))
+				continue;
+
+			uncore = &uncores[num_uncores + umc_idx];
+			snprintf(uncore->name, sizeof(uncore->name), "amd_umc_%d", umc_idx);
+			uncore->num_counters = ebx.split.num_umc_pmc / group_num_umc;
+			uncore->msr_base = MSR_F19H_UMC_PERF_CTL + group_umc_idx * uncore->num_counters * 2;
+			uncore->rdpmc_base = -1;
+			uncore->id = amd_uncore_umc_id;
+			uncore->group = group_id;
+
+			uncore->ctx = alloc_percpu(struct amd_uncore_ctx *);
+			if (!uncore->ctx) {
+				ret = -ENOMEM;
+				goto fail;
+			}
+
+			uncore->pmu = (struct pmu) {
+				.task_ctx_nr	= perf_invalid_context,
+				.attr_groups	= amd_uncore_umc_attr_groups,
+				.name		= uncore->name,
+				.event_init	= amd_uncore_umc_event_init,
+				.add		= amd_uncore_add,
+				.del		= amd_uncore_del,
+				.start		= amd_uncore_umc_start,
+				.stop		= amd_uncore_stop,
+				.read		= amd_uncore_read,
+				.capabilities	= PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
+				.module		= THIS_MODULE,
+			};
+
+			ret = perf_pmu_register(&uncore->pmu, uncore->pmu.name, -1);
+			if (ret) {
+				free_percpu(uncore->ctx);
+				uncore->ctx = NULL;
+				goto fail;
+			}
+
+			pr_info("%d %s %s counters detected\n", uncore->num_counters,
+				boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?  "HYGON" : "",
+				uncore->pmu.name);
+
+			umc_idx++;
+			group_umc_idx++;
+		}
+	}
+
+	num_uncores += umc_idx;
+
+	return 0;
+
+fail:
+	for (i = 0; i < umc_idx; i++) {
+		uncore = &uncores[num_uncores + i];
+		if (!uncore->ctx)
+			continue;
+
+		perf_pmu_unregister(&uncore->pmu);
+		free_percpu(uncore->ctx);
+		uncore->ctx = NULL;
+	}
+
+	return ret;
+}
+
 static void uncore_free(void)
 {
 	struct amd_uncore *uncore;
@@ -797,6 +960,10 @@ static int __init amd_uncore_init(void)
 	if (ret)
 		goto fail;
 
+	ret = amd_uncore_umc_init();
+	if (ret)
+		goto fail;
+
 	/*
 	 * Install callbacks. Core will call them for each online cpu.
 	 */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 3aedae61af4f..bfcc72b20f54 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -624,6 +624,10 @@
 /* AMD Last Branch Record MSRs */
 #define MSR_AMD64_LBR_SELECT			0xc000010e
 
+/* Fam 19h MSRs */
+#define MSR_F19H_UMC_PERF_CTL		0xc0010800
+#define MSR_F19H_UMC_PERF_CTR		0xc0010801
+
 /* Fam 17h MSRs */
 #define MSR_F17H_IRPERF			0xc00000e9
 
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 85a9fd5a3ec3..2618ec7c3d1d 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -112,6 +112,13 @@
 	(AMD64_PERFMON_V2_EVENTSEL_EVENT_NB	|	\
 	 AMD64_PERFMON_V2_EVENTSEL_UMASK_NB)
 
+#define AMD64_PERFMON_V2_ENABLE_UMC			BIT_ULL(31)
+#define AMD64_PERFMON_V2_EVENTSEL_EVENT_UMC		GENMASK_ULL(7, 0)
+#define AMD64_PERFMON_V2_EVENTSEL_RDWRMASK_UMC		GENMASK_ULL(9, 8)
+#define AMD64_PERFMON_V2_RAW_EVENT_MASK_UMC		\
+	(AMD64_PERFMON_V2_EVENTSEL_EVENT_UMC	|	\
+	 AMD64_PERFMON_V2_EVENTSEL_RDWRMASK_UMC)
+
 #define AMD64_NUM_COUNTERS				4
 #define AMD64_NUM_COUNTERS_CORE				6
 #define AMD64_NUM_COUNTERS_NB				4
@@ -232,6 +239,8 @@ union cpuid_0x80000022_ebx {
 		unsigned int	lbr_v2_stack_sz:6;
 		/* Number of Data Fabric Counters */
 		unsigned int	num_df_pmc:6;
+		/* Number of Unified Memory Controller Counters */
+		unsigned int	num_umc_pmc:6;
 	} split;
 	unsigned int		full;
 };
-- 
2.34.1
