Message-ID: <20220425044323.2830-2-ravi.bangoria@amd.com>
Date: Mon, 25 Apr 2022 10:13:18 +0530
From: Ravi Bangoria <ravi.bangoria@....com>
To: <peterz@...radead.org>, <acme@...nel.org>
CC: <ravi.bangoria@....com>, <mingo@...hat.com>,
<mark.rutland@....com>, <jolsa@...nel.org>, <namhyung@...nel.org>,
<tglx@...utronix.de>, <bp@...en8.de>, <irogers@...gle.com>,
<yao.jin@...ux.intel.com>, <james.clark@....com>,
<leo.yan@...aro.org>, <kan.liang@...ux.intel.com>,
<ak@...ux.intel.com>, <eranian@...gle.com>,
<like.xu.linux@...il.com>, <x86@...nel.org>,
<linux-perf-users@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<sandipan.das@....com>, <ananth.narayan@....com>,
<kim.phillips@....com>, <rrichter@....com>,
<santosh.shukla@....com>
Subject: [PATCH 1/6] perf/amd/ibs: Add support for L3 miss filtering
IBS L3 miss filtering works by tagging an instruction on IBS counter
overflow and generating an NMI if the tagged instruction causes an L3
miss. Samples without an L3 miss are discarded and the counter is reset
with a random value (between 1-15 for the fetch PMU and 1-127 for the
op PMU). This helps reduce sampling overhead when the user is
interested only in such samples. One use case for such filtered samples
is feeding data to a page-migration daemon in tiered memory systems.
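
In hardware terms the filter is a single control bit: bit 59 of
IbsFetchCtl and bit 16 of IbsOpCtl, which this patch exposes to user
space as config:59 (ibs_fetch) and config:16 (ibs_op). Purely as an
illustration (not part of the patch), a minimal user-space snippet
that builds the corresponding raw event config values:

#include <stdio.h>

/* Bit positions exported by this patch as config:59 / config:16. */
#define IBS_FETCH_L3MISSONLY    (1ULL << 59)
#define IBS_OP_L3MISSONLY       (1ULL << 16)

int main(void)
{
        printf("ibs_fetch/l3missonly=1/ == raw config 0x%llx\n",
               (unsigned long long)IBS_FETCH_L3MISSONLY);
        printf("ibs_op/l3missonly=1/    == raw config 0x%llx\n",
               (unsigned long long)IBS_OP_L3MISSONLY);
        return 0;
}
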
Add support for L3 miss filtering in the IBS driver via a new PMU
attribute "l3missonly". Example usage:
# perf record -a -e ibs_op/l3missonly=1/ --raw-samples sleep 5
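
For reference, a perf_event_open(2) based sketch of the same setup
(illustration only, not part of the patch): the ibs_op PMU type is
dynamic and is therefore read from sysfs, config bit 16 corresponds to
the "config:16" format added below, and the sample period, CPU choice
and helper name are arbitrary:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Dynamic PMUs export their type id via sysfs. */
static int read_pmu_type(const char *path)
{
        int type = -1;
        FILE *f = fopen(path, "r");

        if (f) {
                if (fscanf(f, "%d", &type) != 1)
                        type = -1;
                fclose(f);
        }
        return type;
}

int main(void)
{
        struct perf_event_attr attr;
        int type, fd;

        type = read_pmu_type("/sys/bus/event_source/devices/ibs_op/type");
        if (type < 0) {
                fprintf(stderr, "ibs_op PMU not available\n");
                return 1;
        }

        memset(&attr, 0, sizeof(attr));
        attr.size          = sizeof(attr);
        attr.type          = type;
        attr.config        = 1ULL << 16;        /* l3missonly (config:16) */
        attr.sample_period = 100000;            /* arbitrary period */
        attr.sample_type   = PERF_SAMPLE_IP | PERF_SAMPLE_RAW;
        attr.disabled      = 1;

        /* cpu-wide event on CPU 0; needs root or CAP_PERFMON */
        fd = syscall(SYS_perf_event_open, &attr, -1, 0, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        close(fd);
        return 0;
}
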
Signed-off-by: Ravi Bangoria <ravi.bangoria@....com>
---
arch/x86/events/amd/ibs.c | 42 ++++++++++++++++++++++---------
arch/x86/include/asm/perf_event.h | 3 +++
2 files changed, 33 insertions(+), 12 deletions(-)
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index 9739019d4b67..a5303d62060c 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -520,16 +520,12 @@ static void perf_ibs_read(struct perf_event *event) { }
PMU_FORMAT_ATTR(rand_en, "config:57");
PMU_FORMAT_ATTR(cnt_ctl, "config:19");
+PMU_EVENT_ATTR_STRING(l3missonly, fetch_l3missonly, "config:59");
+PMU_EVENT_ATTR_STRING(l3missonly, op_l3missonly, "config:16");
-static struct attribute *ibs_fetch_format_attrs[] = {
- &format_attr_rand_en.attr,
- NULL,
-};
-
-static struct attribute *ibs_op_format_attrs[] = {
- NULL, /* &format_attr_cnt_ctl.attr if IBS_CAPS_OPCNT */
- NULL,
-};
+/* size = nr attrs plus NULL at the end */
+static struct attribute *ibs_fetch_format_attrs[3];
+static struct attribute *ibs_op_format_attrs[3];
static struct perf_ibs perf_ibs_fetch = {
.pmu = {
@@ -759,9 +755,9 @@ static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
return ret;
}
-static __init void perf_event_ibs_init(void)
+static __init void perf_ibs_fetch_prepare(void)
{
- struct attribute **attr = ibs_op_format_attrs;
+ struct attribute **format_attrs = perf_ibs_fetch.format_attrs;
/*
* Some chips fail to reset the fetch count when it is written; instead
@@ -773,11 +769,22 @@ static __init void perf_event_ibs_init(void)
if (boot_cpu_data.x86 == 0x19 && boot_cpu_data.x86_model < 0x10)
perf_ibs_fetch.fetch_ignore_if_zero_rip = 1;
+ *format_attrs++ = &format_attr_rand_en.attr;
+ if (ibs_caps & IBS_CAPS_ZEN4IBSEXTENSIONS) {
+ perf_ibs_fetch.config_mask |= IBS_FETCH_L3MISSONLY;
+ *format_attrs++ = &fetch_l3missonly.attr.attr;
+ }
+
perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
+}
+
+static __init void perf_ibs_op_prepare(void)
+{
+ struct attribute **format_attrs = perf_ibs_op.format_attrs;
if (ibs_caps & IBS_CAPS_OPCNT) {
perf_ibs_op.config_mask |= IBS_OP_CNT_CTL;
- *attr++ = &format_attr_cnt_ctl.attr;
+ *format_attrs++ = &format_attr_cnt_ctl.attr;
}
if (ibs_caps & IBS_CAPS_OPCNTEXT) {
@@ -786,7 +793,18 @@ static __init void perf_event_ibs_init(void)
perf_ibs_op.cnt_mask |= IBS_OP_MAX_CNT_EXT_MASK;
}
+ if (ibs_caps & IBS_CAPS_ZEN4IBSEXTENSIONS) {
+ perf_ibs_op.config_mask |= IBS_OP_L3MISSONLY;
+ *format_attrs++ = &op_l3missonly.attr.attr;
+ }
+
perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");
+}
+
+static __init void perf_event_ibs_init(void)
+{
+ perf_ibs_fetch_prepare();
+ perf_ibs_op_prepare();
register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
pr_info("perf: AMD IBS detected (0x%08x)\n", ibs_caps);
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index b06e4c573add..a24b637a6e1d 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -391,6 +391,7 @@ struct pebs_xmm {
#define IBS_CAPS_OPBRNFUSE (1U<<8)
#define IBS_CAPS_FETCHCTLEXTD (1U<<9)
#define IBS_CAPS_OPDATA4 (1U<<10)
+#define IBS_CAPS_ZEN4IBSEXTENSIONS (1U<<11)
#define IBS_CAPS_DEFAULT (IBS_CAPS_AVAIL \
| IBS_CAPS_FETCHSAM \
@@ -404,6 +405,7 @@ struct pebs_xmm {
#define IBSCTL_LVT_OFFSET_MASK 0x0F
/* IBS fetch bits/masks */
+#define IBS_FETCH_L3MISSONLY (1ULL<<59)
#define IBS_FETCH_RAND_EN (1ULL<<57)
#define IBS_FETCH_VAL (1ULL<<49)
#define IBS_FETCH_ENABLE (1ULL<<48)
@@ -420,6 +422,7 @@ struct pebs_xmm {
#define IBS_OP_CNT_CTL (1ULL<<19)
#define IBS_OP_VAL (1ULL<<18)
#define IBS_OP_ENABLE (1ULL<<17)
+#define IBS_OP_L3MISSONLY (1ULL<<16)
#define IBS_OP_MAX_CNT 0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT 0x007FFFFFULL /* not a register bit mask */
#define IBS_OP_MAX_CNT_EXT_MASK (0x7FULL<<20) /* separate upper 7 bits */
--
2.27.0