Message-ID: <20220509044914.1473-4-ravi.bangoria@amd.com>
Date: Mon, 9 May 2022 10:19:09 +0530
From: Ravi Bangoria <ravi.bangoria@....com>
To: <peterz@...radead.org>, <acme@...nel.org>
CC: <ravi.bangoria@....com>, <rrichter@....com>, <mingo@...hat.com>,
<mark.rutland@....com>, <jolsa@...nel.org>, <namhyung@...nel.org>,
<tglx@...utronix.de>, <bp@...en8.de>, <irogers@...gle.com>,
<yao.jin@...ux.intel.com>, <james.clark@....com>,
<leo.yan@...aro.org>, <kan.liang@...ux.intel.com>,
<ak@...ux.intel.com>, <eranian@...gle.com>,
<like.xu.linux@...il.com>, <x86@...nel.org>,
<linux-perf-users@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<sandipan.das@....com>, <ananth.narayan@....com>,
<kim.phillips@....com>, <santosh.shukla@....com>
Subject: [PATCH v2 3/8] perf/amd/ibs: Add support for L3 miss filtering

IBS L3 miss filtering works by tagging an instruction on IBS counter
overflow and generating an NMI if the tagged instruction causes an L3
miss. Samples without an L3 miss are discarded and the counter is reset
with a random value (between 1-15 for the fetch PMU and 1-127 for the op
PMU). This helps reduce sampling overhead when the user is interested
only in such samples. One use case of such filtered samples is to feed
data to a page-migration daemon in tiered memory systems.

Add support for L3 miss filtering in the IBS driver via the new PMU
attribute "l3missonly". Example usage:

  # perf record -a -e ibs_op/l3missonly=1/ --raw-samples sleep 5
Signed-off-by: Ravi Bangoria <ravi.bangoria@....com>
---
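For reference, a minimal userspace sketch (illustrative only, not part of
this patch) of opening an ibs_op event with l3missonly=1 through
perf_event_open(). The sysfs paths are the standard dynamic-PMU locations,
the config bit position matches the "config:16" format attribute added
below, read_pmu_type() is just a local helper for the example, and error
handling is kept minimal:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Read a dynamic PMU type id from sysfs (local helper for this example). */
static int read_pmu_type(const char *path)
{
	FILE *f = fopen(path, "r");
	int type = -1;

	if (f) {
		if (fscanf(f, "%d", &type) != 1)
			type = -1;
		fclose(f);
	}
	return type;
}

int main(void)
{
	struct perf_event_attr attr;
	int type, l3missonly, fd;

	type = read_pmu_type("/sys/bus/event_source/devices/ibs_op/type");
	if (type < 0)
		return 1;

	/* The l3missonly format attribute is visible only on supported CPUs. */
	l3missonly = !access("/sys/bus/event_source/devices/ibs_op/format/l3missonly", F_OK);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.sample_period = 0x10000;		/* IBS wants the low 4 bits clear */
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_RAW;

	if (l3missonly)
		attr.config |= 1ULL << 16;	/* l3missonly=1, i.e. config:16 */
	else
		fprintf(stderr, "l3missonly not supported; sampling all ops\n");

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	/* ... mmap the ring buffer and consume raw IBS samples ... */
	close(fd);
	return 0;
}
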
arch/x86/events/amd/ibs.c | 67 +++++++++++++++++++++++++++----
arch/x86/include/asm/perf_event.h | 3 ++
2 files changed, 63 insertions(+), 7 deletions(-)

diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index 785212b5dfd6..52d2eb9ff19a 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -534,22 +534,46 @@ static const struct attribute_group *empty_attr_groups[] = {
PMU_FORMAT_ATTR(rand_en, "config:57");
PMU_FORMAT_ATTR(cnt_ctl, "config:19");
+PMU_EVENT_ATTR_STRING(l3missonly, fetch_l3missonly, "config:59");
+PMU_EVENT_ATTR_STRING(l3missonly, op_l3missonly, "config:16");
+
+static umode_t
+zen4_ibs_extensions_is_visible(struct kobject *kobj, struct attribute *attr, int i)
+{
+ return ibs_caps & IBS_CAPS_ZEN4IBSEXTENSIONS ? attr->mode : 0;
+}
static struct attribute *rand_en_attrs[] = {
&format_attr_rand_en.attr,
NULL,
};
+static struct attribute *fetch_l3missonly_attrs[] = {
+ &fetch_l3missonly.attr.attr,
+ NULL,
+};
+
static struct attribute_group group_rand_en = {
.name = "format",
.attrs = rand_en_attrs,
};
+static struct attribute_group group_fetch_l3missonly = {
+ .name = "format",
+ .attrs = fetch_l3missonly_attrs,
+ .is_visible = zen4_ibs_extensions_is_visible,
+};
+
static const struct attribute_group *fetch_attr_groups[] = {
&group_rand_en,
NULL,
};
+static const struct attribute_group *fetch_attr_update[] = {
+ &group_fetch_l3missonly,
+ NULL,
+};
+
static umode_t
cnt_ctl_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{
@@ -561,14 +585,26 @@ static struct attribute *cnt_ctl_attrs[] = {
NULL,
};
+static struct attribute *op_l3missonly_attrs[] = {
+ &op_l3missonly.attr.attr,
+ NULL,
+};
+
static struct attribute_group group_cnt_ctl = {
.name = "format",
.attrs = cnt_ctl_attrs,
.is_visible = cnt_ctl_is_visible,
};
+static struct attribute_group group_op_l3missonly = {
+ .name = "format",
+ .attrs = op_l3missonly_attrs,
+ .is_visible = zen4_ibs_extensions_is_visible,
+};
+
static const struct attribute_group *op_attr_update[] = {
&group_cnt_ctl,
+ &group_op_l3missonly,
NULL,
};
@@ -787,10 +823,8 @@ static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
return ret;
}
-static __init int perf_event_ibs_init(void)
+static __init int perf_ibs_fetch_init(void)
{
- int ret;
-
/*
* Some chips fail to reset the fetch count when it is written; instead
* they need a 0-1 transition of IbsFetchEn.
@@ -801,12 +835,17 @@ static __init int perf_event_ibs_init(void)
if (boot_cpu_data.x86 == 0x19 && boot_cpu_data.x86_model < 0x10)
perf_ibs_fetch.fetch_ignore_if_zero_rip = 1;
+ if (ibs_caps & IBS_CAPS_ZEN4IBSEXTENSIONS)
+ perf_ibs_fetch.config_mask |= IBS_FETCH_L3MISSONLY;
+
perf_ibs_fetch.pmu.attr_groups = fetch_attr_groups;
+ perf_ibs_fetch.pmu.attr_update = fetch_attr_update;
- ret = perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
- if (ret)
- return ret;
+ return perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
+}
+static __init int perf_ibs_op_init(void)
+{
if (ibs_caps & IBS_CAPS_OPCNT)
perf_ibs_op.config_mask |= IBS_OP_CNT_CTL;
@@ -816,10 +855,24 @@ static __init int perf_event_ibs_init(void)
perf_ibs_op.cnt_mask |= IBS_OP_MAX_CNT_EXT_MASK;
}
+ if (ibs_caps & IBS_CAPS_ZEN4IBSEXTENSIONS)
+ perf_ibs_op.config_mask |= IBS_OP_L3MISSONLY;
+
perf_ibs_op.pmu.attr_groups = empty_attr_groups;
perf_ibs_op.pmu.attr_update = op_attr_update;
- ret = perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");
+ return perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");
+}
+
+static __init int perf_event_ibs_init(void)
+{
+ int ret;
+
+ ret = perf_ibs_fetch_init();
+ if (ret)
+ return ret;
+
+ ret = perf_ibs_op_init();
if (ret)
goto err_op;
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index b06e4c573add..a24b637a6e1d 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -391,6 +391,7 @@ struct pebs_xmm {
#define IBS_CAPS_OPBRNFUSE (1U<<8)
#define IBS_CAPS_FETCHCTLEXTD (1U<<9)
#define IBS_CAPS_OPDATA4 (1U<<10)
+#define IBS_CAPS_ZEN4IBSEXTENSIONS (1U<<11)
#define IBS_CAPS_DEFAULT (IBS_CAPS_AVAIL \
| IBS_CAPS_FETCHSAM \
@@ -404,6 +405,7 @@ struct pebs_xmm {
#define IBSCTL_LVT_OFFSET_MASK 0x0F
/* IBS fetch bits/masks */
+#define IBS_FETCH_L3MISSONLY (1ULL<<59)
#define IBS_FETCH_RAND_EN (1ULL<<57)
#define IBS_FETCH_VAL (1ULL<<49)
#define IBS_FETCH_ENABLE (1ULL<<48)
@@ -420,6 +422,7 @@ struct pebs_xmm {
#define IBS_OP_CNT_CTL (1ULL<<19)
#define IBS_OP_VAL (1ULL<<18)
#define IBS_OP_ENABLE (1ULL<<17)
+#define IBS_OP_L3MISSONLY (1ULL<<16)
#define IBS_OP_MAX_CNT 0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT 0x007FFFFFULL /* not a register bit mask */
#define IBS_OP_MAX_CNT_EXT_MASK (0x7FULL<<20) /* separate upper 7 bits */
--
2.27.0