Message-ID: <a5cbc369-ada7-03aa-599b-f90c6306a124@amd.com>
Date: Tue, 26 Apr 2022 17:00:33 +0530
From: Ravi Bangoria <ravi.bangoria@....com>
To: Peter Zijlstra <peterz@...radead.org>
Cc: acme@...nel.org, mingo@...hat.com, mark.rutland@....com,
jolsa@...nel.org, namhyung@...nel.org, tglx@...utronix.de,
bp@...en8.de, irogers@...gle.com, yao.jin@...ux.intel.com,
james.clark@....com, leo.yan@...aro.org, kan.liang@...ux.intel.com,
ak@...ux.intel.com, eranian@...gle.com, like.xu.linux@...il.com,
x86@...nel.org, linux-perf-users@...r.kernel.org,
linux-kernel@...r.kernel.org, sandipan.das@....com,
ananth.narayan@....com, kim.phillips@....com, rrichter@....com,
santosh.shukla@....com, Ravi Bangoria <ravi.bangoria@....com>
Subject: Re: [PATCH 1/6] perf/amd/ibs: Add support for L3 miss filtering
On 26-Apr-22 3:37 PM, Peter Zijlstra wrote:
> On Mon, Apr 25, 2022 at 10:13:18AM +0530, Ravi Bangoria wrote:
>> IBS L3 miss filtering works by tagging an instruction on IBS counter
>> overflow and generating an NMI if the tagged instruction causes an L3
>> miss. Samples without an L3 miss are discarded and the counter is
>> reset with a random value (between 1-15 for the fetch PMU and 1-127
>> for the op PMU). This helps reduce sampling overhead when the user is
>> interested only in such samples. One use case of such filtered samples
>> is to feed data to a page-migration daemon in tiered memory systems.
>>
>> Add support for L3 miss filtering in the IBS driver via a new PMU
>> attribute "l3missonly". Example usage:
>>
>> # perf record -a -e ibs_op/l3missonly=1/ --raw-samples sleep 5
>>
>> Signed-off-by: Ravi Bangoria <ravi.bangoria@....com>
>> ---
>> arch/x86/events/amd/ibs.c | 42 ++++++++++++++++++++++---------
>> arch/x86/include/asm/perf_event.h | 3 +++
>> 2 files changed, 33 insertions(+), 12 deletions(-)
>>
>> diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
>> index 9739019d4b67..a5303d62060c 100644
>> --- a/arch/x86/events/amd/ibs.c
>> +++ b/arch/x86/events/amd/ibs.c
>> @@ -520,16 +520,12 @@ static void perf_ibs_read(struct perf_event *event) { }
>>
>> PMU_FORMAT_ATTR(rand_en, "config:57");
>> PMU_FORMAT_ATTR(cnt_ctl, "config:19");
>> +PMU_EVENT_ATTR_STRING(l3missonly, fetch_l3missonly, "config:59");
>> +PMU_EVENT_ATTR_STRING(l3missonly, op_l3missonly, "config:16");
>>
>> -static struct attribute *ibs_fetch_format_attrs[] = {
>> - &format_attr_rand_en.attr,
>> - NULL,
>> -};
>> -
>> -static struct attribute *ibs_op_format_attrs[] = {
>> - NULL, /* &format_attr_cnt_ctl.attr if IBS_CAPS_OPCNT */
>> - NULL,
>> -};
>> +/* size = nr attrs plus NULL at the end */
>> +static struct attribute *ibs_fetch_format_attrs[3];
>> +static struct attribute *ibs_op_format_attrs[3];
>>
>> static struct perf_ibs perf_ibs_fetch = {
>> .pmu = {
>> @@ -759,9 +755,9 @@ static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
>> return ret;
>> }
>>
>> -static __init void perf_event_ibs_init(void)
>> +static __init void perf_ibs_fetch_prepare(void)
>> {
>> - struct attribute **attr = ibs_op_format_attrs;
>> + struct attribute **format_attrs = perf_ibs_fetch.format_attrs;
>>
>> /*
>> * Some chips fail to reset the fetch count when it is written; instead
>> @@ -773,11 +769,22 @@ static __init void perf_event_ibs_init(void)
>> if (boot_cpu_data.x86 == 0x19 && boot_cpu_data.x86_model < 0x10)
>> perf_ibs_fetch.fetch_ignore_if_zero_rip = 1;
>>
>> + *format_attrs++ = &format_attr_rand_en.attr;
>> + if (ibs_caps & IBS_CAPS_ZEN4IBSEXTENSIONS) {
>> + perf_ibs_fetch.config_mask |= IBS_FETCH_L3MISSONLY;
>> + *format_attrs++ = &fetch_l3missonly.attr.attr;
>> + }
>> +
>> perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
>> +}
>> +
>> +static __init void perf_ibs_op_prepare(void)
>> +{
>> + struct attribute **format_attrs = perf_ibs_op.format_attrs;
>>
>> if (ibs_caps & IBS_CAPS_OPCNT) {
>> perf_ibs_op.config_mask |= IBS_OP_CNT_CTL;
>> - *attr++ = &format_attr_cnt_ctl.attr;
>> + *format_attrs++ = &format_attr_cnt_ctl.attr;
>> }
>>
>> if (ibs_caps & IBS_CAPS_OPCNTEXT) {
>> @@ -786,7 +793,18 @@ static __init void perf_event_ibs_init(void)
>> perf_ibs_op.cnt_mask |= IBS_OP_MAX_CNT_EXT_MASK;
>> }
>>
>> + if (ibs_caps & IBS_CAPS_ZEN4IBSEXTENSIONS) {
>> + perf_ibs_op.config_mask |= IBS_OP_L3MISSONLY;
>> + *format_attrs++ = &op_l3missonly.attr.attr;
>> + }
>> +
>> perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");
>> +}
>
> Right, so Greg told us to stop doing silly things like this and use
> .is_visible, also see commits like:
>
> b7c9b3927337 ("perf/x86/intel: Use ->is_visible callback for default group")
>
> There's quite a bit of that in the intel driver and some in the x86
> core code too. Please have a look.
Sure.
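
Something roughly like below is what I have in mind for the fetch side
(untested sketch just to confirm the direction; the group/callback names
are placeholders I picked, and rand_en/cnt_ctl would move into their own
"format" groups the same way). The op side would be analogous:

PMU_EVENT_ATTR_STRING(l3missonly, fetch_l3missonly, "config:59");

static umode_t
zen4_ibs_extensions_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{
        return ibs_caps & IBS_CAPS_ZEN4IBSEXTENSIONS ? attr->mode : 0;
}

static struct attribute *fetch_l3missonly_attrs[] = {
        &fetch_l3missonly.attr.attr,
        NULL,
};

/* Separate "format" group so only this attr is gated by the callback */
static struct attribute_group group_fetch_l3missonly = {
        .name           = "format",
        .attrs          = fetch_l3missonly_attrs,
        .is_visible     = zen4_ibs_extensions_is_visible,
};

static const struct attribute_group *fetch_attr_update[] = {
        &group_fetch_l3missonly,
        NULL,
};

static __init void perf_ibs_fetch_prepare(void)
{
        if (ibs_caps & IBS_CAPS_ZEN4IBSEXTENSIONS)
                perf_ibs_fetch.config_mask |= IBS_FETCH_L3MISSONLY;

        perf_ibs_fetch.pmu.attr_update = fetch_attr_update;
        perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
}

With that, only the config_mask update stays conditional in the init
path and sysfs visibility of l3missonly is gated purely by .is_visible.
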
Thanks for the review,
Ravi