Message-ID: <8c32c5be-02a4-4f4c-9a81-1afe649e88bb@linux.intel.com>
Date: Mon, 11 Aug 2025 17:00:14 -0700
From: "Liang, Kan" <kan.liang@...ux.intel.com>
To: Dapeng Mi <dapeng1.mi@...ux.intel.com>,
Peter Zijlstra <peterz@...radead.org>, Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Namhyung Kim <namhyung@...nel.org>, Ian Rogers <irogers@...gle.com>,
Adrian Hunter <adrian.hunter@...el.com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Andi Kleen <ak@...ux.intel.com>, Eranian Stephane <eranian@...gle.com>
Cc: linux-kernel@...r.kernel.org, linux-perf-users@...r.kernel.org,
Dapeng Mi <dapeng1.mi@...el.com>, Yi Lai <yi1.lai@...el.com>
Subject: Re: [Patch v2 6/6] perf/x86/intel: Add ICL_FIXED_0_ADAPTIVE bit into
INTEL_FIXED_BITS_MASK
On 2025-08-11 2:00 a.m., Dapeng Mi wrote:
> ICL_FIXED_0_ADAPTIVE is missing from INTEL_FIXED_BITS_MASK. Add it and
> opportunistically refine the fixed counter enabling code.
>
> Signed-off-by: Dapeng Mi <dapeng1.mi@...ux.intel.com>
> Tested-by: Yi Lai <yi1.lai@...el.com>
> ---
>  arch/x86/events/intel/core.c      | 10 +++-------
>  arch/x86/include/asm/perf_event.h |  6 +++++-
>  arch/x86/kvm/pmu.h                |  2 +-
>  3 files changed, 9 insertions(+), 9 deletions(-)
>
> diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
> index cdd10370ed95..1a91b527d3c5 100644
> --- a/arch/x86/events/intel/core.c
> +++ b/arch/x86/events/intel/core.c
> @@ -2849,8 +2849,8 @@ static void intel_pmu_enable_fixed(struct perf_event *event)
>  {
>  	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
>  	struct hw_perf_event *hwc = &event->hw;
> -	u64 mask, bits = 0;
>  	int idx = hwc->idx;
> +	u64 bits = 0;
>  
>  	if (is_topdown_idx(idx)) {
>  		struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
> @@ -2889,14 +2889,10 @@ static void intel_pmu_enable_fixed(struct perf_event *event)
>
>  	idx -= INTEL_PMC_IDX_FIXED;
>  	bits = intel_fixed_bits_by_idx(idx, bits);
> -	mask = intel_fixed_bits_by_idx(idx, INTEL_FIXED_BITS_MASK);
> -
> -	if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) {
> +	if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip)
>  		bits |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE);
> -		mask |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE);
> -	}
>
This changes the behavior. The mask will now always include the ADAPTIVE bit,
even on platforms which don't support adaptive PEBS.
The description doesn't explain why that's OK.
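
To make that concrete, here is a minimal user-space sketch (not kernel
code; the OLD_/NEW_FIXED_BITS_MASK names are only illustrative) of the
bits cleared for fixed counter 0 on a platform without adaptive PEBS,
before and after this patch:

#include <stdint.h>
#include <stdio.h>

#define INTEL_FIXED_BITS_STRIDE	4
#define ICL_FIXED_0_ADAPTIVE	(1ULL << 32)

/* old 4-bit mask vs. the new mask that always carries ADAPTIVE */
#define OLD_FIXED_BITS_MASK	0xFULL
#define NEW_FIXED_BITS_MASK	(0xFULL | ICL_FIXED_0_ADAPTIVE)

#define intel_fixed_bits_by_idx(_idx, _bits) \
	((_bits) << ((_idx) * INTEL_FIXED_BITS_STRIDE))

int main(void)
{
	int idx = 0;			/* fixed counter 0 */
	int pebs_baseline = 0;		/* platform without adaptive PEBS */
	uint64_t old_mask, new_mask;

	/* old code: ADAPTIVE only joins the mask when baseline PEBS exists */
	old_mask = intel_fixed_bits_by_idx(idx, OLD_FIXED_BITS_MASK);
	if (pebs_baseline)
		old_mask |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE);

	/* new code: ADAPTIVE is always part of the bits being cleared */
	new_mask = intel_fixed_bits_by_idx(idx, NEW_FIXED_BITS_MASK);

	printf("old mask: %#llx\n", (unsigned long long)old_mask);	/* 0xf */
	printf("new mask: %#llx\n", (unsigned long long)new_mask);	/* 0x10000000f */
	return 0;
}

So bit 32 of fixed_ctrl_val is now touched unconditionally, which is
the behavior change in question.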
Thanks,
Kan

> -	cpuc->fixed_ctrl_val &= ~mask;
> +	cpuc->fixed_ctrl_val &= ~intel_fixed_bits_by_idx(idx, INTEL_FIXED_BITS_MASK);
>  	cpuc->fixed_ctrl_val |= bits;
>  }
>  
> diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
> index f8247ac276c4..49a4d442f3fc 100644
> --- a/arch/x86/include/asm/perf_event.h
> +++ b/arch/x86/include/asm/perf_event.h
> @@ -35,7 +35,6 @@
> #define ARCH_PERFMON_EVENTSEL_EQ (1ULL << 36)
> #define ARCH_PERFMON_EVENTSEL_UMASK2 (0xFFULL << 40)
>
> -#define INTEL_FIXED_BITS_MASK 0xFULL
> #define INTEL_FIXED_BITS_STRIDE 4
> #define INTEL_FIXED_0_KERNEL (1ULL << 0)
> #define INTEL_FIXED_0_USER (1ULL << 1)
> @@ -48,6 +47,11 @@
> #define ICL_EVENTSEL_ADAPTIVE (1ULL << 34)
> #define ICL_FIXED_0_ADAPTIVE (1ULL << 32)
>
> +#define INTEL_FIXED_BITS_MASK					\
> +	(INTEL_FIXED_0_KERNEL | INTEL_FIXED_0_USER |		\
> +	 INTEL_FIXED_0_ANYTHREAD | INTEL_FIXED_0_ENABLE_PMI |	\
> +	 ICL_FIXED_0_ADAPTIVE)
> +
>  #define intel_fixed_bits_by_idx(_idx, _bits)			\
>  	((_bits) << ((_idx) * INTEL_FIXED_BITS_STRIDE))
>
> diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
> index ad89d0bd6005..103604c4b33b 100644
> --- a/arch/x86/kvm/pmu.h
> +++ b/arch/x86/kvm/pmu.h
> @@ -13,7 +13,7 @@
> #define MSR_IA32_MISC_ENABLE_PMU_RO_MASK (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL | \
>  					  MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)
>
> -/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
> +/* retrieve a fixed counter's bits out of IA32_FIXED_CTR_CTRL */
> #define fixed_ctrl_field(ctrl_reg, idx) \
>  	(((ctrl_reg) >> ((idx) * INTEL_FIXED_BITS_STRIDE)) & INTEL_FIXED_BITS_MASK)
>
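
For reference, a small user-space sketch of the fixed_ctrl_field()
arithmetic, assuming the widened INTEL_FIXED_BITS_MASK from this patch
and a made-up control register value:

#include <stdint.h>
#include <stdio.h>

#define INTEL_FIXED_BITS_STRIDE	4
/* widened mask from this patch: EN bits, AnyThread, PMI plus ADAPTIVE */
#define INTEL_FIXED_BITS_MASK	(0xFULL | (1ULL << 32))

#define fixed_ctrl_field(ctrl_reg, idx) \
	(((ctrl_reg) >> ((idx) * INTEL_FIXED_BITS_STRIDE)) & INTEL_FIXED_BITS_MASK)

int main(void)
{
	/* made-up IA32_FIXED_CTR_CTRL: counter 0 counts kernel+user (0x3),
	 * counter 1 counts kernel and raises a PMI (0x9) */
	uint64_t ctrl = 0x3ULL | (0x9ULL << 4);

	printf("fixed ctr 0: %#llx\n", (unsigned long long)fixed_ctrl_field(ctrl, 0)); /* 0x3 */
	printf("fixed ctr 1: %#llx\n", (unsigned long long)fixed_ctrl_field(ctrl, 1)); /* 0x9 */
	return 0;
}

Each counter's control bits live INTEL_FIXED_BITS_STRIDE (4) bits apart,
so the macro shifts by idx * 4 and then masks.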