Message-ID: <ba31c391-e703-4ff6-9742-4518d36bffa6@linux.intel.com>
Date: Wed, 22 Oct 2025 13:27:31 +0800
From: "Mi, Dapeng" <dapeng1.mi@...ux.intel.com>
To: Peter Zijlstra <peterz@...radead.org>
Cc: Ingo Molnar <mingo@...hat.com>, Arnaldo Carvalho de Melo
<acme@...nel.org>, Namhyung Kim <namhyung@...nel.org>,
Ian Rogers <irogers@...gle.com>, Adrian Hunter <adrian.hunter@...el.com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Andi Kleen <ak@...ux.intel.com>, Eranian Stephane <eranian@...gle.com>,
linux-kernel@...r.kernel.org, linux-perf-users@...r.kernel.org,
Dapeng Mi <dapeng1.mi@...el.com>
Subject: Re: [Patch v8 05/12] perf/x86/intel: Initialize architectural PEBS
On 10/21/2025 11:43 PM, Peter Zijlstra wrote:
> On Wed, Oct 15, 2025 at 02:44:15PM +0800, Dapeng Mi wrote:
>> diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
>> index c88bcd5d2bc4..bfb123ff7c9a 100644
>> --- a/arch/x86/events/intel/core.c
>> +++ b/arch/x86/events/intel/core.c
>> @@ -5273,34 +5273,58 @@ static inline bool intel_pmu_broken_perf_cap(void)
>>
>> static void update_pmu_cap(struct pmu *pmu)
>> {
>> - unsigned int cntr, fixed_cntr, ecx, edx;
>> - union cpuid35_eax eax;
>> - union cpuid35_ebx ebx;
>> + unsigned int eax, ebx, ecx, edx;
>> + union cpuid35_eax eax_0;
>> + union cpuid35_ebx ebx_0;
>> + u64 cntrs_mask = 0;
>> + u64 pebs_mask = 0;
>> + u64 pdists_mask = 0;
>>
>> - cpuid(ARCH_PERFMON_EXT_LEAF, &eax.full, &ebx.full, &ecx, &edx);
>> + cpuid(ARCH_PERFMON_EXT_LEAF, &eax_0.full, &ebx_0.full, &ecx, &edx);
>>
>> - if (ebx.split.umask2)
>> + if (ebx_0.split.umask2)
>> hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_UMASK2;
>> - if (ebx.split.eq)
>> + if (ebx_0.split.eq)
>> hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_EQ;
>>
>> - if (eax.split.cntr_subleaf) {
>> + if (eax_0.split.cntr_subleaf) {
>> cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_NUM_COUNTER_LEAF,
>> - &cntr, &fixed_cntr, &ecx, &edx);
>> - hybrid(pmu, cntr_mask64) = cntr;
>> - hybrid(pmu, fixed_cntr_mask64) = fixed_cntr;
>> + &eax, &ebx, &ecx, &edx);
>> + hybrid(pmu, cntr_mask64) = eax;
>> + hybrid(pmu, fixed_cntr_mask64) = ebx;
>> + cntrs_mask = (u64)ebx << INTEL_PMC_IDX_FIXED | eax;
>> }
>>
>> - if (eax.split.acr_subleaf) {
>> + if (eax_0.split.acr_subleaf) {
>> cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_ACR_LEAF,
>> - &cntr, &fixed_cntr, &ecx, &edx);
>> + &eax, &ebx, &ecx, &edx);
>> /* The mask of the counters which can be reloaded */
>> - hybrid(pmu, acr_cntr_mask64) = cntr | ((u64)fixed_cntr << INTEL_PMC_IDX_FIXED);
>> + hybrid(pmu, acr_cntr_mask64) = eax | ((u64)ebx << INTEL_PMC_IDX_FIXED);
>>
>> /* The mask of the counters which can cause a reload of reloadable counters */
>> hybrid(pmu, acr_cause_mask64) = ecx | ((u64)edx << INTEL_PMC_IDX_FIXED);
>> }
>>
>> + /* Bits[5:4] should be set simultaneously if arch-PEBS is supported */
>> + if (eax_0.split.pebs_caps_subleaf && eax_0.split.pebs_cnts_subleaf) {
>> + cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_PEBS_CAP_LEAF,
>> + &eax, &ebx, &ecx, &edx);
>> + hybrid(pmu, arch_pebs_cap).caps = (u64)ebx << 32;
>> +
>> + cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_PEBS_COUNTER_LEAF,
>> + &eax, &ebx, &ecx, &edx);
>> + pebs_mask = ((u64)ecx << INTEL_PMC_IDX_FIXED) | eax;
>> + pdists_mask = ((u64)edx << INTEL_PMC_IDX_FIXED) | ebx;
>> + hybrid(pmu, arch_pebs_cap).counters = pebs_mask;
>> + hybrid(pmu, arch_pebs_cap).pdists = pdists_mask;
>> +
>> + if (WARN_ON((pebs_mask | pdists_mask) & ~cntrs_mask))
>> + x86_pmu.arch_pebs = 0;
>> + } else {
>> + WARN_ON(x86_pmu.arch_pebs == 1);
>> + x86_pmu.arch_pebs = 0;
>> + }
>> +
>> if (!intel_pmu_broken_perf_cap()) {
>> /* Perf Metric (Bit 15) and PEBS via PT (Bit 16) are hybrid enumeration */
>> rdmsrq(MSR_IA32_PERF_CAPABILITIES, hybrid(pmu, intel_cap).capabilities);
> I've stuck this on top.
>
> --- a/arch/x86/events/intel/core.c
> +++ b/arch/x86/events/intel/core.c
> @@ -5271,6 +5271,8 @@ static inline bool intel_pmu_broken_perf
> return false;
> }
>
> +#define counter_mask(_gp, _fixed) ((_gp) | ((u64)(_fixed) << INTEL_PMC_IDX_FIXED))
> +
> static void update_pmu_cap(struct pmu *pmu)
> {
> unsigned int eax, ebx, ecx, edx;
> @@ -5292,17 +5294,16 @@ static void update_pmu_cap(struct pmu *p
> &eax, &ebx, &ecx, &edx);
> hybrid(pmu, cntr_mask64) = eax;
> hybrid(pmu, fixed_cntr_mask64) = ebx;
> - cntrs_mask = (u64)ebx << INTEL_PMC_IDX_FIXED | eax;
> + cntrs_mask = counter_mask(eax, ebx);
> }
>
> if (eax_0.split.acr_subleaf) {
> cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_ACR_LEAF,
> &eax, &ebx, &ecx, &edx);
> /* The mask of the counters which can be reloaded */
> - hybrid(pmu, acr_cntr_mask64) = eax | ((u64)ebx << INTEL_PMC_IDX_FIXED);
> -
> + hybrid(pmu, acr_cntr_mask64) = counter_mask(eax, ebx);
> /* The mask of the counters which can cause a reload of reloadable counters */
> - hybrid(pmu, acr_cause_mask64) = ecx | ((u64)edx << INTEL_PMC_IDX_FIXED);
> + hybrid(pmu, acr_cause_mask64) = counter_mask(ecx, edx);
> }
>
> /* Bits[5:4] should be set simultaneously if arch-PEBS is supported */
> @@ -5313,8 +5314,8 @@ static void update_pmu_cap(struct pmu *p
>
> cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_PEBS_COUNTER_LEAF,
> &eax, &ebx, &ecx, &edx);
> - pebs_mask = ((u64)ecx << INTEL_PMC_IDX_FIXED) | eax;
> - pdists_mask = ((u64)edx << INTEL_PMC_IDX_FIXED) | ebx;
> + pebs_mask = counter_mask(eax, ecx);
> + pdists_mask = counter_mask(ebx, edx);
> hybrid(pmu, arch_pebs_cap).counters = pebs_mask;
> hybrid(pmu, arch_pebs_cap).pdists = pdists_mask;
Nice suggestion. Will do. Thanks.
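For reference, a minimal userspace sketch of what the suggested counter_mask()
helper evaluates to; the eax/ebx sample values below are hypothetical, and
INTEL_PMC_IDX_FIXED is 32 in the upstream kernel headers:

/*
 * Standalone illustration (not kernel code) of how counter_mask()
 * merges the general-purpose and fixed counter bitmaps returned by
 * the CPUID 0x23 sub-leaves into a single u64 mask.
 */
#include <stdint.h>
#include <stdio.h>

#define INTEL_PMC_IDX_FIXED 32
#define counter_mask(_gp, _fixed) \
	((_gp) | ((uint64_t)(_fixed) << INTEL_PMC_IDX_FIXED))

int main(void)
{
	uint32_t eax = 0xff;	/* hypothetical: 8 GP counters    */
	uint32_t ebx = 0x7;	/* hypothetical: 3 fixed counters */

	/* GP counters land in bits [7:0], fixed counters in bits [34:32]. */
	printf("cntrs_mask = %#llx\n",
	       (unsigned long long)counter_mask(eax, ebx));
	return 0;
}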