Message-ID: <a95b1963-5f54-46d2-a2b4-159730ff7fed@amd.com>
Date: Mon, 27 May 2024 11:57:17 +0530
From: Dhananjay Ugwekar <Dhananjay.Ugwekar@....com>
To: mingo@...hat.com, tglx@...utronix.de, bp@...en8.de,
 dave.hansen@...ux.intel.com, x86@...nel.org
Cc: peterz@...radead.org, ananth.narayan@....com, gautham.shenoy@....com,
 linux-perf-users@...r.kernel.org, linux-hardening@...r.kernel.org,
 linux-kernel@...r.kernel.org, sandipan.das@....com,
 alexander.shishkin@...ux.intel.com, irogers@...gle.com,
 gustavoars@...nel.org, kprateek.nayak@....com, ravi.bangoria@....com
Subject: Re: [PATCH v2 2/2] perf/x86/rapl: Fix the energy-pkg event for AMD
 CPUs

Hello Ingo,

Gentle ping. Since commit 63edbaa48a57 ("x86/cpu/topology: Add support for the AMD 0x80000026 leaf")
went into 6.10-rc1 as part of https://lore.kernel.org/lkml/ZkHBjIwLlEkVD2Vu@gmail.com/, can this fix
be considered as 6.10-rc material?

This still cleanly applies on top of 6.10-rc1.

Thanks,
Dhananjay

On 5/4/2024 9:39 AM, Dhananjay Ugwekar wrote:
> After commit 63edbaa48a57 ("x86/cpu/topology: Add support for the AMD 0x80000026 leaf"),
> on AMD processors that support extended CPUID leaf 0x80000026, the
> topology_die_cpumask() and topology_logical_die_id() macros no longer
> return the package cpumask and package id; instead they return the CCD
> (Core Complex Die) mask and id respectively. This changes the scope of
> the energy-pkg event from package to CCD.
> 
> Replacing these macros with their package counterparts fixes the
> energy-pkg event for AMD CPUs.
> 
> However, due to the difference between the scope of the energy-pkg event
> for Intel and AMD CPUs, we have to replace these macros conditionally,
> only for AMD CPUs.
> 
> On a 12 CCD 1 Package AMD Zen4 Genoa machine:
> 
> Before:
> $ cat /sys/devices/power/cpumask
> 0,8,16,24,32,40,48,56,64,72,80,88
> 
> The expected cpumask here is just "0": since this is a package-scope
> event, only one CPU should collect the event for all the CPUs in the
> package.
> 
> After:
> $ cat /sys/devices/power/cpumask
> 0
> 
> Signed-off-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@....com>
> Fixes: 63edbaa48a57 ("x86/cpu/topology: Add support for the AMD 0x80000026 leaf")
> ---
> v1->v2:
> * Fix the const qualifier discarded error (kernel test bot)
> * Change the cpumask variable name from cpumask to rapl_pmu_cpumask
> ---
>  arch/x86/events/rapl.c | 30 ++++++++++++++++++++++++++----
>  1 file changed, 26 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
> index 46e673585560..d77bf7959a43 100644
> --- a/arch/x86/events/rapl.c
> +++ b/arch/x86/events/rapl.c
> @@ -102,6 +102,10 @@ static struct perf_pmu_events_attr event_attr_##v = {				\
>  	.event_str	= str,							\
>  };
>  
> +#define rapl_pmu_is_pkg_scope()				\
> +	(boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||	\
> +	 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
> +
>  struct rapl_pmu {
>  	raw_spinlock_t		lock;
>  	int			n_active;
> @@ -139,9 +143,21 @@ static unsigned int rapl_cntr_mask;
>  static u64 rapl_timer_ms;
>  static struct perf_msr *rapl_msrs;
>  
> +static inline unsigned int get_rapl_pmu_idx(int cpu)
> +{
> +	return rapl_pmu_is_pkg_scope() ? topology_logical_package_id(cpu) :
> +					 topology_logical_die_id(cpu);
> +}
> +
> +static inline const struct cpumask *get_rapl_pmu_cpumask(int cpu)
> +{
> +	return rapl_pmu_is_pkg_scope() ? topology_core_cpumask(cpu) :
> +					 topology_die_cpumask(cpu);
> +}
> +
>  static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
>  {
> -	unsigned int rapl_pmu_idx = topology_logical_die_id(cpu);
> +	unsigned int rapl_pmu_idx = get_rapl_pmu_idx(cpu);
>  
>  	/*
>  	 * The unsigned check also catches the '-1' return value for non
> @@ -542,6 +558,7 @@ static struct perf_msr amd_rapl_msrs[] = {
>  
>  static int rapl_cpu_offline(unsigned int cpu)
>  {
> +	const struct cpumask *rapl_pmu_cpumask = get_rapl_pmu_cpumask(cpu);
>  	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
>  	int target;
>  
> @@ -551,7 +568,7 @@ static int rapl_cpu_offline(unsigned int cpu)
>  
>  	pmu->cpu = -1;
>  	/* Find a new cpu to collect rapl events */
> -	target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
> +	target = cpumask_any_but(rapl_pmu_cpumask, cpu);
>  
>  	/* Migrate rapl events to the new target */
>  	if (target < nr_cpu_ids) {
> @@ -564,6 +581,8 @@ static int rapl_cpu_offline(unsigned int cpu)
>  
>  static int rapl_cpu_online(unsigned int cpu)
>  {
> +	unsigned int rapl_pmu_idx = get_rapl_pmu_idx(cpu);
> +	const struct cpumask *rapl_pmu_cpumask = get_rapl_pmu_cpumask(cpu);
>  	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
>  	int target;
>  
> @@ -578,14 +597,14 @@ static int rapl_cpu_online(unsigned int cpu)
>  		pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
>  		rapl_hrtimer_init(pmu);
>  
> -		rapl_pmus->pmus[topology_logical_die_id(cpu)] = pmu;
> +		rapl_pmus->pmus[rapl_pmu_idx] = pmu;
>  	}
>  
>  	/*
>  	 * Check if there is an online cpu in the package which collects rapl
>  	 * events already.
>  	 */
> -	target = cpumask_any_and(&rapl_cpu_mask, topology_die_cpumask(cpu));
> +	target = cpumask_any_and(&rapl_cpu_mask, rapl_pmu_cpumask);
>  	if (target < nr_cpu_ids)
>  		return 0;
>  
> @@ -676,6 +695,9 @@ static int __init init_rapl_pmus(void)
>  {
>  	int nr_rapl_pmu = topology_max_packages() * topology_max_dies_per_package();
>  
> +	if (rapl_pmu_is_pkg_scope())
> +		nr_rapl_pmu = topology_max_packages();
> +
>  	rapl_pmus = kzalloc(struct_size(rapl_pmus, pmus, nr_rapl_pmu), GFP_KERNEL);
>  	if (!rapl_pmus)
>  		return -ENOMEM;
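
For anyone who wants to double-check the resulting scope locally, a minimal
verification sketch (assuming a kernel with this patch applied, perf built
with support for the RAPL "power" PMU, and root privileges; the expected
single-CPU cpumask comes from the 1-package Genoa example above):

$ cat /sys/devices/power/cpumask              # expect a single CPU, e.g. "0"
$ perf stat -e power/energy-pkg/ -a sleep 1   # one package-scope energy count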

