Message-ID: <20240711102436.4432-6-Dhananjay.Ugwekar@amd.com>
Date: Thu, 11 Jul 2024 10:24:33 +0000
From: Dhananjay Ugwekar <Dhananjay.Ugwekar@....com>
To: <peterz@...radead.org>, <mingo@...hat.com>, <acme@...nel.org>,
<namhyung@...nel.org>, <mark.rutland@....com>,
<alexander.shishkin@...ux.intel.com>, <jolsa@...nel.org>,
<irogers@...gle.com>, <adrian.hunter@...el.com>, <kan.liang@...ux.intel.com>,
<tglx@...utronix.de>, <bp@...en8.de>, <dave.hansen@...ux.intel.com>,
<x86@...nel.org>, <kees@...nel.org>, <gustavoars@...nel.org>,
<rui.zhang@...el.com>, <oleksandr@...alenko.name>
CC: <linux-perf-users@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<linux-hardening@...r.kernel.org>, <ananth.narayan@....com>,
<gautham.shenoy@....com>, <kprateek.nayak@....com>, <ravi.bangoria@....com>,
<sandipan.das@....com>, <linux-pm@...r.kernel.org>,
<Dhananjay.Ugwekar@....com>
Subject: [PATCH v4 05/11] perf/x86/rapl: Move cpumask variable to rapl_pmus struct
Prepare for the addition of per-core energy counter support for AMD
CPUs.

The per-core energy counter PMU will need its own cpumask. Instead of
adding yet another global cpumask variable for it, move the cpumask into
the rapl_pmus struct so that each PMU instance carries its own mask.
That way, a future PMU with a different scope (e.g. per-CCD) will not
need a new global cpumask variable either.
No functional change.
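As an illustration of that direction (not part of this patch; the
rapl_pmus_per_core pointer and the init_rapl_pmus_one() helper below are
hypothetical names used only for the example), a second PMU instance
would simply own its mask:

static struct rapl_pmus *rapl_pmus;		/* existing package-scope PMU  */
static struct rapl_pmus *rapl_pmus_per_core;	/* possible per-core-scope PMU */

static int __init init_rapl_pmus_one(struct rapl_pmus **out, unsigned int nr_rapl_pmu)
{
	struct rapl_pmus *pmus;

	pmus = kzalloc(struct_size(pmus, rapl_pmu, nr_rapl_pmu), GFP_KERNEL);
	if (!pmus)
		return -ENOMEM;

	/* Each PMU instance owns its cpumask, no extra global needed. */
	if (!alloc_cpumask_var(&pmus->cpumask, GFP_KERNEL)) {
		kfree(pmus);
		return -ENOMEM;
	}

	pmus->nr_rapl_pmu = nr_rapl_pmu;
	*out = pmus;
	return 0;
}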
Signed-off-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@....com>
---
Changes in v4:
* Use cpumask_var_t instead of cpumask_t (Peter)
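  The cpumask_var_t switch mainly matters with CONFIG_CPUMASK_OFFSTACK=y,
  where the mask storage is allocated separately instead of being embedded
  in the variable. Minimal usage sketch (illustrative only, not taken from
  this patch):

	cpumask_var_t mask;

	/* Allocates backing storage only with CONFIG_CPUMASK_OFFSTACK=y,
	 * otherwise this always succeeds without allocating. */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(cpu, mask);	/* same API as a plain struct cpumask */
	free_cpumask_var(mask);		/* no-op when the mask is embedded */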
---
arch/x86/events/rapl.c | 15 +++++++++------
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
index d231a9c068af..e3d0a82e12b9 100644
--- a/arch/x86/events/rapl.c
+++ b/arch/x86/events/rapl.c
@@ -122,6 +122,7 @@ struct rapl_pmu {
 
 struct rapl_pmus {
 	struct pmu		pmu;
+	cpumask_var_t		cpumask;
 	unsigned int		nr_rapl_pmu;
 	struct rapl_pmu		*rapl_pmu[] __counted_by(nr_rapl_pmu);
 };
@@ -142,7 +143,6 @@ struct rapl_model {
 /* 1/2^hw_unit Joule */
 static int rapl_hw_unit[NR_RAPL_DOMAINS] __read_mostly;
 static struct rapl_pmus *rapl_pmus;
-static cpumask_t rapl_cpu_mask;
 static unsigned int rapl_cntr_mask;
 static u64 rapl_timer_ms;
 static struct perf_msr *rapl_msrs;
@@ -401,7 +401,7 @@ static void rapl_pmu_event_read(struct perf_event *event)
 static ssize_t rapl_get_attr_cpumask(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
-	return cpumap_print_to_pagebuf(true, buf, &rapl_cpu_mask);
+	return cpumap_print_to_pagebuf(true, buf, rapl_pmus->cpumask);
 }
 
 static DEVICE_ATTR(cpumask, S_IRUGO, rapl_get_attr_cpumask, NULL);
@@ -572,7 +572,7 @@ static int rapl_cpu_offline(unsigned int cpu)
 	int target;
 
 	/* Check if exiting cpu is used for collecting rapl events */
-	if (!cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask))
+	if (!cpumask_test_and_clear_cpu(cpu, rapl_pmus->cpumask))
 		return 0;
 
 	rapl_pmu->cpu = -1;
@@ -581,7 +581,7 @@
 
 	/* Migrate rapl events to the new target */
 	if (target < nr_cpu_ids) {
-		cpumask_set_cpu(target, &rapl_cpu_mask);
+		cpumask_set_cpu(target, rapl_pmus->cpumask);
 		rapl_pmu->cpu = target;
 		perf_pmu_migrate_context(rapl_pmu->pmu, cpu, target);
 	}
@@ -613,11 +613,11 @@ static int rapl_cpu_online(unsigned int cpu)
 	 * Check if there is an online cpu in the package which collects rapl
 	 * events already.
 	 */
-	target = cpumask_any_and(&rapl_cpu_mask, rapl_pmu_cpumask);
+	target = cpumask_any_and(rapl_pmus->cpumask, rapl_pmu_cpumask);
 	if (target < nr_cpu_ids)
 		return 0;
 
-	cpumask_set_cpu(cpu, &rapl_cpu_mask);
+	cpumask_set_cpu(cpu, rapl_pmus->cpumask);
 	rapl_pmu->cpu = cpu;
 	return 0;
 }
@@ -710,6 +710,9 @@ static int __init init_rapl_pmus(void)
 	rapl_pmus = kzalloc(struct_size(rapl_pmus, rapl_pmu, nr_rapl_pmu), GFP_KERNEL);
 	if (!rapl_pmus)
 		return -ENOMEM;
+
+	if (!alloc_cpumask_var(&rapl_pmus->cpumask, GFP_KERNEL))
+		return -ENOMEM;
 
 	rapl_pmus->nr_rapl_pmu = nr_rapl_pmu;
 	rapl_pmus->pmu.attr_groups = rapl_attr_groups;
--
2.34.1