Message-ID: <20211029115643.32351-5-bbhushan2@marvell.com>
Date: Fri, 29 Oct 2021 17:26:43 +0530
From: Bharat Bhushan <bbhushan2@...vell.com>
To: <will@...nel.org>, <mark.rutland@....com>, <robh+dt@...nel.org>,
<bbudiredla@...vell.com>, <sgoutham@...vell.com>,
<linux-arm-kernel@...ts.infradead.org>,
<devicetree@...r.kernel.org>, <linux-kernel@...r.kernel.org>
CC: Bharat Bhushan <bbhushan2@...vell.com>
Subject: [PATCH v6 4/4] perf/marvell: cn10k DDR perf event core ownership
The DDR perf event counters are not per-core, so they should be accessed
by only one core at a time. Select a new owning core when the previously
owning core goes offline.
Signed-off-by: Bharat Bhushan <bbhushan2@...vell.com>
---
v1->v6
- No Change
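
Note for reviewers: the patch follows the usual uncore-PMU hotplug pattern —
register a dynamic multi-instance cpuhp state at module init, add each probed
device as an instance, and migrate the perf context away from the dying CPU in
the offline callback. The sketch below is only a minimal illustration of that
pattern, not this driver's exact code; struct foo_pmu and its members are
placeholders, and the real implementation is in the diff that follows.

#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/perf_event.h>

/* Hypothetical PMU instance; the real one is struct cn10k_ddr_pmu below. */
struct foo_pmu {
	struct pmu pmu;
	unsigned int cpu;	/* CPU that currently owns the counters */
	struct hlist_node node;	/* link into the cpuhp state's instance list */
};

static int foo_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct foo_pmu *p = hlist_entry_safe(node, struct foo_pmu, node);
	unsigned int target;

	if (cpu != p->cpu)		/* some other CPU went down, nothing to do */
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)	/* no other CPU left online */
		return 0;

	/* Move the active events, and counter ownership, to the new CPU. */
	perf_pmu_migrate_context(&p->pmu, cpu, target);
	p->cpu = target;
	return 0;
}

Module init registers the state once with cpuhp_setup_state_multi(), each
device adds its node with cpuhp_state_add_instance_nocalls() before
perf_pmu_register(), and the remove path drops the instance before
unregistering, mirroring the probe/remove hunks below.
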
drivers/perf/marvell_cn10k_ddr_pmu.c | 50 ++++++++++++++++++++++++++--
include/linux/cpuhotplug.h | 1 +
2 files changed, 49 insertions(+), 2 deletions(-)
diff --git a/drivers/perf/marvell_cn10k_ddr_pmu.c b/drivers/perf/marvell_cn10k_ddr_pmu.c
index 21fccb9090c5..bef0cee3a46a 100644
--- a/drivers/perf/marvell_cn10k_ddr_pmu.c
+++ b/drivers/perf/marvell_cn10k_ddr_pmu.c
@@ -129,6 +129,7 @@ struct cn10k_ddr_pmu {
 	int active_events;
 	struct perf_event *events[DDRC_PERF_NUM_COUNTERS];
 	struct hrtimer hrtimer;
+	struct hlist_node node;
 };
 
 #define to_cn10k_ddr_pmu(p)	container_of(p, struct cn10k_ddr_pmu, pmu)
@@ -610,6 +611,24 @@ static enum hrtimer_restart cn10k_ddr_pmu_timer_handler(struct hrtimer *hrtimer)
 	return HRTIMER_RESTART;
 }
 
+static int cn10k_ddr_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+	struct cn10k_ddr_pmu *pmu = hlist_entry_safe(node, struct cn10k_ddr_pmu,
+						     node);
+	unsigned int target;
+
+	if (cpu != pmu->cpu)
+		return 0;
+
+	target = cpumask_any_but(cpu_online_mask, cpu);
+	if (target >= nr_cpu_ids)
+		return 0;
+
+	perf_pmu_migrate_context(&pmu->pmu, cpu, target);
+	pmu->cpu = target;
+	return 0;
+}
+
 static int cn10k_ddr_perf_probe(struct platform_device *pdev)
 {
 	struct cn10k_ddr_pmu *ddr_pmu;
@@ -661,18 +680,31 @@ static int cn10k_ddr_perf_probe(struct platform_device *pdev)
 	hrtimer_init(&ddr_pmu->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	ddr_pmu->hrtimer.function = cn10k_ddr_pmu_timer_handler;
 
+	cpuhp_state_add_instance_nocalls(
+				CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
+				&ddr_pmu->node);
+
 	ret = perf_pmu_register(&ddr_pmu->pmu, name, -1);
 	if (ret)
-		return ret;
+		goto error;
 
 	pr_info("CN10K DDR PMU Driver for ddrc@...x\n", res->start);
 	return 0;
+error:
+	cpuhp_state_remove_instance_nocalls(
+				CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
+				&ddr_pmu->node);
+	return ret;
 }
 
 static int cn10k_ddr_perf_remove(struct platform_device *pdev)
 {
 	struct cn10k_ddr_pmu *ddr_pmu = platform_get_drvdata(pdev);
 
+	cpuhp_state_remove_instance_nocalls(
+				CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
+				&ddr_pmu->node);
+
 	perf_pmu_unregister(&ddr_pmu->pmu);
 	return 0;
 }
@@ -695,12 +727,26 @@ static struct platform_driver cn10k_ddr_pmu_driver = {
 
 static int __init cn10k_ddr_pmu_init(void)
 {
-	return platform_driver_register(&cn10k_ddr_pmu_driver);
+	int ret;
+
+	ret = cpuhp_setup_state_multi(
+				CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
+				"perf/marvell/cn10k/ddr:online", NULL,
+				cn10k_ddr_pmu_offline_cpu);
+	if (ret)
+		return ret;
+
+	ret = platform_driver_register(&cn10k_ddr_pmu_driver);
+	if (ret)
+		cpuhp_remove_multi_state(
+				CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE);
+	return ret;
 }
 
 static void __exit cn10k_ddr_pmu_exit(void)
 {
 	platform_driver_unregister(&cn10k_ddr_pmu_driver);
+	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE);
 }
 
 module_init(cn10k_ddr_pmu_init);
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 832d8a74fa59..a4b521f12b58 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -227,6 +227,7 @@ enum cpuhp_state {
 	CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
 	CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
 	CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
+	CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
 	CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
 	CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
 	CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
--
2.17.1