[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CO6PR18MB44654A1AF5A02C120892F476E3B69@CO6PR18MB4465.namprd18.prod.outlook.com>
Date: Tue, 12 Oct 2021 08:02:10 +0000
From: Bharat Bhushan <bbhushan2@...vell.com>
To: Bharat Bhushan <bbhushan2@...vell.com>,
"will@...nel.org" <will@...nel.org>,
"mark.rutland@....com" <mark.rutland@....com>,
"robh+dt@...nel.org" <robh+dt@...nel.org>,
"linux-arm-kernel@...ts.infradead.org"
<linux-arm-kernel@...ts.infradead.org>,
"devicetree@...r.kernel.org" <devicetree@...r.kernel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
Bhaskara Budiredla <bbudiredla@...vell.com>
Subject: RE: [PATCH v4 4/4] perf/marvell: cn10k DDR perf event core ownership
Adding Bhaskara
> -----Original Message-----
> From: Bharat Bhushan <bbhushan2@...vell.com>
> Sent: Monday, September 20, 2021 10:38 AM
> To: will@...nel.org; mark.rutland@....com; robh+dt@...nel.org; linux-arm-
> kernel@...ts.infradead.org; devicetree@...r.kernel.org; linux-
> kernel@...r.kernel.org
> Cc: Bharat Bhushan <bbhushan2@...vell.com>
> Subject: [PATCH v4 4/4] perf/marvell: cn10k DDR perf event core ownership
>
> As DDR perf event counters are not per core, they should be accessed only by
> one core at a time. Select a new core when the previously owning core goes offline.
>
> Signed-off-by: Bharat Bhushan <bbhushan2@...vell.com>
> ---
> v1->v2->v3->v4:
> - No change
>
> drivers/perf/marvell_cn10k_ddr_pmu.c | 50 ++++++++++++++++++++++++++--
> include/linux/cpuhotplug.h | 1 +
> 2 files changed, 49 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/perf/marvell_cn10k_ddr_pmu.c b/drivers/perf/marvell_cn10k_ddr_pmu.c
> index 21fccb9090c5..bef0cee3a46a 100644
> --- a/drivers/perf/marvell_cn10k_ddr_pmu.c
> +++ b/drivers/perf/marvell_cn10k_ddr_pmu.c
> @@ -129,6 +129,7 @@ struct cn10k_ddr_pmu {
> int active_events;
> struct perf_event *events[DDRC_PERF_NUM_COUNTERS];
> struct hrtimer hrtimer;
> + struct hlist_node node;
> };
>
> #define to_cn10k_ddr_pmu(p) container_of(p, struct cn10k_ddr_pmu, pmu)
> @@ -610,6 +611,24 @@ static enum hrtimer_restart cn10k_ddr_pmu_timer_handler(struct hrtimer *hrtimer)
> return HRTIMER_RESTART;
> }
>
> +static int cn10k_ddr_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
> +{
> +	struct cn10k_ddr_pmu *pmu = hlist_entry_safe(node, struct cn10k_ddr_pmu,
> +						     node);
> + unsigned int target;
> +
> + if (cpu != pmu->cpu)
> + return 0;
> +
> + target = cpumask_any_but(cpu_online_mask, cpu);
> + if (target >= nr_cpu_ids)
> + return 0;
> +
> + perf_pmu_migrate_context(&pmu->pmu, cpu, target);
> + pmu->cpu = target;
> + return 0;
> +}
> +
> static int cn10k_ddr_perf_probe(struct platform_device *pdev)
> {
> struct cn10k_ddr_pmu *ddr_pmu;
> @@ -661,18 +680,31 @@ static int cn10k_ddr_perf_probe(struct platform_device *pdev)
> hrtimer_init(&ddr_pmu->hrtimer, CLOCK_MONOTONIC,
> HRTIMER_MODE_REL);
> ddr_pmu->hrtimer.function = cn10k_ddr_pmu_timer_handler;
>
> +	cpuhp_state_add_instance_nocalls(
> +			CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
> +			&ddr_pmu->node);
> +
> ret = perf_pmu_register(&ddr_pmu->pmu, name, -1);
> if (ret)
> - return ret;
> + goto error;
>
> pr_info("CN10K DDR PMU Driver for ddrc@...x\n", res->start);
> return 0;
> +error:
> +	cpuhp_state_remove_instance_nocalls(
> +			CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
> +			&ddr_pmu->node);
> + return ret;
> }
>
> static int cn10k_ddr_perf_remove(struct platform_device *pdev)
> {
> struct cn10k_ddr_pmu *ddr_pmu = platform_get_drvdata(pdev);
>
> +	cpuhp_state_remove_instance_nocalls(
> +			CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
> +			&ddr_pmu->node);
> +
> perf_pmu_unregister(&ddr_pmu->pmu);
> return 0;
> }
> @@ -695,12 +727,26 @@ static struct platform_driver cn10k_ddr_pmu_driver = {
>
> static int __init cn10k_ddr_pmu_init(void)
> {
> - return platform_driver_register(&cn10k_ddr_pmu_driver);
> + int ret;
> +
> +	ret = cpuhp_setup_state_multi(
> +			CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
> +			"perf/marvell/cn10k/ddr:online", NULL,
> +			cn10k_ddr_pmu_offline_cpu);
> + if (ret)
> + return ret;
> +
> + ret = platform_driver_register(&cn10k_ddr_pmu_driver);
> + if (ret)
> +		cpuhp_remove_multi_state(
> +			CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE);
> + return ret;
> }
>
> static void __exit cn10k_ddr_pmu_exit(void)
> {
> platform_driver_unregister(&cn10k_ddr_pmu_driver);
> +
> 	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE);
> }
>
> module_init(cn10k_ddr_pmu_init);
> diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
> index 832d8a74fa59..a4b521f12b58 100644
> --- a/include/linux/cpuhotplug.h
> +++ b/include/linux/cpuhotplug.h
> @@ -227,6 +227,7 @@ enum cpuhp_state {
> CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
> CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
> CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
> + CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
> CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
> CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
> CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
> --
> 2.17.1
Powered by blists - more mailing lists