Message-ID: <0d70d735-a4ba-4bc0-8f3d-24e31c870566@amd.com>
Date: Thu, 9 Nov 2023 14:52:06 -0600
From: "Moger, Babu" <babu.moger@....com>
To: James Morse <james.morse@....com>, x86@...nel.org,
linux-kernel@...r.kernel.org
Cc: Fenghua Yu <fenghua.yu@...el.com>,
Reinette Chatre <reinette.chatre@...el.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
H Peter Anvin <hpa@...or.com>,
shameerali.kolothum.thodi@...wei.com,
D Scott Phillips OS <scott@...amperecomputing.com>,
carl@...amperecomputing.com, lcherian@...vell.com,
bobo.shaobowang@...wei.com, tan.shaopeng@...itsu.com,
baolin.wang@...ux.alibaba.com, Jamie Iles <quic_jiles@...cinc.com>,
Xin Hao <xhao@...ux.alibaba.com>, peternewman@...gle.com,
dfustini@...libre.com, amitsinght@...vell.com
Subject: Re: [PATCH v7 22/24] x86/resctrl: Add CPU offline callback for
resctrl work
On 10/25/23 13:03, James Morse wrote:
> The resctrl architecture-specific code may need to free a domain when
> a CPU goes offline; it also needs to reset the CPU's PQR_ASSOC register.
> Amongst other things, the resctrl filesystem code needs to clear this
> CPU from the cpu_mask of any control and monitor groups.
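
A note for anyone skimming: the PQR_ASSOC reset mentioned here stays on the
arch side (clear_closid_rmid() in the core.c hunk below). Purely as an
illustration of what that reset boils down to (hypothetical helper name, not
part of this patch): the RMID sits in the low dword of the MSR and the CLOSID
in the high dword, so writing zeroes restores the default association for the
offlining CPU.

#include <asm/msr.h>		/* wrmsr() */
#include <asm/msr-index.h>	/* MSR_IA32_PQR_ASSOC */

/*
 * Illustrative sketch only, not from this patch: restore the offlining
 * CPU's PQR_ASSOC to the default CLOSID/RMID (both zero).
 */
static void example_reset_pqr_assoc(void)
{
	wrmsr(MSR_IA32_PQR_ASSOC, 0, 0);
}
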
>
> Currently this is all done in core.c and called from
> resctrl_offline_cpu(), making the split between architecture and
> filesystem code unclear.
>
> Move the filesystem work of removing the CPU from the control and
> monitor groups into a filesystem helper called resctrl_offline_cpu(),
> and rename the one in core.c to resctrl_arch_offline_cpu().
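
Condensing the hunks below purely for review readability (this is the patch's
own code reflowed, not an alternative proposal): the cpuhp callback stays on
the arch side and keeps taking rdtgroup_mutex, and the new filesystem helper
it calls only touches the rdtgroup cpu_masks.

/* Condensed from the core.c hunk below, for review purposes only. */
static int resctrl_arch_offline_cpu(unsigned int cpu)	/* core.c, arch side */
{
	struct rdt_resource *r;

	mutex_lock(&rdtgroup_mutex);
	resctrl_offline_cpu(cpu);		/* rdtgroup.c, fs side: clear group cpu_masks */
	for_each_capable_rdt_resource(r)
		domain_remove_cpu(cpu, r);	/* arch: tear down the domain if now empty */
	clear_closid_rmid(cpu);			/* arch: reset this CPU's PQR_ASSOC */
	mutex_unlock(&rdtgroup_mutex);

	return 0;
}
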
>
> Tested-by: Shaopeng Tan <tan.shaopeng@...itsu.com>
> Tested-by: Peter Newman <peternewman@...gle.com>
> Reviewed-by: Shaopeng Tan <tan.shaopeng@...itsu.com>
> Reviewed-by: Reinette Chatre <reinette.chatre@...el.com>
> Signed-off-by: James Morse <james.morse@....com>
Reviewed-by: Babu Moger <babu.moger@....com>
> ---
> No changes since v6
>
> arch/x86/kernel/cpu/resctrl/core.c | 25 +++++--------------------
> arch/x86/kernel/cpu/resctrl/rdtgroup.c | 24 ++++++++++++++++++++++++
> include/linux/resctrl.h | 1 +
> 3 files changed, 30 insertions(+), 20 deletions(-)
>
> diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
> index 7e44f2c40897..7d09b8d7c653 100644
> --- a/arch/x86/kernel/cpu/resctrl/core.c
> +++ b/arch/x86/kernel/cpu/resctrl/core.c
> @@ -627,31 +627,15 @@ static int resctrl_arch_online_cpu(unsigned int cpu)
> return 0;
> }
>
> -static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
> +static int resctrl_arch_offline_cpu(unsigned int cpu)
> {
> - struct rdtgroup *cr;
> -
> - list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) {
> - if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask)) {
> - break;
> - }
> - }
> -}
> -
> -static int resctrl_offline_cpu(unsigned int cpu)
> -{
> - struct rdtgroup *rdtgrp;
> struct rdt_resource *r;
>
> mutex_lock(&rdtgroup_mutex);
> + resctrl_offline_cpu(cpu);
> +
> for_each_capable_rdt_resource(r)
> domain_remove_cpu(cpu, r);
> - list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
> - if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) {
> - clear_childcpus(rdtgrp, cpu);
> - break;
> - }
> - }
> clear_closid_rmid(cpu);
> mutex_unlock(&rdtgroup_mutex);
>
> @@ -973,7 +957,8 @@ static int __init resctrl_late_init(void)
>
> state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
> "x86/resctrl/cat:online:",
> - resctrl_arch_online_cpu, resctrl_offline_cpu);
> + resctrl_arch_online_cpu,
> + resctrl_arch_offline_cpu);
> if (state < 0)
> return state;
>
> diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> index e22e0f6adeb3..971a8397e243 100644
> --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> @@ -4022,6 +4022,30 @@ void resctrl_online_cpu(unsigned int cpu)
> cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
> }
>
> +static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
> +{
> + struct rdtgroup *cr;
> +
> + list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) {
> + if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask))
> + break;
> + }
> +}
> +
> +void resctrl_offline_cpu(unsigned int cpu)
> +{
> + struct rdtgroup *rdtgrp;
> +
> + lockdep_assert_held(&rdtgroup_mutex);
> +
> + list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
> + if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) {
> + clear_childcpus(rdtgrp, cpu);
> + break;
> + }
> + }
> +}
> +
> /*
> * rdtgroup_init - rdtgroup initialization
> *
> diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
> index ccbbbe5d18d3..270ff1d5c051 100644
> --- a/include/linux/resctrl.h
> +++ b/include/linux/resctrl.h
> @@ -226,6 +226,7 @@ u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d,
> int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d);
> void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d);
> void resctrl_online_cpu(unsigned int cpu);
> +void resctrl_offline_cpu(unsigned int cpu);
>
> /**
> * resctrl_arch_rmid_read() - Read the eventid counter corresponding to rmid
--
Thanks
Babu Moger