Message-ID: <166257368306.1043018.15116501631578530094.stgit@bmoger-ubuntu>
Date: Wed, 7 Sep 2022 13:01:23 -0500
From: Babu Moger <babu.moger@....com>
To: <corbet@....net>, <reinette.chatre@...el.com>,
<tglx@...utronix.de>, <mingo@...hat.com>, <bp@...en8.de>
CC: <fenghua.yu@...el.com>, <dave.hansen@...ux.intel.com>,
<x86@...nel.org>, <hpa@...or.com>, <paulmck@...nel.org>,
<akpm@...ux-foundation.org>, <quic_neeraju@...cinc.com>,
<rdunlap@...radead.org>, <damien.lemoal@...nsource.wdc.com>,
<songmuchun@...edance.com>, <peterz@...radead.org>,
<jpoimboe@...nel.org>, <pbonzini@...hat.com>, <babu.moger@....com>,
<chang.seok.bae@...el.com>, <pawan.kumar.gupta@...ux.intel.com>,
<jmattson@...gle.com>, <daniel.sneddon@...ux.intel.com>,
<sandipan.das@....com>, <tony.luck@...el.com>,
<james.morse@....com>, <linux-doc@...r.kernel.org>,
<linux-kernel@...r.kernel.org>, <bagasdotme@...il.com>,
<eranian@...gle.com>
Subject: [PATCH v4 12/13] x86/resctrl: Replace smp_call_function_many with
on_each_cpu_mask

on_each_cpu_mask() runs the given function on every CPU set in the cpumask,
including the local processor when it is part of the mask. Replace the
open-coded smp_call_function_many() sequences with on_each_cpu_mask() to
simplify the code.
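
For illustration, the change collapses the open-coded pattern below into a
single call (a simplified sketch; func and info stand in for the
call-site-specific function and argument, they are not names taken from the
resctrl code):

  Before:

	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, cpu_mask))
		func(info);
	smp_call_function_many(cpu_mask, func, info, 1);
	put_cpu();

  After:

	on_each_cpu_mask(cpu_mask, func, info, 1);

on_each_cpu_mask() disables preemption internally and invokes func on the
local CPU itself when that CPU is set in cpu_mask, so the manual
get_cpu()/put_cpu() handling at each call site is no longer needed.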
Signed-off-by: Babu Moger <babu.moger@....com>
---
arch/x86/kernel/cpu/resctrl/rdtgroup.c | 29 ++++++++---------------------
1 file changed, 8 insertions(+), 21 deletions(-)
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 59b484eb1267..fa0e1e8e29aa 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -518,12 +518,7 @@ static void update_cpu_closid_rmid(void *info)
static void
update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r)
{
- int cpu = get_cpu();
-
- if (cpumask_test_cpu(cpu, cpu_mask))
- update_cpu_closid_rmid(r);
- smp_call_function_many(cpu_mask, update_cpu_closid_rmid, r, 1);
- put_cpu();
+ on_each_cpu_mask(cpu_mask, update_cpu_closid_rmid, r, 1);
}
static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
@@ -2058,13 +2053,9 @@ static int set_cache_qos_cfg(int level, bool enable)
/* Pick one CPU from each domain instance to update MSR */
cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
}
- cpu = get_cpu();
- /* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */
- if (cpumask_test_cpu(cpu, cpu_mask))
- update(&enable);
- /* Update QOS_CFG MSR on all other cpus in cpu_mask. */
- smp_call_function_many(cpu_mask, update, &enable, 1);
- put_cpu();
+
+ /* Update QOS_CFG MSR on all the CPUs in cpu_mask */
+ on_each_cpu_mask(cpu_mask, update, &enable, 1);
free_cpumask_var(cpu_mask);
@@ -2506,7 +2497,7 @@ static int reset_all_ctrls(struct rdt_resource *r)
struct msr_param msr_param;
cpumask_var_t cpu_mask;
struct rdt_domain *d;
- int i, cpu;
+ int i;
if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
return -ENOMEM;
@@ -2527,13 +2518,9 @@ static int reset_all_ctrls(struct rdt_resource *r)
for (i = 0; i < hw_res->num_closid; i++)
hw_dom->ctrl_val[i] = r->default_ctrl;
}
- cpu = get_cpu();
- /* Update CBM on this cpu if it's in cpu_mask. */
- if (cpumask_test_cpu(cpu, cpu_mask))
- rdt_ctrl_update(&msr_param);
- /* Update CBM on all other cpus in cpu_mask. */
- smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
- put_cpu();
+
+ /* Update CBM on all the CPUs in cpu_mask */
+ on_each_cpu_mask(cpu_mask, rdt_ctrl_update, &msr_param, 1);
free_cpumask_var(cpu_mask);