[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <YfLUYSh8Qyv6wEHd@swahl-home.5wahls.com>
Date: Thu, 27 Jan 2022 11:20:33 -0600
From: Steve Wahl <steve.wahl@....com>
To: Yury Norov <yury.norov@...il.com>
Cc: Andy Shevchenko <andriy.shevchenko@...ux.intel.com>,
Rasmus Villemoes <linux@...musvillemoes.dk>,
Andrew Morton <akpm@...ux-foundation.org>,
Michał Mirosław <mirq-linux@...e.qmqm.pl>,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
Peter Zijlstra <peterz@...radead.org>,
David Laight <David.Laight@...lab.com>,
Joe Perches <joe@...ches.com>, Dennis Zhou <dennis@...nel.org>,
Emil Renner Berthing <kernel@...il.dk>,
Nicholas Piggin <npiggin@...il.com>,
Matti Vaittinen <matti.vaittinen@...rohmeurope.com>,
Alexey Klimov <aklimov@...hat.com>,
linux-kernel@...r.kernel.org, Fenghua Yu <fenghua.yu@...el.com>,
Reinette Chatre <reinette.chatre@...el.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>,
"H. Peter Anvin" <hpa@...or.com>,
Steven Rostedt <rostedt@...dmis.org>,
Karol Herbst <karolherbst@...il.com>,
Pekka Paalanen <ppaalanen@...il.com>,
Andy Lutomirski <luto@...nel.org>,
Steve Wahl <steve.wahl@....com>,
Mike Travis <mike.travis@....com>,
Dimitri Sivanich <dimitri.sivanich@....com>,
Russ Anderson <russ.anderson@....com>,
Darren Hart <dvhart@...radead.org>,
Andy Shevchenko <andy@...radead.org>, x86@...nel.org,
nouveau@...ts.freedesktop.org, platform-driver-x86@...r.kernel.org
Subject: Re: [PATCH 15/54] arch/x86: replace cpumask_weight with
cpumask_empty where appropriate
Reviewed-by: Steve Wahl <steve.wahl@....com>
On Sun, Jan 23, 2022 at 10:38:46AM -0800, Yury Norov wrote:
> In some cases, arch/x86 code calls cpumask_weight() to check if any bit of
> a given cpumask is set. We can do it more efficiently with cpumask_empty()
> because cpumask_empty() stops traversing the cpumask as soon as it finds
> the first set bit, while cpumask_weight() counts all bits unconditionally.
>
> Signed-off-by: Yury Norov <yury.norov@...il.com>
> ---
> arch/x86/kernel/cpu/resctrl/rdtgroup.c | 14 +++++++-------
> arch/x86/mm/mmio-mod.c | 2 +-
> arch/x86/platform/uv/uv_nmi.c | 2 +-
> 3 files changed, 9 insertions(+), 9 deletions(-)
>
> diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> index b57b3db9a6a7..e23ff03290b8 100644
> --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> @@ -341,14 +341,14 @@ static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
>
> /* Check whether cpus belong to parent ctrl group */
> cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
> - if (cpumask_weight(tmpmask)) {
> + if (!cpumask_empty(tmpmask)) {
> rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n");
> return -EINVAL;
> }
>
> /* Check whether cpus are dropped from this group */
> cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
> - if (cpumask_weight(tmpmask)) {
> + if (!cpumask_empty(tmpmask)) {
> /* Give any dropped cpus to parent rdtgroup */
> cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask);
> update_closid_rmid(tmpmask, prgrp);
> @@ -359,7 +359,7 @@ static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
> * and update per-cpu rmid
> */
> cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
> - if (cpumask_weight(tmpmask)) {
> + if (!cpumask_empty(tmpmask)) {
> head = &prgrp->mon.crdtgrp_list;
> list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
> if (crgrp == rdtgrp)
> @@ -394,7 +394,7 @@ static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
>
> /* Check whether cpus are dropped from this group */
> cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
> - if (cpumask_weight(tmpmask)) {
> + if (!cpumask_empty(tmpmask)) {
> /* Can't drop from default group */
> if (rdtgrp == &rdtgroup_default) {
> rdt_last_cmd_puts("Can't drop CPUs from default group\n");
> @@ -413,12 +413,12 @@ static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
> * and update per-cpu closid/rmid.
> */
> cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
> - if (cpumask_weight(tmpmask)) {
> + if (!cpumask_empty(tmpmask)) {
> list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
> if (r == rdtgrp)
> continue;
> cpumask_and(tmpmask1, &r->cpu_mask, tmpmask);
> - if (cpumask_weight(tmpmask1))
> + if (!cpumask_empty(tmpmask1))
> cpumask_rdtgrp_clear(r, tmpmask1);
> }
> update_closid_rmid(tmpmask, rdtgrp);
> @@ -488,7 +488,7 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
>
> /* check that user didn't specify any offline cpus */
> cpumask_andnot(tmpmask, newmask, cpu_online_mask);
> - if (cpumask_weight(tmpmask)) {
> + if (!cpumask_empty(tmpmask)) {
> ret = -EINVAL;
> rdt_last_cmd_puts("Can only assign online CPUs\n");
> goto unlock;
> diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
> index 933a2ebad471..c3317f0650d8 100644
> --- a/arch/x86/mm/mmio-mod.c
> +++ b/arch/x86/mm/mmio-mod.c
> @@ -400,7 +400,7 @@ static void leave_uniprocessor(void)
> int cpu;
> int err;
>
> - if (!cpumask_available(downed_cpus) || cpumask_weight(downed_cpus) == 0)
> + if (!cpumask_available(downed_cpus) || cpumask_empty(downed_cpus))
> return;
> pr_notice("Re-enabling CPUs...\n");
> for_each_cpu(cpu, downed_cpus) {
> diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c
> index 1e9ff28bc2e0..ea277fc08357 100644
> --- a/arch/x86/platform/uv/uv_nmi.c
> +++ b/arch/x86/platform/uv/uv_nmi.c
> @@ -985,7 +985,7 @@ static int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
>
> /* Clear global flags */
> if (master) {
> - if (cpumask_weight(uv_nmi_cpu_mask))
> + if (!cpumask_empty(uv_nmi_cpu_mask))
> uv_nmi_cleanup_mask();
> atomic_set(&uv_nmi_cpus_in_nmi, -1);
> atomic_set(&uv_nmi_cpu, -1);
> --
> 2.30.2
>
--
Steve Wahl, Hewlett Packard Enterprise
Powered by blists - more mailing lists