Message-Id: <20211128035704.270739-7-yury.norov@gmail.com>
Date: Sat, 27 Nov 2021 19:57:01 -0800
From: Yury Norov <yury.norov@...il.com>
To: linux-kernel@...r.kernel.org, Yury Norov <yury.norov@...il.com>,
"James E.J. Bottomley" <jejb@...ux.ibm.com>,
"Martin K. Petersen" <martin.petersen@...cle.com>,
"Paul E. McKenney" <paulmck@...nel.org>,
"Rafael J. Wysocki" <rafael@...nel.org>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Alexey Klimov <aklimov@...hat.com>,
Amitkumar Karwar <amitkarwar@...il.com>,
Andi Kleen <ak@...ux.intel.com>, Andrew Lunn <andrew@...n.ch>,
Andrew Morton <akpm@...ux-foundation.org>,
Andy Gross <agross@...nel.org>,
Andy Lutomirski <luto@...nel.org>,
Andy Shevchenko <andy@...radead.org>,
Anup Patel <anup.patel@....com>,
Ard Biesheuvel <ardb@...nel.org>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Arnd Bergmann <arnd@...db.de>, Borislav Petkov <bp@...en8.de>,
Catalin Marinas <catalin.marinas@....com>,
Christoph Hellwig <hch@....de>,
Christoph Lameter <cl@...ux.com>,
Daniel Vetter <daniel@...ll.ch>,
Dave Hansen <dave.hansen@...ux.intel.com>,
David Airlie <airlied@...ux.ie>,
David Laight <David.Laight@...LAB.COM>,
Dennis Zhou <dennis@...nel.org>,
Dinh Nguyen <dinguyen@...nel.org>,
Geetha sowjanya <gakula@...vell.com>,
Geert Uytterhoeven <geert@...ux-m68k.org>,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
Guo Ren <guoren@...nel.org>,
Hans de Goede <hdegoede@...hat.com>,
Heiko Carstens <hca@...ux.ibm.com>,
Ian Rogers <irogers@...gle.com>,
Ingo Molnar <mingo@...hat.com>,
Jakub Kicinski <kuba@...nel.org>,
Jason Wessel <jason.wessel@...driver.com>,
Jens Axboe <axboe@...com>, Jiri Olsa <jolsa@...hat.com>,
Jonathan Cameron <jic23@...nel.org>,
Juri Lelli <juri.lelli@...hat.com>,
Kalle Valo <kvalo@...eaurora.org>,
Kees Cook <keescook@...omium.org>,
Krzysztof Kozlowski <krzysztof.kozlowski@...onical.com>,
Lee Jones <lee.jones@...aro.org>,
Marc Zyngier <maz@...nel.org>, Marcin Wojtas <mw@...ihalf.com>,
Mark Gross <markgross@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Matti Vaittinen <mazziesaccount@...il.com>,
Mauro Carvalho Chehab <mchehab@...nel.org>,
Mel Gorman <mgorman@...e.de>,
Michael Ellerman <mpe@...erman.id.au>,
Mike Marciniszyn <mike.marciniszyn@...nelisnetworks.com>,
Nicholas Piggin <npiggin@...il.com>,
Palmer Dabbelt <palmer@...belt.com>,
Peter Zijlstra <peterz@...radead.org>,
Petr Mladek <pmladek@...e.com>,
Randy Dunlap <rdunlap@...radead.org>,
Rasmus Villemoes <linux@...musvillemoes.dk>,
Roy Pledge <Roy.Pledge@....com>,
Russell King <linux@...linux.org.uk>,
Saeed Mahameed <saeedm@...dia.com>,
Sagi Grimberg <sagi@...mberg.me>,
Sergey Senozhatsky <senozhatsky@...omium.org>,
Solomon Peachy <pizza@...ftnet.org>,
Stephen Boyd <sboyd@...nel.org>,
Stephen Rothwell <sfr@...b.auug.org.au>,
Steven Rostedt <rostedt@...dmis.org>,
Subbaraya Sundeep <sbhatta@...vell.com>,
Sudeep Holla <sudeep.holla@....com>,
Sunil Goutham <sgoutham@...vell.com>,
Tariq Toukan <tariqt@...dia.com>, Tejun Heo <tj@...nel.org>,
Thomas Bogendoerfer <tsbogend@...ha.franken.de>,
Thomas Gleixner <tglx@...utronix.de>,
Ulf Hansson <ulf.hansson@...aro.org>,
Vincent Guittot <vincent.guittot@...aro.org>,
Vineet Gupta <vgupta@...nel.org>,
Viresh Kumar <viresh.kumar@...aro.org>,
Vivien Didelot <vivien.didelot@...il.com>,
Vlastimil Babka <vbabka@...e.cz>,
Will Deacon <will@...nel.org>,
bcm-kernel-feedback-list@...adcom.com, kvm@...r.kernel.org,
linux-alpha@...r.kernel.org, linux-arm-kernel@...ts.infradead.org,
linux-crypto@...r.kernel.org, linux-csky@...r.kernel.org,
linux-ia64@...r.kernel.org, linux-mips@...r.kernel.org,
linux-mm@...ck.org, linux-perf-users@...r.kernel.org,
linux-riscv@...ts.infradead.org, linux-s390@...r.kernel.org,
linux-snps-arc@...ts.infradead.org, linuxppc-dev@...ts.ozlabs.org
Subject: [PATCH 6/9] lib/nodemask: add nodemask_weight_{eq,gt,le}
Add nodemask_weight_{eq,gt,le} and replace nodemask_weight() with them
where appropriate. Unlike nodemask_weight(), which always counts every
set bit, the new helpers can return as soon as the outcome of the
comparison is known.
Signed-off-by: Yury Norov <yury.norov@...il.com>
---
arch/x86/mm/amdtopology.c | 2 +-
arch/x86/mm/numa_emulation.c | 4 ++--
drivers/acpi/numa/srat.c | 2 +-
include/linux/nodemask.h | 24 ++++++++++++++++++++++++
mm/mempolicy.c | 2 +-
5 files changed, 29 insertions(+), 5 deletions(-)
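The early-return behaviour mentioned above comes from the
bitmap_weight_{eq,gt,le}() helpers added earlier in this series
(patch 2/9, not shown here). A minimal sketch of that idea, with an
assumed name and shape purely for illustration:

    /*
     * Illustrative sketch only -- not the implementation from patch 2/9.
     * Walks the bitmap word by word and stops as soon as the accumulated
     * weight exceeds 'num'.  Assumes <linux/bitops.h> and <linux/bitmap.h>.
     */
    static bool bitmap_weight_gt_sketch(const unsigned long *bits,
                                        unsigned int nbits, unsigned int num)
    {
            unsigned int k, w = 0;

            for (k = 0; k < nbits / BITS_PER_LONG; k++) {
                    w += hweight_long(bits[k]);
                    if (w > num)            /* answer already known */
                            return true;
            }

            if (nbits % BITS_PER_LONG)
                    w += hweight_long(bits[k] & BITMAP_LAST_WORD_MASK(nbits));

            return w > num;
    }

The nodemask wrappers below simply forward to those helpers, so a
comparison can be decided without necessarily scanning the whole mask.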
diff --git a/arch/x86/mm/amdtopology.c b/arch/x86/mm/amdtopology.c
index 058b2f36b3a6..b3ca7d23e4b0 100644
--- a/arch/x86/mm/amdtopology.c
+++ b/arch/x86/mm/amdtopology.c
@@ -154,7 +154,7 @@ int __init amd_numa_init(void)
node_set(nodeid, numa_nodes_parsed);
}
- if (!nodes_weight(numa_nodes_parsed))
+ if (nodes_empty(numa_nodes_parsed))
return -ENOENT;
/*
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index 1a02b791d273..9a9305367fdd 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -123,7 +123,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
* Continue to fill physical nodes with fake nodes until there is no
* memory left on any of them.
*/
- while (nodes_weight(physnode_mask)) {
+ while (!nodes_empty(physnode_mask)) {
for_each_node_mask(i, physnode_mask) {
u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);
u64 start, limit, end;
@@ -270,7 +270,7 @@ static int __init split_nodes_size_interleave_uniform(struct numa_meminfo *ei,
* Fill physical nodes with fake nodes of size until there is no memory
* left on any of them.
*/
- while (nodes_weight(physnode_mask)) {
+ while (!nodes_empty(physnode_mask)) {
for_each_node_mask(i, physnode_mask) {
u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);
u64 start, limit, end;
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index 66a0142dc78c..c4f80d2d85bf 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -67,7 +67,7 @@ int acpi_map_pxm_to_node(int pxm)
node = pxm_to_node_map[pxm];
if (node == NUMA_NO_NODE) {
- if (nodes_weight(nodes_found_map) >= MAX_NUMNODES)
+ if (nodes_weight_gt(nodes_found_map, MAX_NUMNODES - 1))
return NUMA_NO_NODE;
node = first_unset_node(nodes_found_map);
__acpi_map_pxm_to_node(pxm, node);
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 567c3ddba2c4..3801ec5b06f4 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -38,6 +38,9 @@
* int nodes_empty(mask) Is mask empty (no bits sets)?
* int nodes_full(mask) Is mask full (all bits sets)?
* int nodes_weight(mask) Hamming weight - number of set bits
+ * bool nodes_weight_eq(mask, num) Hamming weight is equal to num
+ * bool nodes_weight_gt(mask, num) Hamming weight is greater than num
+ * bool nodes_weight_le(mask, num) Hamming weight is less than num
*
* void nodes_shift_right(dst, src, n) Shift right
* void nodes_shift_left(dst, src, n) Shift left
@@ -240,6 +243,27 @@ static inline int __nodes_weight(const nodemask_t *srcp, unsigned int nbits)
return bitmap_weight(srcp->bits, nbits);
}
+#define nodes_weight_eq(nodemask, num) __nodes_weight_eq(&(nodemask), MAX_NUMNODES, (num))
+static inline bool __nodes_weight_eq(const nodemask_t *srcp,
+ unsigned int nbits, unsigned int num)
+{
+ return bitmap_weight_eq(srcp->bits, nbits, num);
+}
+
+#define nodes_weight_gt(nodemask, num) __nodes_weight_gt(&(nodemask), MAX_NUMNODES, (num))
+static inline bool __nodes_weight_gt(const nodemask_t *srcp,
+ unsigned int nbits, unsigned int num)
+{
+ return bitmap_weight_gt(srcp->bits, nbits, num);
+}
+
+#define nodes_weight_le(nodemask, num) __nodes_weight_le(&(nodemask), MAX_NUMNODES, (num))
+static inline bool __nodes_weight_le(const nodemask_t *srcp,
+ unsigned int nbits, unsigned int num)
+{
+ return bitmap_weight_le(srcp->bits, nbits, num);
+}
+
#define nodes_shift_right(dst, src, n) \
__nodes_shift_right(&(dst), &(src), (n), MAX_NUMNODES)
static inline void __nodes_shift_right(nodemask_t *dstp,
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index b1fcdb4d25d6..4a48ce5b86cf 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1154,7 +1154,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
* [0-7] - > [3,4,5] moves only 0,1,2,6,7.
*/
- if ((nodes_weight(*from) != nodes_weight(*to)) &&
+ if (!nodes_weight_eq(*from, nodes_weight(*to)) &&
(node_isset(s, *to)))
continue;
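For reference, a hypothetical caller (not part of this patch) showing how
an open-coded weight comparison is expected to map onto the new wrappers:

    /* Hypothetical caller, for illustration only. */
    static bool __maybe_unused spans_more_than_one_node(const nodemask_t *mask)
    {
            /* Before: return nodes_weight(*mask) > 1; */
            return nodes_weight_gt(*mask, 1);
    }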
--
2.25.1