Date:	Thu, 26 Feb 2015 01:19:10 -0500
From:	green@...uxhacker.ru
To:	Rusty Russell <rusty@...tcorp.com.au>,
	Andrew Morton <akpm@...ux-foundation.org>,
	"David S. Miller" <davem@...emloft.net>
Cc:	linux-kernel@...r.kernel.org, Oleg Drokin <green@...uxhacker.ru>
Subject: [PATCH 1/2] cpumask: Properly calculate cpumask values

From: Oleg Drokin <green@...uxhacker.ru>

With CONFIG_CPUMASK_OFFSTACK enabled there can be a large disparity
between the theoretical maximum number of CPUs in the system (NR_CPUS,
which may be huge) and the actual value calculated at runtime
(nr_cpu_ids).

Functions like cpus_weight should only check up to nr_cpu_ids bits
in the cpu mask, as there is no point in walking all the way to 8192
bits of theoretically possible CPUs.
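
For illustration, here is a minimal userspace sketch (not part of the
patch; names such as FAKE_NR_CPUS, fake_nr_cpu_ids and mask_weight are
made up) of why bounding the scan by the runtime CPU count matters: the
result is the same, but far fewer bits are inspected when the
compile-time maximum greatly exceeds the number of CPUs actually
present.

#include <stdio.h>
#include <string.h>

#define FAKE_NR_CPUS	8192		/* compile-time maximum, like NR_CPUS */
#define BITS_PER_LONG	(8 * (int)sizeof(unsigned long))
#define MASK_WORDS(bits)	(((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned int fake_nr_cpu_ids = 16;	/* "detected" at runtime */

/* Count set bits in the first nbits of the mask. */
static unsigned int mask_weight(const unsigned long *mask, unsigned int nbits)
{
	unsigned int i, w = 0;

	for (i = 0; i < nbits; i++)
		if (mask[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
			w++;
	return w;
}

int main(void)
{
	unsigned long mask[MASK_WORDS(FAKE_NR_CPUS)];

	memset(mask, 0, sizeof(mask));
	mask[0] = 0xfUL;	/* CPUs 0-3 online */

	/* Same answer, but the second scan inspects 16 bits, not 8192. */
	printf("weight over FAKE_NR_CPUS bits:    %u\n",
	       mask_weight(mask, FAKE_NR_CPUS));
	printf("weight over fake_nr_cpu_ids bits: %u\n",
	       mask_weight(mask, fake_nr_cpu_ids));
	return 0;
}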

Signed-off-by: Oleg Drokin <green@...uxhacker.ru>
---
 include/linux/cpumask.h | 25 +++++++++++++++----------
 1 file changed, 15 insertions(+), 10 deletions(-)

diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 086549a..f0599e1 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -902,21 +902,24 @@ static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
 	return test_and_set_bit(cpu, addr->bits);
 }
 
-#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
+#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), \
+					     nr_cpumask_bits)
 static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
 					const cpumask_t *src2p, unsigned int nbits)
 {
 	return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
 }
 
-#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
+#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), \
+					   nr_cpumask_bits)
 static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
 					const cpumask_t *src2p, unsigned int nbits)
 {
 	bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
 }
 
-#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
+#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), \
+					     nr_cpumask_bits)
 static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
 					const cpumask_t *src2p, unsigned int nbits)
 {
@@ -924,48 +927,50 @@ static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
 }
 
 #define cpus_andnot(dst, src1, src2) \
-				__cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
+				__cpus_andnot(&(dst), &(src1), &(src2), \
+					      nr_cpumask_bits)
 static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
 					const cpumask_t *src2p, unsigned int nbits)
 {
 	return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
 }
 
-#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
+#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), nr_cpumask_bits)
 static inline int __cpus_equal(const cpumask_t *src1p,
 					const cpumask_t *src2p, unsigned int nbits)
 {
 	return bitmap_equal(src1p->bits, src2p->bits, nbits);
 }
 
-#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
+#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), \
+						      nr_cpumask_bits)
 static inline int __cpus_intersects(const cpumask_t *src1p,
 					const cpumask_t *src2p, unsigned int nbits)
 {
 	return bitmap_intersects(src1p->bits, src2p->bits, nbits);
 }
 
-#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
+#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), nr_cpumask_bits)
 static inline int __cpus_subset(const cpumask_t *src1p,
 					const cpumask_t *src2p, unsigned int nbits)
 {
 	return bitmap_subset(src1p->bits, src2p->bits, nbits);
 }
 
-#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
+#define cpus_empty(src) __cpus_empty(&(src), nr_cpumask_bits)
 static inline int __cpus_empty(const cpumask_t *srcp, unsigned int nbits)
 {
 	return bitmap_empty(srcp->bits, nbits);
 }
 
-#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
+#define cpus_weight(cpumask) __cpus_weight(&(cpumask), nr_cpumask_bits)
 static inline int __cpus_weight(const cpumask_t *srcp, unsigned int nbits)
 {
 	return bitmap_weight(srcp->bits, nbits);
 }
 
 #define cpus_shift_left(dst, src, n) \
-			__cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
+			__cpus_shift_left(&(dst), &(src), (n), nr_cpumask_bits)
 static inline void __cpus_shift_left(cpumask_t *dstp,
 					const cpumask_t *srcp, int n, int nbits)
 {
-- 
2.1.0
