Message-Id: <20240325064023.2997-6-adrian.hunter@intel.com>
Date: Mon, 25 Mar 2024 08:40:09 +0200
From: Adrian Hunter <adrian.hunter@...el.com>
To: Thomas Gleixner <tglx@...utronix.de>
Cc: Michael Ellerman <mpe@...erman.id.au>,
Nicholas Piggin <npiggin@...il.com>,
Christophe Leroy <christophe.leroy@...roup.eu>,
"Aneesh Kumar K.V" <aneesh.kumar@...nel.org>,
"Naveen N. Rao" <naveen.n.rao@...ux.ibm.com>,
Heiko Carstens <hca@...ux.ibm.com>,
Vasily Gorbik <gor@...ux.ibm.com>,
Alexander Gordeev <agordeev@...ux.ibm.com>,
Christian Borntraeger <borntraeger@...ux.ibm.com>,
Sven Schnelle <svens@...ux.ibm.com>,
Ingo Molnar <mingo@...hat.com>,
Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>,
x86@...nel.org,
"H. Peter Anvin" <hpa@...or.com>,
Andy Lutomirski <luto@...nel.org>,
Vincenzo Frascino <vincenzo.frascino@....com>,
John Stultz <jstultz@...gle.com>,
Stephen Boyd <sboyd@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
Randy Dunlap <rdunlap@...radead.org>,
Bjorn Helgaas <bhelgaas@...gle.com>,
Arnd Bergmann <arnd@...db.de>,
Anna-Maria Behnsen <anna-maria@...utronix.de>,
linuxppc-dev@...ts.ozlabs.org,
linux-kernel@...r.kernel.org,
linux-s390@...r.kernel.org
Subject: [PATCH V2 05/19] vdso: math64: Provide mul_u64_u32_add_u64_shr()
Provide mul_u64_u32_add_u64_shr(), which computes (a * mul + b) >> shift.
The calculation will be used by timekeeping and the VDSO.
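
For context: the helper evaluates (a * mul + b) >> shift without
truncating the up-to-96-bit intermediate product. Below is a minimal
user-space sketch of the kind of timekeeping conversion it is meant
for; the names cycles_to_ns and frac_ns are illustrative only and not
taken from this series, and the sketch assumes a GCC/Clang-style
compiler with __int128:

#include <stdint.h>

typedef uint64_t u64;
typedef uint32_t u32;

/* Same semantics as the 128-bit variant added by this patch. */
static inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b,
					  unsigned int shift)
{
	return (u64)((((unsigned __int128)a * mul) + b) >> shift);
}

/*
 * Clocksource-style scaling: ns = (cycles * mult + frac) >> shift,
 * folding the accumulated fractional nanoseconds into the same
 * shift step instead of rounding twice.
 */
static u64 cycles_to_ns(u64 cycles, u32 mult, u64 frac_ns,
			unsigned int shift)
{
	return mul_u64_u32_add_u64_shr(cycles, mult, frac_ns, shift);
}
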
Place #include <vdso/math64.h> after #include <asm/div64.h> so that
architecture-specific overrides from asm/div64.h take precedence over
the generic #ifndef-guarded implementations, at least for the kernel
build.
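
The override hook is the usual preprocessor pattern: an architecture
defines the helper plus a same-named macro in its asm/div64.h, and the
#ifndef guard in vdso/math64.h then skips the generic version. A
hypothetical sketch (arch/xyz and the stand-in body are illustrative,
not from any real architecture):

/* arch/xyz/include/asm/div64.h (hypothetical) */
static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b,
						   unsigned int shift)
{
	/* A real architecture would use an optimized instruction sequence. */
	return (u64)((((unsigned __int128)a * mul) + b) >> shift);
}
#define mul_u64_u32_add_u64_shr mul_u64_u32_add_u64_shr
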
Signed-off-by: Adrian Hunter <adrian.hunter@...el.com>
---
 include/linux/math64.h |  2 +-
 include/vdso/math64.h  | 38 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 39 insertions(+), 1 deletion(-)

diff --git a/include/linux/math64.h b/include/linux/math64.h
index fd13622b2056..d34def7f9a8c 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -4,8 +4,8 @@
 
 #include <linux/types.h>
 #include <linux/math.h>
-#include <vdso/math64.h>
 #include <asm/div64.h>
+#include <vdso/math64.h>
 
 #if BITS_PER_LONG == 64
 
diff --git a/include/vdso/math64.h b/include/vdso/math64.h
index 7da703ee5561..22ae212f8b28 100644
--- a/include/vdso/math64.h
+++ b/include/vdso/math64.h
@@ -21,4 +21,42 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
 	return ret;
 }
 
+#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
+
+#ifndef mul_u64_u32_add_u64_shr
+static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
+{
+	return (u64)((((unsigned __int128)a * mul) + b) >> shift);
+}
+#endif /* mul_u64_u32_add_u64_shr */
+
+#else
+
+#ifndef mul_u64_u32_add_u64_shr
+#ifndef mul_u32_u32
+static inline u64 mul_u32_u32(u32 a, u32 b)
+{
+	return (u64)a * b;
+}
+#define mul_u32_u32 mul_u32_u32
+#endif
+static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
+{
+	u32 ah = a >> 32, al = a;
+	bool ovf;
+	u64 ret;
+
+	ovf = __builtin_add_overflow(mul_u32_u32(al, mul), b, &ret);
+	ret >>= shift;
+	if (ovf && shift)
+		ret += 1ULL << (64 - shift);
+	if (ah)
+		ret += mul_u32_u32(ah, mul) << (32 - shift);
+
+	return ret;
+}
+#endif /* mul_u64_u32_add_u64_shr */
+
+#endif
+
 #endif /* __VDSO_MATH64_H */
--
2.34.1
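
A note on the non-INT128 fallback above: split a = (ah << 32) + al, so
that a * mul + b = (ah * mul << 32) + (al * mul + b). If the low sum
carries out of 64 bits, the lost 2^64 contributes 1 << (64 - shift) to
the shifted result (for shift == 0 that carry is simply truncated away,
hence the "ovf && shift" test), and the high term contributes
ah * mul << (32 - shift), which is exact for shift <= 32. The following
stand-alone cross-check of the fallback against the 128-bit expression
is editorial, not part of the patch; it assumes a GCC/Clang-style
compiler with __int128 and __builtin_add_overflow:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t u64;
typedef uint32_t u32;

static u64 mul_u32_u32(u32 a, u32 b)
{
	return (u64)a * b;
}

/* The patch's fallback, minus the kernel-only annotations. */
static u64 fallback(u64 a, u32 mul, u64 b, unsigned int shift)
{
	u32 ah = a >> 32, al = a;
	bool ovf;
	u64 ret;

	ovf = __builtin_add_overflow(mul_u32_u32(al, mul), b, &ret);
	ret >>= shift;
	if (ovf && shift)
		ret += 1ULL << (64 - shift);
	if (ah)
		ret += mul_u32_u32(ah, mul) << (32 - shift);

	return ret;
}

int main(void)
{
	u64 a = 0x0123456789abcdefULL, b = ~0ULL;
	u32 mul = 0xdeadbeef;

	/* The fallback's shift-left terms require shift <= 32. */
	for (unsigned int shift = 0; shift <= 32; shift++) {
		u64 want = (u64)((((unsigned __int128)a * mul) + b) >> shift);

		assert(fallback(a, mul, b, shift) == want);
	}
	return 0;
}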