Message-ID: <171258184187.10875.16271014936581417028.tip-bot2@tip-bot2>
Date: Mon, 08 Apr 2024 13:10:41 -0000
From: "tip-bot2 for Adrian Hunter" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: Adrian Hunter <adrian.hunter@...el.com>,
Thomas Gleixner <tglx@...utronix.de>, x86@...nel.org,
linux-kernel@...r.kernel.org
Subject: [tip: timers/core] vdso, math64: Provide mul_u64_u32_add_u64_shr()

The following commit has been merged into the timers/core branch of tip:

Commit-ID: 1beb35ec615f676d49d68b6dc23c7418ba8ff145
Gitweb: https://git.kernel.org/tip/1beb35ec615f676d49d68b6dc23c7418ba8ff145
Author: Adrian Hunter <adrian.hunter@...el.com>
AuthorDate: Mon, 25 Mar 2024 08:40:09 +02:00
Committer: Thomas Gleixner <tglx@...utronix.de>
CommitterDate: Mon, 08 Apr 2024 15:03:07 +02:00

vdso, math64: Provide mul_u64_u32_add_u64_shr()

Provide mul_u64_u32_add_u64_shr(), which computes ((a * mul) + b) >> shift;
this calculation will be used by the timekeeping core and the VDSO.

Place #include <vdso/math64.h> after #include <asm/div64.h> so that an
architecture can override the generic implementation: the generic version
is guarded by #ifndef, so a definition provided by <asm/div64.h> takes
precedence, at least for the kernel build.
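
As an illustration, the helper computes ((a * mul) + b) >> shift with the
intermediate product kept at full precision where possible. A minimal
standalone sketch of the same calculation (hypothetical values; assumes a
compiler that provides unsigned __int128):

  #include <stdint.h>
  #include <stdio.h>

  /* Same calculation as the int128 variant of mul_u64_u32_add_u64_shr() */
  static uint64_t mul_add_shr(uint64_t a, uint32_t mul, uint64_t b,
                              unsigned int shift)
  {
          return (uint64_t)((((unsigned __int128)a * mul) + b) >> shift);
  }

  int main(void)
  {
          /* Hypothetical clocksource-style conversion: cycles -> ns */
          uint64_t cycles = 1000000, base = 999;
          uint32_t mult = 4194304;        /* example mult for shift = 24 */

          printf("%llu\n",
                 (unsigned long long)mul_add_shr(cycles, mult, base, 24));
          return 0;
  }

On targets without __int128, the fallback added below splits the 64x32
multiply into 32x32 pieces and folds the addend's carry back in after the
shift.
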
Signed-off-by: Adrian Hunter <adrian.hunter@...el.com>
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Link: https://lore.kernel.org/r/20240325064023.2997-6-adrian.hunter@intel.com
---
 include/linux/math64.h |  2 +-
 include/vdso/math64.h  | 38 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 39 insertions(+), 1 deletion(-)

diff --git a/include/linux/math64.h b/include/linux/math64.h
index fd13622..d34def7 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -4,8 +4,8 @@
 
 #include <linux/types.h>
 #include <linux/math.h>
-#include <vdso/math64.h>
 #include <asm/div64.h>
+#include <vdso/math64.h>
 
 #if BITS_PER_LONG == 64
 
diff --git a/include/vdso/math64.h b/include/vdso/math64.h
index 7da703e..22ae212 100644
--- a/include/vdso/math64.h
+++ b/include/vdso/math64.h
@@ -21,4 +21,42 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
 	return ret;
 }
 
+#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
+
+#ifndef mul_u64_u32_add_u64_shr
+static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
+{
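+	/* Keep the multiply-add in 128 bits, then shift back down to 64 */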
+	return (u64)((((unsigned __int128)a * mul) + b) >> shift);
+}
+#endif /* mul_u64_u32_add_u64_shr */
+
+#else
+
+#ifndef mul_u64_u32_add_u64_shr
+#ifndef mul_u32_u32
+static inline u64 mul_u32_u32(u32 a, u32 b)
+{
+	return (u64)a * b;
+}
+#define mul_u32_u32 mul_u32_u32
+#endif
+static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
+{
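+	/* Split a into 32-bit halves: a = (ah << 32) + al */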
+	u32 ah = a >> 32, al = a;
+	bool ovf;
+	u64 ret;
+
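+	/*
+	 * al * mul + b can carry out of 64 bits; the carry represents
+	 * 2^64, which becomes 2^(64 - shift) after the right shift.
+	 */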
+	ovf = __builtin_add_overflow(mul_u32_u32(al, mul), b, &ret);
+	ret >>= shift;
+	if (ovf && shift)
+		ret += 1ULL << (64 - shift);
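+	/*
+	 * Fold in the high-half product: (ah * mul * 2^32) >> shift is
+	 * (ah * mul) << (32 - shift). Note this assumes shift <= 32.
+	 */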
+	if (ah)
+		ret += mul_u32_u32(ah, mul) << (32 - shift);
+
+	return ret;
+}
+#endif /* mul_u64_u32_add_u64_shr */
+
+#endif /* CONFIG_ARCH_SUPPORTS_INT128 && __SIZEOF_INT128__ */
+
 #endif /* __VDSO_MATH64_H */