Date:   Thu,  7 Apr 2022 09:52:41 +0200
From:   Artem Savkov <asavkov@...hat.com>
To:     Thomas Gleixner <tglx@...utronix.de>,
        Josh Poimboeuf <jpoimboe@...hat.com>,
        Anna-Maria Behnsen <anna-maria@...utronix.de>,
        netdev@...r.kernel.org
Cc:     davem@...emloft.net, yoshfuji@...ux-ipv6.org, dsahern@...nel.org,
        linux-kernel@...r.kernel.org, Artem Savkov <asavkov@...hat.com>
Subject: [PATCH v4 1/2] timer: add a function to adjust timeouts to be an upper bound

The current timer wheel implementation is optimized for performance and
energy usage but lacks precision. Normally this is not a problem: most
timers that use the timer wheel are timeouts and thus rarely expire;
they usually get canceled or modified before expiration. Even when they
do expire, firing a bit late is not an issue for timeout timers.

The TCP keepalive timer is a special case: its aim is to prevent
timeouts, so triggering earlier rather than later is the desired
behavior. In a reported case the user had a 3600s keepalive timer to
prevent firewall disconnects (against a 3650s firewall timeout). They
observed keepalive timers firing up to four minutes late, causing
unexpected disconnects.
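
For illustration, the magnitude of that delay follows from the wheel
constants in kernel/time/timer.c. The userspace sketch below (constants
copied from the kernel, HZ=1000 assumed) shows that a 3600s timeout
lands on wheel level 6, where the bucket granularity is 262144 jiffies,
i.e. roughly 4 minutes 22 seconds:

#include <stdio.h>

/* Wheel geometry copied from kernel/time/timer.c. */
#define LVL_CLK_SHIFT	3
#define LVL_BITS	6
#define LVL_SIZE	(1UL << LVL_BITS)
#define LVL_SHIFT(n)	((n) * LVL_CLK_SHIFT)
#define LVL_GRAN(n)	(1UL << LVL_SHIFT(n))
#define LVL_START(n)	((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))

int main(void)
{
	unsigned long hz = 1000;		/* assuming CONFIG_HZ=1000 */
	unsigned long timeout = 3600 * hz;	/* 3600s keepalive, in jiffies */
	int lvl;

	/* Same level selection as get_wheel_lvl(): first level covering the delta. */
	for (lvl = 0; lvl < 8 && timeout >= LVL_START(lvl + 1); lvl++)
		;

	printf("level %d, granularity %lu jiffies (~%lu s)\n",
	       lvl, LVL_GRAN(lvl), LVL_GRAN(lvl) / hz);
	return 0;
}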

This commit adds an upper_bound_timeout() function that takes a
relative timeout and adjusts it based on the timer wheel granularity,
so that the supplied value effectively becomes an upper bound for when
the timer will expire.
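
For callers this is a drop-in adjustment of the relative timeout before
arming the timer. A minimal, purely illustrative sketch (the helper
name is hypothetical):

/*
 * Illustrative helper: arm @timer so that it expires no later than
 * @timeout jiffies from now.  upper_bound_timeout() shrinks the relative
 * timeout by the granularity of the wheel level it would land on, so the
 * wheel's rounding can no longer push expiry past the requested value.
 */
static void arm_upper_bound_timer(struct timer_list *timer, unsigned long timeout)
{
	mod_timer(timer, jiffies + upper_bound_timeout(timeout));
}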

This was previously discussed here:
https://lore.kernel.org/all/20210302001054.4qgrvnkltvkgikzr@treble/T/#u

Suggested-by: Josh Poimboeuf <jpoimboe@...hat.com>
Signed-off-by: Artem Savkov <asavkov@...hat.com>
---
 include/linux/timer.h |  1 +
 kernel/time/timer.c   | 68 ++++++++++++++++++++++++++++++++++---------
 2 files changed, 56 insertions(+), 13 deletions(-)

diff --git a/include/linux/timer.h b/include/linux/timer.h
index fda13c9d1256c..b209d31d543f0 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -168,6 +168,7 @@ static inline int timer_pending(const struct timer_list * timer)
 	return !hlist_unhashed_lockless(&timer->entry);
 }
 
+extern unsigned long upper_bound_timeout(unsigned long timeout);
 extern void add_timer_on(struct timer_list *timer, int cpu);
 extern int del_timer(struct timer_list * timer);
 extern int mod_timer(struct timer_list *timer, unsigned long expires);
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 85f1021ad4595..a645b62e257e2 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -507,28 +507,38 @@ static inline unsigned calc_index(unsigned long expires, unsigned lvl,
 	return LVL_OFFS(lvl) + (expires & LVL_MASK);
 }
 
-static int calc_wheel_index(unsigned long expires, unsigned long clk,
-			    unsigned long *bucket_expiry)
+static inline int get_wheel_lvl(unsigned long delta)
 {
-	unsigned long delta = expires - clk;
-	unsigned int idx;
-
 	if (delta < LVL_START(1)) {
-		idx = calc_index(expires, 0, bucket_expiry);
+		return 0;
 	} else if (delta < LVL_START(2)) {
-		idx = calc_index(expires, 1, bucket_expiry);
+		return 1;
 	} else if (delta < LVL_START(3)) {
-		idx = calc_index(expires, 2, bucket_expiry);
+		return 2;
 	} else if (delta < LVL_START(4)) {
-		idx = calc_index(expires, 3, bucket_expiry);
+		return 3;
 	} else if (delta < LVL_START(5)) {
-		idx = calc_index(expires, 4, bucket_expiry);
+		return 4;
 	} else if (delta < LVL_START(6)) {
-		idx = calc_index(expires, 5, bucket_expiry);
+		return 5;
 	} else if (delta < LVL_START(7)) {
-		idx = calc_index(expires, 6, bucket_expiry);
+		return 6;
 	} else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {
-		idx = calc_index(expires, 7, bucket_expiry);
+		return 7;
+	}
+
+	return -1;
+}
+
+static int calc_wheel_index(unsigned long expires, unsigned long clk,
+			    unsigned long *bucket_expiry)
+{
+	unsigned long delta = expires - clk;
+	unsigned int idx;
+	int lvl = get_wheel_lvl(delta);
+
+	if (lvl >= 0) {
+		idx = calc_index(expires, lvl, bucket_expiry);
 	} else if ((long) delta < 0) {
 		idx = clk & LVL_MASK;
 		*bucket_expiry = clk;
@@ -545,6 +555,38 @@ static int calc_wheel_index(unsigned long expires, unsigned long clk,
 	return idx;
 }
 
+/**
+ * upper_bound_timeout - return granularity-adjusted timeout
+ * @timeout: timeout value in jiffies
+ *
+ * This function returns the supplied timeout adjusted based on timer wheel
+ * granularity, effectively making the supplied value an upper bound at which
+ * the timer will expire. Due to the way the timer wheel works, timeouts
+ * smaller than LVL_GRAN on their respective levels will be _at least_
+ * LVL_GRAN(lvl) - LVL_GRAN(lvl - 1) jiffies early.
+ */
+unsigned long upper_bound_timeout(unsigned long timeout)
+{
+	int lvl = get_wheel_lvl(timeout);
+
+	if (lvl < 0) {
+		if ((long) timeout < 0) {
+			/*
+			 * This will expire immediately so no adjustment
+			 * needed.
+			 */
+			return timeout;
+		} else {
+			if (timeout > WHEEL_TIMEOUT_CUTOFF)
+				timeout = WHEEL_TIMEOUT_CUTOFF;
+			lvl = LVL_DEPTH - 1;
+		}
+	}
+
+	return LVL_GRAN(lvl) > timeout ? 0 : timeout - LVL_GRAN(lvl);
+}
+EXPORT_SYMBOL(upper_bound_timeout);
+
 static void
 trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
 {
-- 
2.34.1
