Message-Id: <20220324091500.2638745-2-asavkov@redhat.com>
Date: Thu, 24 Mar 2022 10:14:59 +0100
From: Artem Savkov <asavkov@...hat.com>
To: tglx@...utronix.de, jpoimboe@...hat.com, netdev@...r.kernel.org
Cc: davem@...emloft.net, yoshfuji@...ux-ipv6.org, dsahern@...nel.org,
linux-kernel@...r.kernel.org, Artem Savkov <asavkov@...hat.com>
Subject: [PATCH v2 1/2] timer: introduce upper bound timers

The current timer wheel implementation is optimized for performance and
energy usage, but lacks precision. Normally this is not a problem, as
most timers that use the timer wheel are used for timeouts and thus
rarely expire; instead they usually get canceled or modified before
expiration. Even when they do expire, firing a bit late is not an issue
for timeout timers.

The TCP keepalive timer is a special case: its aim is to prevent
timeouts, so triggering earlier rather than later is the desired
behavior. In a reported case the user had a 3600s keepalive timer to
prevent firewall disconnects (on a 3650s interval). They observed
keepalive timers firing up to four minutes late, causing unexpected
disconnects.

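For reference, and assuming HZ=1000 (the report does not state the
configuration): a 3600s expiry is 3,600,000 jiffies ahead, which places
the timer in wheel level 6. That level's granularity is
1 << (6 * LVL_CLK_SHIFT) = 262144 jiffies, roughly 262 seconds, so the
round-up done by calc_index() can delay expiry by well over four
minutes, matching the observed behavior.
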
This commit adds a TIMER_UPPER_BOUND flag, which allows creating timers
that expire at the specified time or earlier, never later.
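
For illustration, a minimal sketch of how a caller could request such a
timer with this patch applied (the keepalive_* names and the 3600s
interval are only illustrative):

	static struct timer_list keepalive_timer;

	/* illustrative callback, invoked when the timer expires */
	static void keepalive_fire(struct timer_list *t)
	{
		/* send a keepalive probe here */
	}

	static void keepalive_start(void)
	{
		/* fire at most 3600 seconds from now, possibly earlier */
		timer_setup(&keepalive_timer, keepalive_fire, TIMER_UPPER_BOUND);
		mod_timer(&keepalive_timer, jiffies + 3600 * HZ);
	}
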
This was previously discussed here:
https://lore.kernel.org/all/20210302001054.4qgrvnkltvkgikzr@treble/T/#u
Suggested-by: Josh Poimboeuf <jpoimboe@...hat.com>
Signed-off-by: Artem Savkov <asavkov@...hat.com>
---
include/linux/timer.h | 6 +++++-
kernel/time/timer.c | 36 ++++++++++++++++++++++--------------
2 files changed, 27 insertions(+), 15 deletions(-)
diff --git a/include/linux/timer.h b/include/linux/timer.h
index fda13c9d1256..4b2456501be6 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -60,6 +60,9 @@ struct timer_list {
* function is invoked via mod_timer() or add_timer(). If the timer
* should be placed on a particular CPU, then add_timer_on() has to be
* used.
+ *
+ * @TIMER_UPPER_BOUND: Unlike normal timers, which trigger at the specified
+ * time or later, an upper bound timer expires at the specified time or earlier.
*/
#define TIMER_CPUMASK 0x0003FFFF
#define TIMER_MIGRATING 0x00040000
@@ -67,7 +70,8 @@ struct timer_list {
#define TIMER_DEFERRABLE 0x00080000
#define TIMER_PINNED 0x00100000
#define TIMER_IRQSAFE 0x00200000
-#define TIMER_INIT_FLAGS (TIMER_DEFERRABLE | TIMER_PINNED | TIMER_IRQSAFE)
+#define TIMER_UPPER_BOUND 0x00400000
+#define TIMER_INIT_FLAGS (TIMER_DEFERRABLE | TIMER_PINNED | TIMER_IRQSAFE | TIMER_UPPER_BOUND)
#define TIMER_ARRAYSHIFT 22
#define TIMER_ARRAYMASK 0xFFC00000
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 85f1021ad459..f4965644d728 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -491,7 +491,7 @@ static inline void timer_set_idx(struct timer_list *timer, unsigned int idx)
* time.
*/
static inline unsigned calc_index(unsigned long expires, unsigned lvl,
- unsigned long *bucket_expiry)
+ unsigned long *bucket_expiry, bool upper_bound)
{
/*
@@ -501,34 +501,39 @@ static inline unsigned calc_index(unsigned long expires, unsigned lvl,
* - Truncation of the expiry time in the outer wheel levels
*
* Round up with level granularity to prevent this.
+ * Do not round up in the case of an upper bound timer.
*/
- expires = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl);
+ if (upper_bound)
+ expires = expires >> LVL_SHIFT(lvl);
+ else
+ expires = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl);
+
*bucket_expiry = expires << LVL_SHIFT(lvl);
return LVL_OFFS(lvl) + (expires & LVL_MASK);
}
static int calc_wheel_index(unsigned long expires, unsigned long clk,
- unsigned long *bucket_expiry)
+ unsigned long *bucket_expiry, bool upper_bound)
{
unsigned long delta = expires - clk;
unsigned int idx;
if (delta < LVL_START(1)) {
- idx = calc_index(expires, 0, bucket_expiry);
+ idx = calc_index(expires, 0, bucket_expiry, upper_bound);
} else if (delta < LVL_START(2)) {
- idx = calc_index(expires, 1, bucket_expiry);
+ idx = calc_index(expires, 1, bucket_expiry, upper_bound);
} else if (delta < LVL_START(3)) {
- idx = calc_index(expires, 2, bucket_expiry);
+ idx = calc_index(expires, 2, bucket_expiry, upper_bound);
} else if (delta < LVL_START(4)) {
- idx = calc_index(expires, 3, bucket_expiry);
+ idx = calc_index(expires, 3, bucket_expiry, upper_bound);
} else if (delta < LVL_START(5)) {
- idx = calc_index(expires, 4, bucket_expiry);
+ idx = calc_index(expires, 4, bucket_expiry, upper_bound);
} else if (delta < LVL_START(6)) {
- idx = calc_index(expires, 5, bucket_expiry);
+ idx = calc_index(expires, 5, bucket_expiry, upper_bound);
} else if (delta < LVL_START(7)) {
- idx = calc_index(expires, 6, bucket_expiry);
+ idx = calc_index(expires, 6, bucket_expiry, upper_bound);
} else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {
- idx = calc_index(expires, 7, bucket_expiry);
+ idx = calc_index(expires, 7, bucket_expiry, upper_bound);
} else if ((long) delta < 0) {
idx = clk & LVL_MASK;
*bucket_expiry = clk;
@@ -540,7 +545,8 @@ static int calc_wheel_index(unsigned long expires, unsigned long clk,
if (delta >= WHEEL_TIMEOUT_CUTOFF)
expires = clk + WHEEL_TIMEOUT_MAX;
- idx = calc_index(expires, LVL_DEPTH - 1, bucket_expiry);
+ idx = calc_index(expires, LVL_DEPTH - 1, bucket_expiry,
+ upper_bound);
}
return idx;
}
@@ -607,7 +613,8 @@ static void internal_add_timer(struct timer_base *base, struct timer_list *timer
unsigned long bucket_expiry;
unsigned int idx;
- idx = calc_wheel_index(timer->expires, base->clk, &bucket_expiry);
+ idx = calc_wheel_index(timer->expires, base->clk, &bucket_expiry,
+ timer->flags & TIMER_UPPER_BOUND);
enqueue_timer(base, timer, idx, bucket_expiry);
}
@@ -1000,7 +1007,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int option
}
clk = base->clk;
- idx = calc_wheel_index(expires, clk, &bucket_expiry);
+ idx = calc_wheel_index(expires, clk, &bucket_expiry,
+ timer->flags & TIMER_UPPER_BOUND);
/*
* Retrieve and compare the array index of the pending
--
2.34.1