Message-ID: <173746273180.1271204.13997769287296636421.tglx@xen13>
Date: Tue, 21 Jan 2025 13:32:29 +0100 (CET)
From: Thomas Gleixner <tglx@...utronix.de>
To: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: linux-kernel@...r.kernel.org, x86@...nel.org
Subject: [GIT pull] timers/core for 6.14-rc1
Linus,
please pull the latest timers/core branch from:
git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers-core-2025-01-21
up to: dcf6230555dc: timers/migration: Simplify top level detection on group setup
Updates for timers and timekeeping:
- Just boring cleanups, typo and comment fixes and trivial optimizations
Thanks,
tglx
------------------>
Dr. David Alan Gilbert (1):
timekeeping: Remove unused ktime_get_fast_timestamps()
Frederic Weisbecker (1):
timers/migration: Simplify top level detection on group setup
Haiyue Wang (1):
vdso: Correct typo in PAGE_SHIFT comment
Paul E. McKenney (1):
clocksource/wdtest: Print time values for short udelay(1)
Randy Dunlap (2):
tick/broadcast: Add kernel-doc for function parameters
timer/migration: Fix kernel-doc warnings for union tmigr_state
Richard Clark (1):
hrtimers: Update the return type of enqueue_hrtimer()
Zhongqiu Han (1):
timers: Optimize get_timer_[this_]cpu_base()
Zhu Jun (1):
posix-timers: Fix typo in __lock_timer()
include/linux/timekeeping.h      | 15 --------
include/vdso/page.h              |  2 +-
kernel/time/clocksource-wdtest.c |  3 +-
kernel/time/hrtimer.c            |  7 ++--
kernel/time/posix-timers.c       |  2 +-
kernel/time/tick-broadcast.c     |  2 ++
kernel/time/timekeeping.c        | 77 +++++-----------------------------------
kernel/time/timer.c              | 16 ++++-----
kernel/time/timer_migration.c    |  4 +--
kernel/time/timer_migration.h    | 21 +++++------
10 files changed, 33 insertions(+), 116 deletions(-)
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 0e035f675efe..542773650200 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -263,18 +263,6 @@ extern bool timekeeping_rtc_skipresume(void);
extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta);
-/**
- * struct ktime_timestamps - Simultaneous mono/boot/real timestamps
- * @mono: Monotonic timestamp
- * @boot: Boottime timestamp
- * @real: Realtime timestamp
- */
-struct ktime_timestamps {
- u64 mono;
- u64 boot;
- u64 real;
-};
-
/**
* struct system_time_snapshot - simultaneous raw/real time capture with
* counter value
@@ -345,9 +333,6 @@ extern int get_device_system_crosststamp(
*/
extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot);
-/* NMI safe mono/boot/realtime timestamps */
-extern void ktime_get_fast_timestamps(struct ktime_timestamps *snap);
-
/*
* Persistent clock related interfaces
*/
diff --git a/include/vdso/page.h b/include/vdso/page.h
index 710ae2414e68..bc47186c07fc 100644
--- a/include/vdso/page.h
+++ b/include/vdso/page.h
@@ -8,7 +8,7 @@
* PAGE_SHIFT determines the page size.
*
* Note: This definition is required because PAGE_SHIFT is used
- * in several places throuout the codebase.
+ * in several places throughout the codebase.
*/
#define PAGE_SHIFT CONFIG_PAGE_SHIFT
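The corrected comment is about the standard page-size plumbing, where everything derives from the shift. For illustration (concrete values assumed here; the kernel takes the shift from CONFIG_PAGE_SHIFT):

/* Illustrative: a 12-bit shift yields the common 4 KiB page size. */
#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)     /* 4096 */
#define PAGE_MASK       (~(PAGE_SIZE - 1))      /* aligns an address down to its page */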
diff --git a/kernel/time/clocksource-wdtest.c b/kernel/time/clocksource-wdtest.c
index 62e73444ffe4..38dae590b29f 100644
--- a/kernel/time/clocksource-wdtest.c
+++ b/kernel/time/clocksource-wdtest.c
@@ -137,7 +137,8 @@ static int wdtest_func(void *arg)
udelay(1);
j2 = clocksource_wdtest_ktime.read(&clocksource_wdtest_ktime);
pr_info("--- tsc-like times: %lu - %lu = %lu.\n", j2, j1, j2 - j1);
- WARN_ON_ONCE(time_before(j2, j1 + NSEC_PER_USEC));
+ WARN_ONCE(time_before(j2, j1 + NSEC_PER_USEC),
+ "Expected at least 1000ns, got %lu.\n", j2 - j1);
/* Verify tsc-like stability with various numbers of errors injected. */
max_retries = clocksource_get_max_watchdog_retry();
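The hunk swaps WARN_ON_ONCE() for WARN_ONCE() so the one-shot warning also reports the measured delta. A minimal userspace analogue of the one-shot semantics (a simplified sketch; the real kernel macro additionally taints the kernel and dumps a backtrace):

#include <stdbool.h>
#include <stdio.h>

/* Fires at most once per call site and prints a formatted message;
 * evaluates to the condition so the caller can still test it. */
#define WARN_ONCE(cond, fmt, ...) ({                                \
        static bool __warned;                                       \
        bool __cond = (cond);                                       \
        if (__cond && !__warned) {                                  \
                __warned = true;                                    \
                fprintf(stderr, "WARNING: " fmt, ##__VA_ARGS__);    \
        }                                                           \
        __cond;                                                     \
})

Each expansion carries its own static flag, which is what makes the warning per call site rather than global.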
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 80fe3749d2db..b026fd481dce 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1067,11 +1067,10 @@ EXPORT_SYMBOL_GPL(hrtimer_forward);
* The timer is inserted in expiry order. Insertion into the
* red black tree is O(log(n)). Must hold the base lock.
*
- * Returns 1 when the new timer is the leftmost timer in the tree.
+ * Returns true when the new timer is the leftmost timer in the tree.
*/
-static int enqueue_hrtimer(struct hrtimer *timer,
- struct hrtimer_clock_base *base,
- enum hrtimer_mode mode)
+static bool enqueue_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base,
+ enum hrtimer_mode mode)
{
debug_activate(timer, mode);
WARN_ON_ONCE(!base->cpu_base->online);
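The int to bool conversion matches what enqueue_hrtimer() actually reports: whether the new timer became the leftmost, i.e. earliest-expiring, entry, which is the caller's cue that the next-event hardware may need reprogramming. The same return convention, reduced from the kernel's red-black tree to a sorted singly linked list (a standalone sketch with made-up names):

#include <stdbool.h>
#include <stdint.h>

struct tnode {
        uint64_t expires;
        struct tnode *next;
};

/* Insert in expiry order; return true when the new node became the
 * head, i.e. the earliest expiry changed. */
static bool enqueue_sorted(struct tnode **head, struct tnode *tn)
{
        struct tnode **pp = head;

        while (*pp && (*pp)->expires <= tn->expires)
                pp = &(*pp)->next;

        tn->next = *pp;
        *pp = tn;
        return pp == head;
}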
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index 881a9ce96af7..1b675aee99a9 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -538,7 +538,7 @@ static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
* When the reference count reaches zero, the timer is scheduled
* for RCU removal after the grace period.
*
- * Holding rcu_read_lock() accross the lookup ensures that
+ * Holding rcu_read_lock() across the lookup ensures that
* the timer cannot be freed.
*
* The lookup validates locklessly that timr::it_signal ==
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index ed58eebb4e8f..0207868c8b4d 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -1020,6 +1020,8 @@ static inline ktime_t tick_get_next_period(void)
/**
* tick_broadcast_setup_oneshot - setup the broadcast device
+ * @bc: the broadcast device
+ * @from_periodic: true if called from periodic mode
*/
static void tick_broadcast_setup_oneshot(struct clock_event_device *bc,
bool from_periodic)
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 3d128825d343..1e67d076f195 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -485,90 +485,29 @@ u64 notrace ktime_get_tai_fast_ns(void)
}
EXPORT_SYMBOL_GPL(ktime_get_tai_fast_ns);
-static __always_inline u64 __ktime_get_real_fast(struct tk_fast *tkf, u64 *mono)
+/**
+ * ktime_get_real_fast_ns: - NMI safe and fast access to clock realtime.
+ *
+ * See ktime_get_mono_fast_ns() for documentation of the time stamp ordering.
+ */
+u64 ktime_get_real_fast_ns(void)
{
+ struct tk_fast *tkf = &tk_fast_mono;
struct tk_read_base *tkr;
- u64 basem, baser, delta;
+ u64 baser, delta;
unsigned int seq;
do {
seq = raw_read_seqcount_latch(&tkf->seq);
tkr = tkf->base + (seq & 0x01);
- basem = ktime_to_ns(tkr->base);
baser = ktime_to_ns(tkr->base_real);
delta = timekeeping_get_ns(tkr);
} while (raw_read_seqcount_latch_retry(&tkf->seq, seq));
- if (mono)
- *mono = basem + delta;
return baser + delta;
}
-
-/**
- * ktime_get_real_fast_ns: - NMI safe and fast access to clock realtime.
- *
- * See ktime_get_mono_fast_ns() for documentation of the time stamp ordering.
- */
-u64 ktime_get_real_fast_ns(void)
-{
- return __ktime_get_real_fast(&tk_fast_mono, NULL);
-}
EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);
-/**
- * ktime_get_fast_timestamps: - NMI safe timestamps
- * @snapshot: Pointer to timestamp storage
- *
- * Stores clock monotonic, boottime and realtime timestamps.
- *
- * Boot time is a racy access on 32bit systems if the sleep time injection
- * happens late during resume and not in timekeeping_resume(). That could
- * be avoided by expanding struct tk_read_base with boot offset for 32bit
- * and adding more overhead to the update. As this is a hard to observe
- * once per resume event which can be filtered with reasonable effort using
- * the accurate mono/real timestamps, it's probably not worth the trouble.
- *
- * Aside of that it might be possible on 32 and 64 bit to observe the
- * following when the sleep time injection happens late:
- *
- * CPU 0 CPU 1
- * timekeeping_resume()
- * ktime_get_fast_timestamps()
- * mono, real = __ktime_get_real_fast()
- * inject_sleep_time()
- * update boot offset
- * boot = mono + bootoffset;
- *
- * That means that boot time already has the sleep time adjustment, but
- * real time does not. On the next readout both are in sync again.
- *
- * Preventing this for 64bit is not really feasible without destroying the
- * careful cache layout of the timekeeper because the sequence count and
- * struct tk_read_base would then need two cache lines instead of one.
- *
- * Access to the time keeper clock source is disabled across the innermost
- * steps of suspend/resume. The accessors still work, but the timestamps
- * are frozen until time keeping is resumed which happens very early.
- *
- * For regular suspend/resume there is no observable difference vs. sched
- * clock, but it might affect some of the nasty low level debug printks.
- *
- * OTOH, access to sched clock is not guaranteed across suspend/resume on
- * all systems either so it depends on the hardware in use.
- *
- * If that turns out to be a real problem then this could be mitigated by
- * using sched clock in a similar way as during early boot. But it's not as
- * trivial as on early boot because it needs some careful protection
- * against the clock monotonic timestamp jumping backwards on resume.
- */
-void ktime_get_fast_timestamps(struct ktime_timestamps *snapshot)
-{
- struct timekeeper *tk = &tk_core.timekeeper;
-
- snapshot->real = __ktime_get_real_fast(&tk_fast_mono, &snapshot->mono);
- snapshot->boot = snapshot->mono + ktime_to_ns(data_race(tk->offs_boot));
-}
-
/**
* halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
* @tk: Timekeeper to snapshot.
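After the fold, ktime_get_real_fast_ns() keeps the seqcount-latch read loop visible above: the timekeeper maintains two copies of the read base and flips a sequence counter around updates, so an NMI-safe reader picks the currently stable copy via (seq & 0x01) and retries only if the latch flipped mid-read. A userspace sketch of that control flow (names invented; memory ordering and store-tearing protection are simplified relative to raw_write_seqcount_latch()/raw_read_seqcount_latch()):

#include <stdatomic.h>
#include <stdint.h>

struct read_base {
        uint64_t base;          /* base timestamp, ns */
        uint64_t delta;         /* clocksource delta, ns */
};

static _Atomic unsigned int latch_seq;
static struct read_base bases[2];

/* Writer: only ever touch the copy readers are currently NOT indexing. */
static void latch_update(struct read_base rb)
{
        atomic_fetch_add(&latch_seq, 1);        /* odd: readers use bases[1] */
        bases[0] = rb;
        atomic_fetch_add(&latch_seq, 1);        /* even: readers use bases[0] */
        bases[1] = rb;
}

/* Reader: lock-free; retries only if the latch flipped mid-read. */
static uint64_t latch_read(void)
{
        struct read_base rb;
        unsigned int seq;

        do {
                seq = atomic_load(&latch_seq);
                rb = bases[seq & 0x01];
        } while (atomic_load(&latch_seq) != seq);

        return rb.base + rb.delta;
}

Because the writer never modifies the copy readers are indexing, neither side can block the other, which is what makes the accessor NMI safe.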
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index a5860bf6d16f..40706cb36920 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -956,33 +956,29 @@ static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
{
int index = tflags & TIMER_PINNED ? BASE_LOCAL : BASE_GLOBAL;
- struct timer_base *base;
-
- base = per_cpu_ptr(&timer_bases[index], cpu);
/*
* If the timer is deferrable and NO_HZ_COMMON is set then we need
* to use the deferrable base.
*/
if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
- base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
- return base;
+ index = BASE_DEF;
+
+ return per_cpu_ptr(&timer_bases[index], cpu);
}
static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
{
int index = tflags & TIMER_PINNED ? BASE_LOCAL : BASE_GLOBAL;
- struct timer_base *base;
-
- base = this_cpu_ptr(&timer_bases[index]);
/*
* If the timer is deferrable and NO_HZ_COMMON is set then we need
* to use the deferrable base.
*/
if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
- base = this_cpu_ptr(&timer_bases[BASE_DEF]);
- return base;
+ index = BASE_DEF;
+
+ return this_cpu_ptr(&timer_bases[index]);
}
static inline struct timer_base *get_timer_base(u32 tflags)
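Both hunks are the same micro-optimization: resolve the final array index first and do the per-CPU lookup exactly once, instead of fetching a default base and conditionally overwriting the pointer with a second lookup. Stripped of the per-CPU machinery the shape is (a standalone sketch; the BASE_* indices match the kernel's enum, the flag bit values here are illustrative):

#include <stdbool.h>

enum { BASE_LOCAL, BASE_GLOBAL, BASE_DEF, NR_BASES };

#define TIMER_PINNED            0x1u    /* illustrative bit values */
#define TIMER_DEFERRABLE        0x2u

struct timer_base { int placeholder; };

static struct timer_base timer_bases[NR_BASES]; /* per-CPU array in the kernel */

static struct timer_base *get_base(unsigned int tflags, bool nohz_common)
{
        int index = (tflags & TIMER_PINNED) ? BASE_LOCAL : BASE_GLOBAL;

        /* Deferrable timers get their own base when NO_HZ_COMMON is set. */
        if (nohz_common && (tflags & TIMER_DEFERRABLE))
                index = BASE_DEF;

        /* Single lookup with the final index - no pointer rewrite. */
        return &timer_bases[index];
}

With IS_ENABLED(CONFIG_NO_HZ_COMMON) being a compile-time constant, the conditional costs nothing when the option is off.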
diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
index 8d57f7686bb0..61633762f6bf 100644
--- a/kernel/time/timer_migration.c
+++ b/kernel/time/timer_migration.c
@@ -1624,9 +1624,7 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
* be different from tmigr_hierarchy_levels, contains only a
* single group.
*/
- if (group->parent || i == tmigr_hierarchy_levels ||
- (list_empty(&tmigr_level_list[i]) &&
- list_is_singular(&tmigr_level_list[i - 1])))
+ if (group->parent || list_is_singular(&tmigr_level_list[i - 1]))
break;
} while (i < tmigr_hierarchy_levels);
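The simplified condition hinges on list_is_singular(): once a group has no parent and its own level holds just that one group, the top of the hierarchy has been found and the extra empty/level checks are redundant. The helper's semantics, restated in self-contained form (modeled on include/linux/list.h):

#include <stdbool.h>

struct list_head {
        struct list_head *next, *prev;
};

static inline bool list_empty(const struct list_head *head)
{
        return head->next == head;
}

/* True iff the list holds exactly one entry: non-empty, and the first
 * and last entries are the same node. */
static inline bool list_is_singular(const struct list_head *head)
{
        return !list_empty(head) && (head->next == head->prev);
}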
diff --git a/kernel/time/timer_migration.h b/kernel/time/timer_migration.h
index 154accc7a543..ae19f70f8170 100644
--- a/kernel/time/timer_migration.h
+++ b/kernel/time/timer_migration.h
@@ -110,22 +110,19 @@ struct tmigr_cpu {
* union tmigr_state - state of tmigr_group
* @state: Combined version of the state - only used for atomic
* read/cmpxchg function
- * @struct: Split version of the state - only use the struct members to
+ * &anon struct: Split version of the state - only use the struct members to
* update information to stay independent of endianness
+ * @active: Contains each mask bit of the active children
+ * @migrator: Contains mask of the child which is migrator
+ * @seq: Sequence counter needs to be increased when an update
+ * to the tmigr_state is done. It prevents a race when
+ * updates in the child groups are propagated in changed
+ * order. Detailed information about the scenario is
+ * given in the documentation at the begin of
+ * timer_migration.c.
*/
union tmigr_state {
u32 state;
- /**
- * struct - split state of tmigr_group
- * @active: Contains each mask bit of the active children
- * @migrator: Contains mask of the child which is migrator
- * @seq: Sequence counter needs to be increased when an update
- * to the tmigr_state is done. It prevents a race when
- * updates in the child groups are propagated in changed
- * order. Detailed information about the scenario is
- * given in the documentation at the begin of
- * timer_migration.c.
- */
struct {
u8 active;
u8 migrator;
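The relocated kernel-doc spells out the discipline for this state word: field updates go through the split struct view so the code stays independent of endianness, while concurrent access goes through the combined 32bit word with atomic read/cmpxchg. A userspace sketch of that update pattern (the helper name and mask handling are invented for illustration and are not the tmigr code):

#include <stdatomic.h>
#include <stdint.h>

union tmigr_state {
        uint32_t state;                 /* combined word for atomic ops */
        struct {                        /* split view for field updates */
                uint8_t  active;        /* mask bits of active children */
                uint8_t  migrator;      /* mask of the migrator child */
                uint16_t seq;           /* update sequence count */
        };
};

static _Atomic uint32_t group_state;

/* Mark a child active: modify via the struct view, publish via
 * cmpxchg on the combined word so concurrent updaters retry. */
static void mark_child_active(uint8_t childmask)
{
        union tmigr_state cur, new;

        cur.state = atomic_load(&group_state);
        do {
                new = cur;
                new.active |= childmask;
                new.seq++;              /* detects reordered propagation */
        } while (!atomic_compare_exchange_weak(&group_state, &cur.state,
                                               new.state));
}

Bumping seq on every successful update is what allows out-of-order propagation from the child groups to be detected, as the comment block describes.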