Date:   Tue,  9 May 2023 21:07:37 +1000
From:   Nicholas Piggin <npiggin@...il.com>
To:     linux-kernel@...r.kernel.org
Cc:     Nicholas Piggin <npiggin@...il.com>, linux-arch@...r.kernel.org,
        Thomas Gleixner <tglx@...utronix.de>,
        Peter Zijlstra <peterz@...radead.org>
Subject: [RFC PATCH 1/3] hrtimer: balance irq save/restore

hrtimers uses a trick where irq flags are saved once at the top level and
then passed down to callees, which restore irqs when dropping the lock and
then re-lock with raw_spin_lock_irq.
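
In sketch form, the unpatched pattern looks like this (condensed across
__run_hrtimer and its callers; the "..." elides unrelated code):

	raw_spin_lock_irqsave(&cpu_base->lock, flags);	/* flags saved once */
	...
	raw_spin_unlock_irqrestore(&cpu_base->lock, flags); /* callee drops lock */
	/* timer callback runs with the lock dropped */
	raw_spin_lock_irq(&cpu_base->lock);	/* disables irqs unconditionally */
	...
	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

With flags passed by pointer, as below, the re-lock becomes
raw_spin_lock_irqsave(&cpu_base->lock, *flags), so every irq save is paired
with exactly one restore.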

This is not actually a bug, but it is one of the few places in the kernel
that may disable irqs when they are already disabled. It would be nice to
have a debug check for that and to enforce that irq disable/enable calls are
balanced. It's one thing for core code to be clever, but a pattern like this
in a crufty driver could be a red flag.
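
As a hypothetical illustration (not part of this series), such a check could
be as simple as warning when irqs are disabled while they are already off:

	/* hypothetical debug wrapper: catch unbalanced irq disables */
	#define checked_local_irq_disable()		\
	do {						\
		WARN_ON_ONCE(irqs_disabled());		\
		local_irq_disable();			\
	} while (0)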

Signed-off-by: Nicholas Piggin <npiggin@...il.com>
---
 kernel/time/hrtimer.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index e8c08292defc..8baa36b091f0 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1364,13 +1364,13 @@ static void hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base)
  * allows the waiter to acquire the lock and make progress.
  */
 static void hrtimer_sync_wait_running(struct hrtimer_cpu_base *cpu_base,
-				      unsigned long flags)
+				      unsigned long *flags)
 {
 	if (atomic_read(&cpu_base->timer_waiters)) {
-		raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+		raw_spin_unlock_irqrestore(&cpu_base->lock, *flags);
 		spin_unlock(&cpu_base->softirq_expiry_lock);
 		spin_lock(&cpu_base->softirq_expiry_lock);
-		raw_spin_lock_irq(&cpu_base->lock);
+		raw_spin_lock_irqsave(&cpu_base->lock, *flags);
 	}
 }
 
@@ -1424,7 +1424,7 @@ hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base) { }
 static inline void
 hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base) { }
 static inline void hrtimer_sync_wait_running(struct hrtimer_cpu_base *base,
-					     unsigned long flags) { }
+					     unsigned long *flags) { }
 #endif
 
 /**
@@ -1642,7 +1642,7 @@ EXPORT_SYMBOL_GPL(hrtimer_active);
 static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
 			  struct hrtimer_clock_base *base,
 			  struct hrtimer *timer, ktime_t *now,
-			  unsigned long flags) __must_hold(&cpu_base->lock)
+			  unsigned long *flags) __must_hold(&cpu_base->lock)
 {
 	enum hrtimer_restart (*fn)(struct hrtimer *);
 	bool expires_in_hardirq;
@@ -1678,7 +1678,7 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
 	 * protected against migration to a different CPU even if the lock
 	 * is dropped.
 	 */
-	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_base->lock, *flags);
 	trace_hrtimer_expire_entry(timer, now);
 	expires_in_hardirq = lockdep_hrtimer_enter(timer);
 
@@ -1686,7 +1686,7 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
 
 	lockdep_hrtimer_exit(expires_in_hardirq);
 	trace_hrtimer_expire_exit(timer);
-	raw_spin_lock_irq(&cpu_base->lock);
+	raw_spin_lock_irqsave(&cpu_base->lock, *flags);
 
 	/*
 	 * Note: We clear the running state after enqueue_hrtimer and
@@ -1715,7 +1715,7 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
 }
 
 static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
-				 unsigned long flags, unsigned int active_mask)
+				 unsigned long *flags, unsigned int active_mask)
 {
 	struct hrtimer_clock_base *base;
 	unsigned int active = cpu_base->active_bases & active_mask;
@@ -1763,7 +1763,7 @@ static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
 	raw_spin_lock_irqsave(&cpu_base->lock, flags);
 
 	now = hrtimer_update_base(cpu_base);
-	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_SOFT);
+	__hrtimer_run_queues(cpu_base, now, &flags, HRTIMER_ACTIVE_SOFT);
 
 	cpu_base->softirq_activated = 0;
 	hrtimer_update_softirq_timer(cpu_base, true);
@@ -1808,7 +1808,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
 	}
 
-	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
+	__hrtimer_run_queues(cpu_base, now, &flags, HRTIMER_ACTIVE_HARD);
 
 	/* Reevaluate the clock bases for the [soft] next expiry */
 	expires_next = hrtimer_update_next_event(cpu_base);
@@ -1921,7 +1921,7 @@ void hrtimer_run_queues(void)
 		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
 	}
 
-	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
+	__hrtimer_run_queues(cpu_base, now, &flags, HRTIMER_ACTIVE_HARD);
 	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
 }
 
-- 
2.40.1
