Message-Id: <20240613142929.132220-1-longman@redhat.com>
Date: Thu, 13 Jun 2024 10:29:29 -0400
From: Waiman Long <longman@...hat.com>
To: Peter Zijlstra <peterz@...radead.org>,
	Ingo Molnar <mingo@...hat.com>,
	Will Deacon <will.deacon@....com>,
	Boqun Feng <boqun.feng@...il.com>
Cc: linux-kernel@...r.kernel.org,
	David Arcari <darcari@...hat.com>,
	Waiman Long <longman@...hat.com>
Subject: [PATCH] lockdep: Improve consistency between lockdep and hardware hardirqs states

There are occasional instances where a lockdep splat complains about
an incorrect hardirqs state even though the hardware hardirqs state,
as shown in the flags register, is correct.  This can happen, for
instance, when a local_irq_disable() is followed by a
raw_local_irq_enable().
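
As a purely illustrative sketch (not taken from an actual report), a
sequence like the following leaves the two states out of sync: the
flags register says hardirqs are enabled while lockdep still thinks
they are disabled, so a later lockdep check can complain even though
the hardware state at that point is correct.

    local_irq_disable();      /* hardware: off, lockdep: off         */
    raw_local_irq_enable();   /* hardware: on,  lockdep: still "off" */
    /* ... much later, an unrelated lockdep check splats ...         */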

It can be hard to catch this kind of mismatch because the offending
call site can be far away from the place where the lockdep splat
happens.  Add more lockdep checks to local_irq_disable() and
local_irq_save() for the case where trace_hardirqs_off() is not
called, in the hope that the lockdep splat happens closer to the
offending site.
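
For illustration, here is a hypothetical sketch (based on the
local_irq_save() macro change below) of the case the new assertion
catches: a raw disable that bypasses irq tracing leaves the hardware
state "off" while lockdep still thinks hardirqs are enabled, and the
next local_irq_save() now warns right away instead of much later.

    raw_local_irq_disable();   /* hardware: off, lockdep: still "on"       */
    /* ... offending code ...                                              */
    local_irq_save(flags);     /* flags already show irqs disabled, so the */
                               /* new lockdep_assert_irqs_disabled() fires */
                               /* here, close to the offender              */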

To make that possible, the irqflags-related lockdep code in lockdep.h
is extracted into a separate lockdep_irqflags.h header file that can
safely be included from irqflags.h.

There shouldn't be any overhead if CONFIG_PROVE_LOCKING isn't set.

Signed-off-by: Waiman Long <longman@...hat.com>
---
 include/linux/irqflags.h         |  6 +++
 include/linux/lockdep.h          | 64 +--------------------------
 include/linux/lockdep_irqflags.h | 76 ++++++++++++++++++++++++++++++++
 3 files changed, 83 insertions(+), 63 deletions(-)
 create mode 100644 include/linux/lockdep_irqflags.h

diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 3f003d5fde53..89c951c34a6a 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -191,6 +191,8 @@ extern void warn_bogus_irq_restore(void);
  */
 #ifdef CONFIG_TRACE_IRQFLAGS
 
+#include <linux/lockdep_irqflags.h>
+
 #define local_irq_enable()				\
 	do {						\
 		trace_hardirqs_on();			\
@@ -203,6 +205,8 @@ extern void warn_bogus_irq_restore(void);
 		raw_local_irq_disable();		\
 		if (!was_disabled)			\
 			trace_hardirqs_off();		\
+		else					\
+			lockdep_assert_irqs_disabled();	\
 	} while (0)
 
 #define local_irq_save(flags)				\
@@ -210,6 +214,8 @@ extern void warn_bogus_irq_restore(void);
 		raw_local_irq_save(flags);		\
 		if (!raw_irqs_disabled_flags(flags))	\
 			trace_hardirqs_off();		\
+		else					\
+			lockdep_assert_irqs_disabled();	\
 	} while (0)
 
 #define local_irq_restore(flags)			\
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 08b0d1d9d78b..8a2b288bc866 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -11,8 +11,8 @@
 #define __LINUX_LOCKDEP_H
 
 #include <linux/lockdep_types.h>
+#include <linux/lockdep_irqflags.h>
 #include <linux/smp.h>
-#include <asm/percpu.h>
 
 struct task_struct;
 
@@ -547,72 +547,10 @@ do {									\
 	lock_release(&(lock)->dep_map, _THIS_IP_);			\
 } while (0)
 
-DECLARE_PER_CPU(int, hardirqs_enabled);
-DECLARE_PER_CPU(int, hardirq_context);
-DECLARE_PER_CPU(unsigned int, lockdep_recursion);
-
-#define __lockdep_enabled	(debug_locks && !this_cpu_read(lockdep_recursion))
-
-#define lockdep_assert_irqs_enabled()					\
-do {									\
-	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
-} while (0)
-
-#define lockdep_assert_irqs_disabled()					\
-do {									\
-	WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
-} while (0)
-
-#define lockdep_assert_in_irq()						\
-do {									\
-	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
-} while (0)
-
-#define lockdep_assert_no_hardirq()					\
-do {									\
-	WARN_ON_ONCE(__lockdep_enabled && (this_cpu_read(hardirq_context) || \
-					   !this_cpu_read(hardirqs_enabled))); \
-} while (0)
-
-#define lockdep_assert_preemption_enabled()				\
-do {									\
-	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
-		     __lockdep_enabled			&&		\
-		     (preempt_count() != 0		||		\
-		      !this_cpu_read(hardirqs_enabled)));		\
-} while (0)
-
-#define lockdep_assert_preemption_disabled()				\
-do {									\
-	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
-		     __lockdep_enabled			&&		\
-		     (preempt_count() == 0		&&		\
-		      this_cpu_read(hardirqs_enabled)));		\
-} while (0)
-
-/*
- * Acceptable for protecting per-CPU resources accessed from BH.
- * Much like in_softirq() - semantics are ambiguous, use carefully.
- */
-#define lockdep_assert_in_softirq()					\
-do {									\
-	WARN_ON_ONCE(__lockdep_enabled			&&		\
-		     (!in_softirq() || in_irq() || in_nmi()));		\
-} while (0)
-
 #else
 # define might_lock(lock) do { } while (0)
 # define might_lock_read(lock) do { } while (0)
 # define might_lock_nested(lock, subclass) do { } while (0)
-
-# define lockdep_assert_irqs_enabled() do { } while (0)
-# define lockdep_assert_irqs_disabled() do { } while (0)
-# define lockdep_assert_in_irq() do { } while (0)
-# define lockdep_assert_no_hardirq() do { } while (0)
-
-# define lockdep_assert_preemption_enabled() do { } while (0)
-# define lockdep_assert_preemption_disabled() do { } while (0)
-# define lockdep_assert_in_softirq() do { } while (0)
 #endif
 
 #ifdef CONFIG_PROVE_RAW_LOCK_NESTING
diff --git a/include/linux/lockdep_irqflags.h b/include/linux/lockdep_irqflags.h
new file mode 100644
index 000000000000..d8633c1839d9
--- /dev/null
+++ b/include/linux/lockdep_irqflags.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_LOCKDEP_IRQFLAGS_H
+#define __LINUX_LOCKDEP_IRQFLAGS_H
+
+#include <linux/debug_locks.h>
+#include <linux/kconfig.h>
+#include <linux/preempt.h>
+#include <asm/bug.h>
+#include <asm/percpu.h>
+
+#ifdef CONFIG_PROVE_LOCKING
+DECLARE_PER_CPU(int, hardirqs_enabled);
+DECLARE_PER_CPU(int, hardirq_context);
+DECLARE_PER_CPU(unsigned int, lockdep_recursion);
+
+#define __lockdep_enabled	(debug_locks && !this_cpu_read(lockdep_recursion))
+
+#define lockdep_assert_irqs_enabled()					\
+do {									\
+	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
+} while (0)
+
+#define lockdep_assert_irqs_disabled()					\
+do {									\
+	WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
+} while (0)
+
+#define lockdep_assert_in_irq()						\
+do {									\
+	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
+} while (0)
+
+#define lockdep_assert_no_hardirq()					\
+do {									\
+	WARN_ON_ONCE(__lockdep_enabled && (this_cpu_read(hardirq_context) || \
+					   !this_cpu_read(hardirqs_enabled))); \
+} while (0)
+
+#define lockdep_assert_preemption_enabled()				\
+do {									\
+	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
+		     __lockdep_enabled			&&		\
+		     (preempt_count() != 0		||		\
+		      !this_cpu_read(hardirqs_enabled)));		\
+} while (0)
+
+#define lockdep_assert_preemption_disabled()				\
+do {									\
+	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
+		     __lockdep_enabled			&&		\
+		     (preempt_count() == 0		&&		\
+		      this_cpu_read(hardirqs_enabled)));		\
+} while (0)
+
+/*
+ * Acceptable for protecting per-CPU resources accessed from BH.
+ * Much like in_softirq() - semantics are ambiguous, use carefully.
+ */
+#define lockdep_assert_in_softirq()					\
+do {									\
+	WARN_ON_ONCE(__lockdep_enabled			&&		\
+		     (!in_softirq() || in_irq() || in_nmi()));		\
+} while (0)
+
+#else
+# define lockdep_assert_irqs_enabled() do { } while (0)
+# define lockdep_assert_irqs_disabled() do { } while (0)
+# define lockdep_assert_in_irq() do { } while (0)
+# define lockdep_assert_no_hardirq() do { } while (0)
+
+# define lockdep_assert_preemption_enabled() do { } while (0)
+# define lockdep_assert_preemption_disabled() do { } while (0)
+# define lockdep_assert_in_softirq() do { } while (0)
+#endif
+
+#endif /* __LINUX_LOCKDEP_IRQFLAGS_H */
-- 
2.39.3

