[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <aSDoquGlA55Ge100@tardis.local>
Date: Fri, 21 Nov 2025 14:33:14 -0800
From: Boqun Feng <boqun.feng@...il.com>
To: Nathan Chancellor <nathan@...nel.org>
Cc: Stephen Rothwell <sfr@...b.auug.org.au>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Andrew Morton <akpm@...ux-foundation.org>,
Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
Linux Next Mailing List <linux-next@...r.kernel.org>,
Waiman Long <llong@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...nel.org>, Will Deacon <will@...nel.org>
Subject: Re: linux-next: boot warning from the final tree
On Fri, Nov 21, 2025 at 02:58:19PM -0700, Nathan Chancellor wrote:
> On Mon, Nov 17, 2025 at 08:22:14PM +1100, Stephen Rothwell wrote:
> > Hi all,
> >
> > Today's linux-next qemu boot (powerpc pseries_le_defconfig) produced
> > this warning:
> >
> > ftrace: allocating 48915 entries in 288 pages
> > ftrace: allocated 287 pages with 6 groups
> > ------------[ cut here ]------------
> > DEBUG_LOCKS_WARN_ON(lock->magic != lock)
> > WARNING: kernel/locking/mutex.c:156 at mutex_lock+0xcc/0x100, CPU#0: swapper/0/0
> > Modules linked in:
> > CPU: 0 UID: 0 PID: 0 Comm: swapper/0 Not tainted 6.18.0-rc6-09359-g921087e37218 #1 VOLUNTARY
> > Hardware name: IBM pSeries (emulated by qemu) POWER9 (architected) 0x4e1202 0xf000005 of:SLOF,HEAD hv:linux,kvm pSeries
> > NIP: c00000000148041c LR: c000000001480418 CTR: 0000000000000000
> > REGS: c000000002957a10 TRAP: 0700 Not tainted (6.18.0-rc6-09359-g921087e37218)
> > MSR: 8000000002021033 <SF,VEC,ME,IR,DR,RI,LE> CR: 24022240 XER: 00000000
> > CFAR: c00000000021123c IRQMASK: 3
> > GPR00: c000000001480418 c000000002957cb0 c000000001a3a100 0000000000000028
> > GPR04: 00000000ffffe04a c0000000026abe88 0000000000000001 000000000000004b
> > GPR08: c0000000026abd28 0000000000000000 0000000000000000 0000000044022240
> > GPR12: 0000000000000000 c000000002ae9000 0000000000000000 0000000001bff430
> > GPR16: 000000007e68f070 c00000007f79c480 c000000002969160 c000000002a0f5d8
> > GPR20: c0000000026a1138 c0000000026a1120 0000000000000000 c0000000019541b8
> > GPR24: c00000000218a480 c00000000296e1d0 000000007d612000 c00000000380be10
> > GPR28: c00000000380be20 c00000000380be00 c000000002640100 c00000000380be20
> > NIP [c00000000148041c] mutex_lock+0xcc/0x100
> > LR [c000000001480418] mutex_lock+0xc8/0x100
> > Call Trace:
> > [c000000002957cb0] [c000000001480418] mutex_lock+0xc8/0x100 (unreliable)
> > [c000000002957d20] [c00000000024a60c] alloc_workqueue_noprof+0x38c/0x8ec
> > [c000000002957e00] [c00000000203018c] workqueue_init_early+0x4d8/0x6ec
> > [c000000002957f30] [c000000002004448] start_kernel+0x74c/0xa4c
> > [c000000002957fe0] [c00000000000e99c] start_here_common+0x1c/0x20
> > Code: 4182ffb4 3d2200f3 392971e4 81290000 2c090000 4082ffa0 3c82ffe0 3c62ffe0 3884bfe0 3863bf68 4ad90d45 60000000 <0fe00000> 4bffff80 60000000 60000000
> > ---[ end trace 0000000000000000 ]---
> > rcu: Hierarchical RCU implementation.
> >
> > I have no idea what caused this.
>
> I noticed this warning in my QEMU boot tests as well and bisected it
> down to commit 3572e2edc7b6 ("locking/mutex: Redo __mutex_init()").
>
> $ make -skj"$(nproc)" ARCH=powerpc CROSS_COMPILE=powerpc64-linux- clean ppc64le_guest_defconfig zImage.epapr
>
> $ curl -LSs https://github.com/ClangBuiltLinux/boot-utils/releases/download/20241120-044434/ppc64le-rootfs.cpio.zst | zstd -d >rootfs.cpio
>
> $ qemu-system-ppc64 \
> -display none \
> -nodefaults \
> -device ipmi-bmc-sim,id=bmc0 \
> -device isa-ipmi-bt,bmc=bmc0,irq=10 \
> -machine powernv \
> -kernel arch/powerpc/boot/zImage.epapr \
> -initrd rootfs.cpio \
> -m 2G \
> -serial mon:stdio
> ...
> [ 0.000000][ T0] Linux version 6.18.0-rc2-00016-g3572e2edc7b6 (nathan@...62) (powerpc64-linux-gcc (GCC) 15.2.0, GNU ld (GNU Binutils) 2.45) #1 SMP Fri Nov 21 13:55:26 MST 2025
> ...
> [ 0.000000][ T0] ------------[ cut here ]------------
> [ 0.000000][ T0] DEBUG_LOCKS_WARN_ON(lock->magic != lock)
> [ 0.000000][ T0] WARNING: CPU: 0 PID: 0 at kernel/locking/mutex.c:156 mutex_lock+0xd4/0x100
> [ 0.000000][ T0] Modules linked in:
> [ 0.000000][ T0] CPU: 0 UID: 0 PID: 0 Comm: swapper/0 Not tainted 6.18.0-rc2-00016-g3572e2edc7b6 #1 VOLUNTARY
> [ 0.000000][ T0] Hardware name: IBM PowerNV (emulated by qemu) POWER10 0x801200 opal:v7.1-106-g785a5e307 PowerNV
> [ 0.000000][ T0] NIP: c0000000014b2974 LR: c0000000014b2970 CTR: 0000000000000000
> [ 0.000000][ T0] REGS: c0000000029979f0 TRAP: 0700 Not tainted (6.18.0-rc2-00016-g3572e2edc7b6)
> [ 0.000000][ T0] MSR: 9000000002021033 <SF,HV,VEC,ME,IR,DR,RI,LE> CR: 24000220 XER: 00000000
> [ 0.000000][ T0] CFAR: c00000000021ed7c IRQMASK: 3
> [ 0.000000][ T0] GPR00: c0000000014b2970 c000000002997c90 c000000001a78100 0000000000000028
> [ 0.000000][ T0] GPR04: 00000000ffffe04a c0000000026ed958 0000000000000001 000000000000004b
> [ 0.000000][ T0] GPR08: c0000000026ed7f0 0000000000000000 0000000000000000 0000000044000220
> [ 0.000000][ T0] GPR12: c0000000026ed880 c000000002ba0000 0000000000000018 0000000000000000
> [ 0.000000][ T0] GPR16: 0000000000000000 c0000000026e2b88 c0000000026e2ba0 c00000007be5a400
> [ 0.000000][ T0] GPR20: c0000000029ed0e0 c000000002aaf7e0 0000000000000000 c0000000019911b8
> [ 0.000000][ T0] GPR24: c0000000021ca400 c0000000029f2150 0000000079c90000 c000000003081410
> [ 0.000000][ T0] GPR28: c000000003081420 c000000003081400 c0000000021cce98 c000000003081420
> [ 0.000000][ T0] NIP [c0000000014b2974] mutex_lock+0xd4/0x100
> [ 0.000000][ T0] LR [c0000000014b2970] mutex_lock+0xd0/0x100
> [ 0.000000][ T0] Call Trace:
> [ 0.000000][ T0] [c000000002997c90] [c0000000014b2970] mutex_lock+0xd0/0x100 (unreliable)
> [ 0.000000][ T0] [c000000002997d10] [c000000000258ddc] alloc_workqueue_noprof+0x44c/0x8c8
> [ 0.000000][ T0] [c000000002997df0] [c00000000203080c] workqueue_init_early+0x4e4/0x700
> [ 0.000000][ T0] [c000000002997f30] [c000000002004388] start_kernel+0x638/0x938
> [ 0.000000][ T0] [c000000002997fe0] [c00000000000e99c] start_here_common+0x1c/0x20
> [ 0.000000][ T0] Code: 4182ffa8 3d2200f8 3929d134 81290000 2c090000 4082ff94 3c82ffde 3c62ffde 38846d98 38636d20 4ad6c32d 60000000 <0fe00000> e9410068 4bffff70 38210080
> [ 0.000000][ T0] ---[ end trace 0000000000000000 ]---
> ...
>
> At the parent change, there is no warning.
>
Thank you both — it seems we missed the case where LOCKDEP=n but
DEBUG_MUTEXES=y. I believe the following should be the correct fix.
Regards,
Boqun
------------->8
Subject: [PATCH] locking/mutex: Initialize mutex::magic even when LOCKDEP=n
When DEBUG_MUTEXES=y and LOCKDEP=n, mutex_lock() still checks
->magic, hence debug_mutex_init() should be called from
__mutex_init_generic() as well. While we are at it, decouple the
LOCKDEP logic from debug_mutex_init(): this way debug_mutex_init()
only needs one parameter, and the LOCKDEP=y-only initialization now
lives in mutex_init_lockep().
Reported-by: Stephen Rothwell <sfr@...b.auug.org.au>
Closes: https://lore.kernel.org/lkml/20251117202214.4f710f02@canb.auug.org.au/
Reported-by: Nathan Chancellor <nathan@...nel.org>
Closes: https://lore.kernel.org/lkml/20251121215819.GA1374726@ax162/
Fixes: 3572e2edc7b6 ("locking/mutex: Redo __mutex_init()")
Signed-off-by: Boqun Feng <boqun.feng@...il.com>
---
kernel/locking/mutex-debug.c | 10 +---------
kernel/locking/mutex.c | 8 +++++++-
kernel/locking/mutex.h | 5 ++---
3 files changed, 10 insertions(+), 13 deletions(-)
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
index 949103fd8e9b..2c6b02d4699b 100644
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -78,16 +78,8 @@ void debug_mutex_unlock(struct mutex *lock)
}
}
-void debug_mutex_init(struct mutex *lock, const char *name,
- struct lock_class_key *key)
+void debug_mutex_init(struct mutex *lock)
{
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- /*
- * Make sure we are not reinitializing a held lock:
- */
- debug_check_no_locks_freed((void *)lock, sizeof(*lock));
- lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
-#endif
lock->magic = lock;
}
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index f3bb352a368d..2a1d165b3167 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -51,6 +51,7 @@ static void __mutex_init_generic(struct mutex *lock)
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
osq_lock_init(&lock->osq);
#endif
+ debug_mutex_init(lock);
}
static inline struct task_struct *__owner_task(unsigned long owner)
@@ -173,7 +174,12 @@ static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
void mutex_init_lockep(struct mutex *lock, const char *name, struct lock_class_key *key)
{
__mutex_init_generic(lock);
- debug_mutex_init(lock, name, key);
+
+ /*
+ * Make sure we are not reinitializing a held lock:
+ */
+ debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+ lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
}
EXPORT_SYMBOL(mutex_init_lockep);
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
diff --git a/kernel/locking/mutex.h b/kernel/locking/mutex.h
index 2e8080a9bee3..9ad4da8cea00 100644
--- a/kernel/locking/mutex.h
+++ b/kernel/locking/mutex.h
@@ -59,8 +59,7 @@ extern void debug_mutex_add_waiter(struct mutex *lock,
extern void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct task_struct *task);
extern void debug_mutex_unlock(struct mutex *lock);
-extern void debug_mutex_init(struct mutex *lock, const char *name,
- struct lock_class_key *key);
+extern void debug_mutex_init(struct mutex *lock);
#else /* CONFIG_DEBUG_MUTEXES */
# define debug_mutex_lock_common(lock, waiter) do { } while (0)
# define debug_mutex_wake_waiter(lock, waiter) do { } while (0)
@@ -68,6 +67,6 @@ extern void debug_mutex_init(struct mutex *lock, const char *name,
# define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0)
# define debug_mutex_remove_waiter(lock, waiter, ti) do { } while (0)
# define debug_mutex_unlock(lock) do { } while (0)
-# define debug_mutex_init(lock, name, key) do { } while (0)
+# define debug_mutex_init(lock) do { } while (0)
#endif /* !CONFIG_DEBUG_MUTEXES */
#endif /* CONFIG_PREEMPT_RT */
--
2.50.1 (Apple Git-155)
Powered by blists - more mailing lists