Message-ID: <Y/Ouo4Jog4bInO63@boqun-archlinux>
Date: Mon, 20 Feb 2023 09:32:19 -0800
From: Boqun Feng <boqun.feng@...il.com>
To: Peter Zijlstra <peterz@...radead.org>
Cc: Alan Stern <stern@...land.harvard.edu>,
Kent Overstreet <kent.overstreet@...ux.dev>,
Kent Overstreet <kent.overstreet@...il.com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Coly Li <colyli@...e.de>,
Tetsuo Handa <penguin-kernel@...ove.sakura.ne.jp>,
syzkaller <syzkaller@...glegroups.com>,
Dmitry Vyukov <dvyukov@...gle.com>,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
"Rafael J. Wysocki" <rafael@...nel.org>,
Ingo Molnar <mingo@...hat.com>,
LKML <linux-kernel@...r.kernel.org>,
USB list <linux-usb@...r.kernel.org>,
Hillf Danton <hdanton@...a.com>
Subject: Re: [PATCH RFC] drivers/core: Replace lockdep_set_novalidate_class()
with unique class keys
On Wed, Feb 15, 2023 at 11:45:03AM +0100, Peter Zijlstra wrote:
> On Tue, Feb 14, 2023 at 08:22:28AM -0800, Boqun Feng wrote:
>
> > Ah, right, I was missing the fact that it works with 2 classes...
> >
> > But I think with only one class, the nest_lock() still works, right?
> > In other words, if P and Cn are the same lock class in your example.
After playing with some selftest cases, I found I was wrong again ;-(
>
> I don't think so, but I don't think I've carefully considered that case.
>
You are right: the same-class case triggers a DEBUG_LOCKS_WARN_ON()
in match_held_lock() when releasing the locks.
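IOW, a pattern like the nest_test_spin_nest_lock_same_class() case in the
selftests below, where lock_X1 and lock_X2 are two instances of the same
lock class:

	spin_lock(&lock_X1);
	spin_lock_nest_lock(&lock_X2, &lock_X1);
	spin_unlock(&lock_X2);	/* IIUC, match_held_lock() trips the WARN_ON here */
	spin_unlock(&lock_X1);
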
> > Also seems I gave a wrong answer to Alan, just to clarify, the following
> > is not a deadlock to lockdep:
> >
> > T1:
> > mutex_lock(P)
> > mutex_lock_nest_lock(C1, P)
> > mutex_lock_nest_lock(C2, P)
> > mutex_lock(B)
> >
> > T2:
> > mutex_lock(P)
> > mutex_lock(B)
> > mutex_lock_nest_lock(C1, P)
> > mutex_lock_nest_lock(C2, P)
> >
>
> This should in fact complain about a CB-BC deadlock, (but I've not
> tested it, just going on memories of how I implemented it).
>
Yes, confirmed by a selftest (the nest_test_spin_nest_lock_deadlock() case
below, where lock_Y1 and lock_A play the roles of the Cs and B).
> > Because of any pair of
> >
> > mutex_lock(L);
> > ... // other locks maybe
> > mutex_lock_nest_lock(M, L);
> >
> > lockdep will not add M into the dependency graph, since it's nested and
> > should be serialized by L.
>
> We do enter M into the dependency graph, but instead ignore M-M
> recursion. Specifically so that we might catch the above deadlock vs B.
Right, I misread the code, which suggests I should improve it to help
the future me ;-)
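So, IIUC, for something like the following (M1 and M2 here stand for two
locks of M's class, just to make the recursion explicit):

	mutex_lock(&P);
	mutex_lock_nest_lock(&M1, &P);
	mutex_lock_nest_lock(&M2, &P);	/* same class as M1 */
	mutex_lock(&B);

lockdep still records the P -> M and M -> B dependencies, and the nest_lock
annotation only makes it skip the M1-vs-M2 same-class recursion check, which
is how the deadlock vs B above can still be caught.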
FWIW, the selftests I used are as follows:
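(To run them, I'm assuming the usual setup for lib/locking-selftest.c, i.e.
a kernel built with something like

	CONFIG_PROVE_LOCKING=y
	CONFIG_DEBUG_LOCKING_API_SELFTESTS=y

so that the tests are exercised at boot.)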
Regards,
Boqun
------------------------------->8
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 8d24279fad05..6aadebad68c1 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -60,6 +60,7 @@ __setup("debug_locks_verbose=", setup_debug_locks_verbose);
#define LOCKTYPE_RTMUTEX 0x20
#define LOCKTYPE_LL 0x40
#define LOCKTYPE_SPECIAL 0x80
+#define LOCKTYPE_NEST 0x100
static struct ww_acquire_ctx t, t2;
static struct ww_mutex o, o2, o3;
@@ -2091,14 +2092,14 @@ static void ww_test_edeadlk_acquire_wrong_slow(void)
ww_mutex_lock_slow(&o3, &t);
}
-static void ww_test_spin_nest_unlocked(void)
+static void nest_test_spin_nest_unlocked(void)
{
spin_lock_nest_lock(&lock_A, &o.base);
U(A);
}
/* This is not a deadlock, because we have X1 to serialize Y1 and Y2 */
-static void ww_test_spin_nest_lock(void)
+static void nest_test_spin_nest_lock(void)
{
spin_lock(&lock_X1);
spin_lock_nest_lock(&lock_Y1, &lock_X1);
@@ -2110,6 +2111,33 @@ static void ww_test_spin_nest_lock(void)
spin_unlock(&lock_X1);
}
+static void nest_test_spin_nest_lock_deadlock(void)
+{
+ nest_test_spin_nest_lock();
+
+ /*
+ * Although the above is not a deadlock, with the following code Y1
+ * and A create an ABBA deadlock.
+ */
+ spin_lock(&lock_X1);
+ spin_lock(&lock_A);
+ spin_lock_nest_lock(&lock_Y1, &lock_X1);
+ spin_lock_nest_lock(&lock_Y2, &lock_X1);
+ spin_unlock(&lock_A);
+ spin_unlock(&lock_Y2);
+ spin_unlock(&lock_Y1);
+ spin_unlock(&lock_X1);
+}
+
+/* Not a supported usage: the nest lock and the nested lock share a lock class */
+static void nest_test_spin_nest_lock_same_class(void)
+{
+ spin_lock(&lock_X1);
+ spin_lock_nest_lock(&lock_X2, &lock_X1);
+ spin_unlock(&lock_X2);
+ spin_unlock(&lock_X1);
+}
+
static void ww_test_unneeded_slow(void)
{
WWAI(&t);
@@ -2323,14 +2351,6 @@ static void ww_tests(void)
dotest(ww_test_edeadlk_acquire_wrong_slow, FAILURE, LOCKTYPE_WW);
pr_cont("\n");
- print_testname("spinlock nest unlocked");
- dotest(ww_test_spin_nest_unlocked, FAILURE, LOCKTYPE_WW);
- pr_cont("\n");
-
- print_testname("spinlock nest test");
- dotest(ww_test_spin_nest_lock, SUCCESS, LOCKTYPE_WW);
- pr_cont("\n");
-
printk(" -----------------------------------------------------\n");
printk(" |block | try |context|\n");
printk(" -----------------------------------------------------\n");
@@ -2360,6 +2380,27 @@ static void ww_tests(void)
pr_cont("\n");
}
+static void nest_tests(void)
+{
+ printk(" --------------------------------------------------------------------------\n");
+ printk(" | nest lock tests |\n");
+ printk(" -------------------\n");
+ print_testname("spinlock nest unlocked");
+ dotest(nest_test_spin_nest_unlocked, FAILURE, LOCKTYPE_NEST);
+ pr_cont("\n");
+
+ print_testname("spinlock nest test");
+ dotest(nest_test_spin_nest_lock, SUCCESS, LOCKTYPE_NEST);
+ pr_cont("\n");
+ print_testname("spinlock nest test deadlock");
+ dotest(nest_test_spin_nest_lock_deadlock, FAILURE, LOCKTYPE_NEST);
+ pr_cont("\n");
+ print_testname("spinlock nest test same class");
+ dotest(nest_test_spin_nest_lock_same_class, FAILURE, LOCKTYPE_NEST);
+ pr_cont("\n");
+
+}
+
/*
* <in hardirq handler>
@@ -2966,6 +3007,8 @@ void locking_selftest(void)
ww_tests();
+ nest_tests();
+
force_read_lock_recursive = 0;
/*
* queued_read_lock() specific test cases can be put here