Message-ID: <20250801023358.562525-2-jstultz@google.com>
Date: Fri, 1 Aug 2025 02:33:47 +0000
From: John Stultz <jstultz@...gle.com>
To: LKML <linux-kernel@...r.kernel.org>
Cc: John Stultz <jstultz@...gle.com>, Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>, Will Deacon <will@...nel.org>, Waiman Long <longman@...hat.com>,
Boqun Feng <boqun.feng@...il.com>, "Paul E. McKenney" <paulmck@...nel.org>,
Joel Fernandes <joelagnelf@...dia.com>, Dietmar Eggemann <dietmar.eggemann@....com>,
Suleiman Souhlal <suleiman@...gle.com>, kernel-team@...roid.com
Subject: [PATCH 1/3] test-ww_mutex: Extend ww_mutex tests to test both classes
of ww_mutexes

Currently the test-ww_mutex tool only exercises the wait-die class of
ww_mutexes, and thus isn't very helpful for exercising the wound-wait
class of ww_mutexes.

So extend the test to run every subtest against both classes of
ww_mutex.

Signed-off-by: John Stultz <jstultz@...gle.com>
---
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: Will Deacon <will@...nel.org>
Cc: Waiman Long <longman@...hat.com>
Cc: Boqun Feng <boqun.feng@...il.com>
Cc: "Paul E . McKenney" <paulmck@...nel.org>
Cc: Joel Fernandes <joelagnelf@...dia.com>
Cc: Dietmar Eggemann <dietmar.eggemann@....com>
Cc: Suleiman Souhlal <suleiman@...gle.com>
Cc: kernel-team@...roid.com
---
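A quick refresher, since the commit message leans on the distinction:
the two ww_mutex classes share the same API and differ only in the
deadlock-avoidance algorithm behind an -EDEADLK return. Below is a
minimal sketch (not part of this patch; the example_* names are
illustrative) of defining one class of each flavor and the common
acquire pattern, which is what lets the same test body be
parameterized by class:

#include <linux/ww_mutex.h>

/* Wait-Die: a younger transaction that would block on an older one
 * backs off ("dies") and retries; older transactions simply wait. */
static DEFINE_WD_CLASS(example_wd_class);

/* Wound-Wait: an older transaction "wounds" a younger lock holder,
 * which must back off at its next locking attempt. */
static DEFINE_WW_CLASS(example_ww_class);

static struct ww_mutex example_lock;

/* The acquire pattern is identical for both classes; only the policy
 * that produces -EDEADLK differs. */
static int example_acquire(struct ww_class *class)
{
	struct ww_acquire_ctx ctx;
	int err;

	ww_mutex_init(&example_lock, class);
	ww_acquire_init(&ctx, class);

	err = ww_mutex_lock(&example_lock, &ctx);
	if (err == -EDEADLK) {
		/* Back off: with multiple locks held we would drop
		 * them all, reacquire the contended one with
		 * ww_mutex_lock_slow(), and then retry the rest. */
		ww_acquire_fini(&ctx);
		return err;
	}

	ww_acquire_done(&ctx);
	/* ... critical section ... */
	ww_mutex_unlock(&example_lock);
	ww_acquire_fini(&ctx);
	return 0;
}

This would be called as example_acquire(&example_wd_class) or
example_acquire(&example_ww_class), mirroring how run_tests() in the
patch below takes the class as a parameter.
---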
kernel/locking/test-ww_mutex.c | 114 +++++++++++++++++++++------------
1 file changed, 73 insertions(+), 41 deletions(-)

diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
index bcb1b9fea5880..20f509ca17e16 100644
--- a/kernel/locking/test-ww_mutex.c
+++ b/kernel/locking/test-ww_mutex.c
@@ -13,7 +13,8 @@
#include <linux/slab.h>
#include <linux/ww_mutex.h>
-static DEFINE_WD_CLASS(ww_class);
+static DEFINE_WD_CLASS(wd_class);
+static DEFINE_WW_CLASS(ww_class);
struct workqueue_struct *wq;
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
@@ -54,16 +55,16 @@ static void test_mutex_work(struct work_struct *work)
ww_mutex_unlock(&mtx->mutex);
}
-static int __test_mutex(unsigned int flags)
+static int __test_mutex(struct ww_class *class, unsigned int flags)
{
#define TIMEOUT (HZ / 16)
struct test_mutex mtx;
struct ww_acquire_ctx ctx;
int ret;
- ww_mutex_init(&mtx.mutex, &ww_class);
+ ww_mutex_init(&mtx.mutex, class);
if (flags & TEST_MTX_CTX)
- ww_acquire_init(&ctx, &ww_class);
+ ww_acquire_init(&ctx, class);
INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
init_completion(&mtx.ready);
@@ -106,13 +107,13 @@ static int __test_mutex(unsigned int flags)
#undef TIMEOUT
}
-static int test_mutex(void)
+static int test_mutex(struct ww_class *class)
{
int ret;
int i;
for (i = 0; i < __TEST_MTX_LAST; i++) {
- ret = __test_mutex(i);
+ ret = __test_mutex(class, i);
if (ret)
return ret;
}
@@ -120,15 +121,15 @@ static int test_mutex(void)
return 0;
}
-static int test_aa(bool trylock)
+static int test_aa(struct ww_class *class, bool trylock)
{
struct ww_mutex mutex;
struct ww_acquire_ctx ctx;
int ret;
const char *from = trylock ? "trylock" : "lock";
- ww_mutex_init(&mutex, &ww_class);
- ww_acquire_init(&ctx, &ww_class);
+ ww_mutex_init(&mutex, class);
+ ww_acquire_init(&ctx, class);
if (!trylock) {
ret = ww_mutex_lock(&mutex, &ctx);
@@ -177,6 +178,7 @@ static int test_aa(bool trylock)
struct test_abba {
struct work_struct work;
+ struct ww_class *class;
struct ww_mutex a_mutex;
struct ww_mutex b_mutex;
struct completion a_ready;
@@ -191,7 +193,7 @@ static void test_abba_work(struct work_struct *work)
struct ww_acquire_ctx ctx;
int err;
- ww_acquire_init_noinject(&ctx, &ww_class);
+ ww_acquire_init_noinject(&ctx, abba->class);
if (!abba->trylock)
ww_mutex_lock(&abba->b_mutex, &ctx);
else
@@ -217,23 +219,24 @@ static void test_abba_work(struct work_struct *work)
abba->result = err;
}
-static int test_abba(bool trylock, bool resolve)
+static int test_abba(struct ww_class *class, bool trylock, bool resolve)
{
struct test_abba abba;
struct ww_acquire_ctx ctx;
int err, ret;
- ww_mutex_init(&abba.a_mutex, &ww_class);
- ww_mutex_init(&abba.b_mutex, &ww_class);
+ ww_mutex_init(&abba.a_mutex, class);
+ ww_mutex_init(&abba.b_mutex, class);
INIT_WORK_ONSTACK(&abba.work, test_abba_work);
init_completion(&abba.a_ready);
init_completion(&abba.b_ready);
+ abba.class = class;
abba.trylock = trylock;
abba.resolve = resolve;
schedule_work(&abba.work);
- ww_acquire_init_noinject(&ctx, &ww_class);
+ ww_acquire_init_noinject(&ctx, class);
if (!trylock)
ww_mutex_lock(&abba.a_mutex, &ctx);
else
@@ -278,6 +281,7 @@ static int test_abba(bool trylock, bool resolve)
struct test_cycle {
struct work_struct work;
+ struct ww_class *class;
struct ww_mutex a_mutex;
struct ww_mutex *b_mutex;
struct completion *a_signal;
@@ -291,7 +295,7 @@ static void test_cycle_work(struct work_struct *work)
struct ww_acquire_ctx ctx;
int err, erra = 0;
- ww_acquire_init_noinject(&ctx, &ww_class);
+ ww_acquire_init_noinject(&ctx, cycle->class);
ww_mutex_lock(&cycle->a_mutex, &ctx);
complete(cycle->a_signal);
@@ -314,7 +318,7 @@ static void test_cycle_work(struct work_struct *work)
cycle->result = err ?: erra;
}
-static int __test_cycle(unsigned int nthreads)
+static int __test_cycle(struct ww_class *class, unsigned int nthreads)
{
struct test_cycle *cycles;
unsigned int n, last = nthreads - 1;
@@ -327,7 +331,8 @@ static int __test_cycle(unsigned int nthreads)
for (n = 0; n < nthreads; n++) {
struct test_cycle *cycle = &cycles[n];
- ww_mutex_init(&cycle->a_mutex, &ww_class);
+ cycle->class = class;
+ ww_mutex_init(&cycle->a_mutex, class);
if (n == last)
cycle->b_mutex = &cycles[0].a_mutex;
else
@@ -367,13 +372,13 @@ static int __test_cycle(unsigned int nthreads)
return ret;
}
-static int test_cycle(unsigned int ncpus)
+static int test_cycle(struct ww_class *class, unsigned int ncpus)
{
unsigned int n;
int ret;
for (n = 2; n <= ncpus + 1; n++) {
- ret = __test_cycle(n);
+ ret = __test_cycle(class, n);
if (ret)
return ret;
}
@@ -384,6 +389,7 @@ static int test_cycle(unsigned int ncpus)
struct stress {
struct work_struct work;
struct ww_mutex *locks;
+ struct ww_class *class;
unsigned long timeout;
int nlocks;
};
@@ -443,7 +449,7 @@ static void stress_inorder_work(struct work_struct *work)
int contended = -1;
int n, err;
- ww_acquire_init(&ctx, &ww_class);
+ ww_acquire_init(&ctx, stress->class);
retry:
err = 0;
for (n = 0; n < nlocks; n++) {
@@ -511,7 +517,7 @@ static void stress_reorder_work(struct work_struct *work)
order = NULL;
do {
- ww_acquire_init(&ctx, &ww_class);
+ ww_acquire_init(&ctx, stress->class);
list_for_each_entry(ll, &locks, link) {
err = ww_mutex_lock(ll->lock, &ctx);
@@ -570,7 +576,7 @@ static void stress_one_work(struct work_struct *work)
#define STRESS_ONE BIT(2)
#define STRESS_ALL (STRESS_INORDER | STRESS_REORDER | STRESS_ONE)
-static int stress(int nlocks, int nthreads, unsigned int flags)
+static int stress(struct ww_class *class, int nlocks, int nthreads, unsigned int flags)
{
struct ww_mutex *locks;
struct stress *stress_array;
@@ -588,7 +594,7 @@ static int stress(int nlocks, int nthreads, unsigned int flags)
}
for (n = 0; n < nlocks; n++)
- ww_mutex_init(&locks[n], &ww_class);
+ ww_mutex_init(&locks[n], class);
count = 0;
for (n = 0; nthreads; n++) {
@@ -617,6 +623,7 @@ static int stress(int nlocks, int nthreads, unsigned int flags)
stress = &stress_array[count++];
INIT_WORK(&stress->work, fn);
+ stress->class = class;
stress->locks = locks;
stress->nlocks = nlocks;
stress->timeout = jiffies + 2*HZ;
@@ -635,57 +642,82 @@ static int stress(int nlocks, int nthreads, unsigned int flags)
return 0;
}
-static int __init test_ww_mutex_init(void)
+static int __init run_tests(struct ww_class *class)
{
int ncpus = num_online_cpus();
int ret, i;
- printk(KERN_INFO "Beginning ww mutex selftests\n");
-
- prandom_seed_state(&rng, get_random_u64());
-
- wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
- if (!wq)
- return -ENOMEM;
-
- ret = test_mutex();
+ ret = test_mutex(class);
if (ret)
return ret;
- ret = test_aa(false);
+ ret = test_aa(class, false);
if (ret)
return ret;
- ret = test_aa(true);
+ ret = test_aa(class, true);
if (ret)
return ret;
for (i = 0; i < 4; i++) {
- ret = test_abba(i & 1, i & 2);
+ ret = test_abba(class, i & 1, i & 2);
if (ret)
return ret;
}
- ret = test_cycle(ncpus);
+ ret = test_cycle(class, ncpus);
if (ret)
return ret;
- ret = stress(16, 2*ncpus, STRESS_INORDER);
+ ret = stress(class, 16, 2*ncpus, STRESS_INORDER);
if (ret)
return ret;
- ret = stress(16, 2*ncpus, STRESS_REORDER);
+ ret = stress(class, 16, 2*ncpus, STRESS_REORDER);
if (ret)
return ret;
- ret = stress(2046, hweight32(STRESS_ALL)*ncpus, STRESS_ALL);
+ ret = stress(class, 2046, hweight32(STRESS_ALL)*ncpus, STRESS_ALL);
if (ret)
return ret;
- printk(KERN_INFO "All ww mutex selftests passed\n");
return 0;
}
+static int __init run_test_classes(void)
+{
+ int ret;
+
+ pr_info("Beginning ww (wound) mutex selftests\n");
+
+ ret = run_tests(&ww_class);
+ if (ret)
+ return ret;
+
+ pr_info("Beginning ww (die) mutex selftests\n");
+ ret = run_tests(&wd_class);
+ if (ret)
+ return ret;
+
+ pr_info("All ww mutex selftests passed\n");
+ return 0;
+}
+
+static int __init test_ww_mutex_init(void)
+{
+ int ret;
+
+ prandom_seed_state(&rng, get_random_u64());
+
+ wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
+ if (!wq)
+ return -ENOMEM;
+
+ ret = run_test_classes();
+
+ return ret;
+}
+
static void __exit test_ww_mutex_exit(void)
{
destroy_workqueue(wq);
--
2.50.1.565.gc32cd1483b-goog
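
As a usage note: this selftest is gated behind CONFIG_WW_MUTEX_SELFTEST
and runs from module init, so when built as a module a single
"modprobe test-ww_mutex" now exercises every subtest twice, once per
class; the two "Beginning ww (...) mutex selftests" lines added above
mark the two passes in dmesg.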