Message-ID: <20230203200138.3872873-1-jstultz@google.com>
Date: Fri, 3 Feb 2023 20:01:35 +0000
From: John Stultz <jstultz@...gle.com>
To: LKML <linux-kernel@...r.kernel.org>
Cc: John Stultz <jstultz@...gle.com>,
Davidlohr Bueso <dave@...olabs.net>,
"Paul E. McKenney" <paulmck@...nel.org>,
Josh Triplett <josh@...htriplett.org>,
Joel Fernandes <joel@...lfernandes.org>,
Juri Lelli <juri.lelli@...hat.com>,
Valentin Schneider <vschneid@...hat.com>,
Dietmar Eggemann <dietmar.eggemann@....com>
Subject: [PATCH v2 1/4] locktorture: Add nested_[un]lock() hooks and nlocks parameter
In order to extend locktorture to support lock nesting, add
nested_lock() and nested_unlock() hooks to the torture ops.
These take a 32-bit lockset mask which is generated at random,
so that some subset of the nested locks is taken before the main
lock is acquired, and released after it is dropped.
Additionally, add an nlocks module parameter to allow specifying
the number of nested locks to be used.
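
For illustration only (not part of this patch): a rough sketch of how
a mutex-based lock type might implement the new hooks, assuming a
static array of mutexes initialized from the op's init() hook. The
names below are hypothetical, not taken from this patch:

	static struct mutex torture_nested_mutexes[MAX_LOCKS];

	static int torture_mutex_nested_lock(int tid __maybe_unused, u32 lockset)
	{
		int i;

		/* Take only the locks selected by the random mask, in order. */
		for (i = 0; i < nlocks; i++)
			if (lockset & (1 << i))
				mutex_lock(&torture_nested_mutexes[i]);
		return 0;
	}

	static void torture_mutex_nested_unlock(int tid __maybe_unused, u32 lockset)
	{
		int i;

		/* Release in reverse order to match the locking order. */
		for (i = nlocks - 1; i >= 0; i--)
			if (lockset & (1 << i))
				mutex_unlock(&torture_nested_mutexes[i]);
	}

With hooks like the above wired up, running e.g.
"modprobe locktorture torture_type=mutex_lock nlocks=4" would have each
writer take a random subset of up to four nested mutexes around the
main lock on every iteration.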
These changes have been helpful for uncovering issues during
development of the proxy-exec series.
This was inspired by locktorture extensions originally implemented
by Connor O'Brien, for stress testing the proxy-execution series:
https://lore.kernel.org/lkml/20221003214501.2050087-12-connoro@google.com/
Comments or feedback would be greatly appreciated!
Cc: Davidlohr Bueso <dave@...olabs.net>
Cc: "Paul E. McKenney" <paulmck@...nel.org>
Cc: Josh Triplett <josh@...htriplett.org>
Cc: Joel Fernandes <joel@...lfernandes.org>
Cc: Juri Lelli <juri.lelli@...hat.com>
Cc: Valentin Schneider <vschneid@...hat.com>
Cc: Dietmar Eggemann <dietmar.eggemann@....com>
Signed-off-by: John Stultz <jstultz@...gle.com>
---
kernel/locking/locktorture.c | 23 +++++++++++++++++++----
1 file changed, 19 insertions(+), 4 deletions(-)
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 9c2fb613a55d..f4fbd3194654 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -48,6 +48,9 @@ torture_param(int, stat_interval, 60,
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(int, verbose, 1,
"Enable verbose debugging printk()s");
+torture_param(int, nlocks, 0, "Number of nested locks");
+/* Going much higher trips "BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!" errors */
+#define MAX_LOCKS 8
static char *torture_type = "spin_lock";
module_param(torture_type, charp, 0444);
@@ -76,10 +79,12 @@ static void lock_torture_cleanup(void);
struct lock_torture_ops {
void (*init)(void);
void (*exit)(void);
+ int (*nested_lock)(int tid, u32 lockset);
int (*writelock)(int tid);
void (*write_delay)(struct torture_random_state *trsp);
void (*task_boost)(struct torture_random_state *trsp);
void (*writeunlock)(int tid);
+ void (*nested_unlock)(int tid, u32 lockset);
int (*readlock)(int tid);
void (*read_delay)(struct torture_random_state *trsp);
void (*readunlock)(int tid);
@@ -669,6 +674,7 @@ static int lock_torture_writer(void *arg)
struct lock_stress_stats *lwsp = arg;
int tid = lwsp - cxt.lwsa;
DEFINE_TORTURE_RANDOM(rand);
+ u32 lockset_mask;
VERBOSE_TOROUT_STRING("lock_torture_writer task started");
set_user_nice(current, MAX_NICE);
@@ -677,7 +683,10 @@ static int lock_torture_writer(void *arg)
if ((torture_random(&rand) & 0xfffff) == 0)
schedule_timeout_uninterruptible(1);
+ lockset_mask = torture_random(&rand);
cxt.cur_ops->task_boost(&rand);
+ if (cxt.cur_ops->nested_lock)
+ cxt.cur_ops->nested_lock(tid, lockset_mask);
cxt.cur_ops->writelock(tid);
if (WARN_ON_ONCE(lock_is_write_held))
lwsp->n_lock_fail++;
@@ -690,6 +699,8 @@ static int lock_torture_writer(void *arg)
lock_is_write_held = false;
WRITE_ONCE(last_lock_release, jiffies);
cxt.cur_ops->writeunlock(tid);
+ if (cxt.cur_ops->nested_unlock)
+ cxt.cur_ops->nested_unlock(tid, lockset_mask);
stutter_wait("lock_torture_writer");
} while (!torture_must_stop());
@@ -830,11 +841,11 @@ lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
const char *tag)
{
pr_alert("%s" TORTURE_FLAG
- "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
+ "--- %s%s: nwriters_stress=%d nreaders_stress=%d nlocks=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
torture_type, tag, cxt.debug_lock ? " [debug]": "",
- cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval,
- verbose, shuffle_interval, stutter, shutdown_secs,
- onoff_interval, onoff_holdoff);
+ cxt.nrealwriters_stress, cxt.nrealreaders_stress, nlocks,
+ stat_interval, verbose, shuffle_interval, stutter,
+ shutdown_secs, onoff_interval, onoff_holdoff);
}
static void lock_torture_cleanup(void)
@@ -1053,6 +1064,10 @@ static int __init lock_torture_init(void)
}
}
+ /* cap nlocks to MAX_LOCKS */
+ if (nlocks > MAX_LOCKS)
+ nlocks = MAX_LOCKS;
+
if (cxt.cur_ops->readlock) {
reader_tasks = kcalloc(cxt.nrealreaders_stress,
sizeof(reader_tasks[0]),
--
2.39.1.519.gcb327c4b5f-goog