Message-Id: <20230215061035.1534950-1-qiang1.zhang@intel.com>
Date: Wed, 15 Feb 2023 14:10:35 +0800
From: Zqiang <qiang1.zhang@...el.com>
To: dave@...olabs.net, paulmck@...nel.org, josh@...htriplett.org
Cc: linux-kernel@...r.kernel.org
Subject: [PATCH] locktorture: Add raw_spinlock* torture tests for PREEMPT_RT kernels

In PREEMPT_RT kernels, spin_lock() and spin_lock_irq() are converted
to the sleepable rt_spin_lock, and the interrupt-related suffixes of
spin_lock/unlock() (_irq, _irqsave/_irqrestore) do not affect the
CPU's interrupt state.  This commit therefore adds raw_spin_lock and
raw_spin_lock_irq torture tests, which exercise the strict spinning
lock implementation on RT kernels.
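
The new types can be selected with locktorture's existing torture_type
module parameter, for example:

  modprobe locktorture torture_type=raw_spin_lock
  modprobe locktorture torture_type=raw_spin_lock_irq
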
Signed-off-by: Zqiang <qiang1.zhang@...el.com>
---
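A note for reviewers, not part of the commit: below is a minimal
sketch, using illustrative names (demo_lock, demo_raw_lock, demo()),
of the semantic difference these tests target.  On PREEMPT_RT,
spin_lock_irqsave() leaves hardirqs enabled, while
raw_spin_lock_irqsave() really disables them:

#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/printk.h>

static DEFINE_SPINLOCK(demo_lock);
static DEFINE_RAW_SPINLOCK(demo_raw_lock);

static void demo(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	/*
	 * On PREEMPT_RT this prints 0 (when called with irqs enabled):
	 * the _irqsave suffix does not touch the CPU's interrupt state.
	 */
	pr_info("spinlock: irqs_disabled()=%d\n", irqs_disabled());
	spin_unlock_irqrestore(&demo_lock, flags);

	raw_spin_lock_irqsave(&demo_raw_lock, flags);
	/* Prints 1 on all kernels: raw spinlocks disable hardirqs. */
	pr_info("raw spinlock: irqs_disabled()=%d\n", irqs_disabled());
	raw_spin_unlock_irqrestore(&demo_raw_lock, flags);
}
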
kernel/locking/locktorture.c | 58 ++++++++++++++++++++++++++++++++++++
1 file changed, 58 insertions(+)

diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 9425aff08936..521197366f27 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -257,6 +257,61 @@ static struct lock_torture_ops spin_lock_irq_ops = {
.name = "spin_lock_irq"
};
+#ifdef CONFIG_PREEMPT_RT
+static DEFINE_RAW_SPINLOCK(torture_raw_spinlock);
+
+static int torture_raw_spin_lock_write_lock(int tid __maybe_unused)
+__acquires(torture_raw_spinlock)
+{
+ raw_spin_lock(&torture_raw_spinlock);
+ return 0;
+}
+
+static void torture_raw_spin_lock_write_unlock(int tid __maybe_unused)
+__releases(torture_raw_spinlock)
+{
+ raw_spin_unlock(&torture_raw_spinlock);
+}
+
+static struct lock_torture_ops raw_spin_lock_ops = {
+ .writelock = torture_raw_spin_lock_write_lock,
+ .write_delay = torture_spin_lock_write_delay,
+ .task_boost = torture_rt_boost,
+ .writeunlock = torture_raw_spin_lock_write_unlock,
+ .readlock = NULL,
+ .read_delay = NULL,
+ .readunlock = NULL,
+ .name = "raw_spin_lock"
+};
+
+static int torture_raw_spin_lock_write_lock_irq(int tid __maybe_unused)
+__acquires(torture_raw_spinlock)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&torture_raw_spinlock, flags);
+ cxt.cur_ops->flags = flags;
+ return 0;
+}
+
+static void torture_raw_spin_lock_write_unlock_irq(int tid __maybe_unused)
+__releases(torture_raw_spinlock)
+{
+ raw_spin_unlock_irqrestore(&torture_raw_spinlock, cxt.cur_ops->flags);
+}
+
+static struct lock_torture_ops raw_spin_lock_irq_ops = {
+ .writelock = torture_raw_spin_lock_write_lock_irq,
+ .write_delay = torture_spin_lock_write_delay,
+ .task_boost = torture_rt_boost,
+ .writeunlock = torture_raw_spin_lock_write_unlock_irq,
+ .readlock = NULL,
+ .read_delay = NULL,
+ .readunlock = NULL,
+ .name = "raw_spin_lock_irq"
+};
+#endif
+
static DEFINE_RWLOCK(torture_rwlock);
static int torture_rwlock_write_lock(int tid __maybe_unused)
@@ -1017,6 +1072,9 @@ static int __init lock_torture_init(void)
static struct lock_torture_ops *torture_ops[] = {
&lock_busted_ops,
&spin_lock_ops, &spin_lock_irq_ops,
+#ifdef CONFIG_PREEMPT_RT
+ &raw_spin_lock_ops, &raw_spin_lock_irq_ops,
+#endif
&rw_lock_ops, &rw_lock_irq_ops,
&mutex_lock_ops,
&ww_mutex_lock_ops,
--
2.25.1