Message-Id: <20170529210302.26868-8-nicolas.pitre@linaro.org>
Date: Mon, 29 May 2017 17:03:02 -0400
From: Nicolas Pitre <nicolas.pitre@...aro.org>
To: Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>
Cc: linux-kernel@...r.kernel.org
Subject: [PATCH 7/7] rtmutex: compatibility with CONFIG_SCHED_RT=n
With no actual RT tasks, there are no priority inversion issues to care
about. We can therefore map RT mutexes to regular mutexes in that case
and remain compatible with most users.
Signed-off-by: Nicolas Pitre <nico@...aro.org>
---
include/linux/rtmutex.h | 69 +++++++++++++++++++++++++++++++++++++++++++++++++
kernel/locking/Makefile | 2 ++
lib/Kconfig.debug | 2 +-
3 files changed, 72 insertions(+), 1 deletion(-)
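
(Illustration only, not part of the patch: a minimal sketch of the
source compatibility this aims to preserve. Code written against the
rt_mutex API builds and behaves the same under either configuration;
with CONFIG_SCHED_RT=y it gets the PI-aware rt_mutex, and with
CONFIG_SCHED_RT=n the wrappers below fall through to a plain mutex.
The names "demo_lock" and "demo_lock_user" are made up for the example.)

	#include <linux/rtmutex.h>

	static DEFINE_RT_MUTEX(demo_lock);	/* hypothetical example lock */

	static int demo_lock_user(void)
	{
		int ret;

		/* Behaves like mutex_lock() when CONFIG_SCHED_RT=n */
		rt_mutex_lock(&demo_lock);
		/* ... critical section ... */
		rt_mutex_unlock(&demo_lock);

		/* The interruptible variant maps the same way */
		ret = rt_mutex_lock_interruptible(&demo_lock);
		if (ret)
			return ret;	/* -EINTR if interrupted by a signal */
		rt_mutex_unlock(&demo_lock);
		return 0;
	}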
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index 1abba5ce2a..05c444f930 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -12,6 +12,8 @@
#ifndef __LINUX_RT_MUTEX_H
#define __LINUX_RT_MUTEX_H
+#ifdef CONFIG_SCHED_RT
+
#include <linux/linkage.h>
#include <linux/rbtree.h>
#include <linux/spinlock_types.h>
@@ -98,4 +100,71 @@ extern int rt_mutex_trylock(struct rt_mutex *lock);
extern void rt_mutex_unlock(struct rt_mutex *lock);
+#else /* CONFIG_SCHED_RT */
+
+/*
+ * Without real-time task support there can be no priority inversion,
+ * so map RT mutexes onto regular mutexes.
+ */
+
+#include <linux/mutex.h>
+
+struct rt_mutex {
+ struct mutex m;
+};
+
+#define __RT_MUTEX_INITIALIZER(m) \
+ { .m = __MUTEX_INITIALIZER(m) }
+
+#define DEFINE_RT_MUTEX(mutexname) \
+ struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
+
+static inline void __rt_mutex_init(struct rt_mutex *lock, const char *name)
+{
+ static struct lock_class_key __key;
+ __mutex_init(&lock->m, name, &__key);
+}
+
+#define rt_mutex_init(mutex) __rt_mutex_init(mutex, #mutex)
+
+static inline int rt_mutex_is_locked(struct rt_mutex *lock)
+{
+ return mutex_is_locked(&lock->m);
+}
+
+static inline void rt_mutex_destroy(struct rt_mutex *lock)
+{
+ mutex_destroy(&lock->m);
+}
+
+static inline void rt_mutex_lock(struct rt_mutex *lock)
+{
+ mutex_lock(&lock->m);
+}
+
+static inline int rt_mutex_lock_interruptible(struct rt_mutex *lock)
+{
+ return mutex_lock_interruptible(&lock->m);
+}
+
+static inline int rt_mutex_trylock(struct rt_mutex *lock)
+{
+ return mutex_trylock(&lock->m);
+}
+
+static inline void rt_mutex_unlock(struct rt_mutex *lock)
+{
+ mutex_unlock(&lock->m);
+}
+
+static inline int rt_mutex_debug_check_no_locks_freed(const void *from,
+ unsigned long len)
+{
+ return 0;
+}
+#define rt_mutex_debug_check_no_locks_held(task) do { } while (0)
+#define rt_mutex_debug_task_free(t) do { } while (0)
+
+#endif /* CONFIG_SCHED_RT */
+
#endif
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
index 760158d9d9..7a076be456 100644
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
@@ -20,8 +20,10 @@ obj-$(CONFIG_SMP) += spinlock.o
obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o
obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
obj-$(CONFIG_QUEUED_SPINLOCKS) += qspinlock.o
+ifeq ($(CONFIG_SCHED_RT),y)
obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
+endif
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e4587ebe52..0ecc7eb9dc 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1008,7 +1008,7 @@ menu "Lock Debugging (spinlocks, mutexes, etc...)"
config DEBUG_RT_MUTEXES
bool "RT Mutex debugging, deadlock detection"
- depends on DEBUG_KERNEL && RT_MUTEXES
+ depends on DEBUG_KERNEL && RT_MUTEXES && SCHED_RT
help
This allows rt mutex semantics violations and rt mutex related
deadlocks (lockups) to be detected and reported automatically.
--
2.9.4