Message-ID: <20210815211306.023630962@linutronix.de>
Date: Sun, 15 Aug 2021 23:29:28 +0200 (CEST)
From: Thomas Gleixner <tglx@...utronix.de>
To: LKML <linux-kernel@...r.kernel.org>
Cc: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...nel.org>,
Juri Lelli <juri.lelli@...hat.com>,
Steven Rostedt <rostedt@...dmis.org>,
Daniel Bristot de Oliveira <bristot@...hat.com>,
Will Deacon <will@...nel.org>,
Waiman Long <longman@...hat.com>,
Boqun Feng <boqun.feng@...il.com>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Davidlohr Bueso <dave@...olabs.net>,
Mike Galbraith <efault@....de>
Subject: [patch V5 72/72] locking/local_lock: Add PREEMPT_RT support
From: Thomas Gleixner <tglx@...utronix.de>

On PREEMPT_RT enabled kernels, local_lock maps to a per-CPU 'sleeping'
spinlock which protects the critical section while staying preemptible.
CPU locality is established by disabling migration.

Provide the necessary types and macros to substitute the non-RT variant.

Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
---
V5: New patch
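
Not part of the patch, just an illustration for reviewers: a minimal usage
sketch of what a local_lock user sees on both configurations. The struct,
per CPU variable and field names (my_pcpu_data, my_data, count) are made
up for this example:

	#include <linux/local_lock.h>
	#include <linux/percpu.h>

	struct my_pcpu_data {
		local_lock_t	lock;
		unsigned int	count;
	};

	static DEFINE_PER_CPU(struct my_pcpu_data, my_data) = {
		.lock = INIT_LOCAL_LOCK(lock),
	};

	static void my_data_inc(void)
	{
		/*
		 * !PREEMPT_RT: local_lock() disables preemption and
		 * acquires the lockdep map.
		 *
		 * PREEMPT_RT: local_lock() disables migration and acquires
		 * the per CPU 'sleeping' spinlock, so the critical section
		 * stays preemptible.
		 */
		local_lock(&my_data.lock);
		this_cpu_ptr(&my_data)->count++;
		local_unlock(&my_data.lock);
	}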
---
include/linux/local_lock_internal.h | 48 ++++++++++++++++++++++++++++++++++++
1 file changed, 48 insertions(+)
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -6,6 +6,8 @@
 #include <linux/percpu-defs.h>
 #include <linux/lockdep.h>
 
+#ifndef CONFIG_PREEMPT_RT
+
 typedef struct {
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map	dep_map;
@@ -95,3 +97,49 @@ do { \
 		local_lock_release(this_cpu_ptr(lock));	\
 		local_irq_restore(flags);			\
 	} while (0)
+
+#else /* !CONFIG_PREEMPT_RT */
+
+/*
+ * On PREEMPT_RT local_lock maps to a per CPU spinlock which protects the
+ * critical section while staying preemptible.
+ */
+typedef struct {
+	spinlock_t		lock;
+} local_lock_t;
+
+#define INIT_LOCAL_LOCK(lockname) {				\
+		__LOCAL_SPIN_LOCK_UNLOCKED((lockname).lock)	\
+	}
+
+#define __local_lock_init(l)					\
+	do {							\
+		local_spin_lock_init(&(l)->lock);		\
+	} while (0)
+
+#define __local_lock(__lock)					\
+	do {							\
+		migrate_disable();				\
+		spin_lock(&(this_cpu_ptr((__lock)))->lock);	\
+	} while (0)
+
+#define __local_lock_irq(lock)			__local_lock(lock)
+
+#define __local_lock_irqsave(lock, flags)			\
+	do {							\
+		typecheck(unsigned long, flags);		\
+		flags = 0;					\
+		__local_lock(lock);				\
+	} while (0)
+
+#define __local_unlock(__lock)					\
+	do {							\
+		spin_unlock(&(this_cpu_ptr((__lock)))->lock);	\
+		migrate_enable();				\
+	} while (0)
+
+#define __local_unlock_irq(lock)		__local_unlock(lock)
+
+#define __local_unlock_irqrestore(lock, flags)	__local_unlock(lock)
+
+#endif /* CONFIG_PREEMPT_RT */
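
A second illustrative sketch, again not part of the patch and with made up
names, building on the includes from the sketch above: the _irqsave and
_irqrestore variants keep their prototypes, but on PREEMPT_RT interrupts are
never disabled and 'flags' carries no state; the zeroing only satisfies
typecheck() so callers stay identical on both configurations:

	struct my_stats {
		local_lock_t	lock;
		u64		sum;
	};

	static DEFINE_PER_CPU(struct my_stats, my_stats) = {
		.lock = INIT_LOCAL_LOCK(lock),
	};

	static void my_stats_add(u64 delta)
	{
		unsigned long flags;

		/* !RT: local_irq_save(flags); RT: migrate_disable() + spin_lock() */
		local_lock_irqsave(&my_stats.lock, flags);
		this_cpu_ptr(&my_stats)->sum += delta;
		/* !RT: local_irq_restore(flags); RT: spin_unlock() + migrate_enable() */
		local_unlock_irqrestore(&my_stats.lock, flags);
	}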