[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20180713175049.nx2xg4no4zbfcnfl@linutronix.de>
Date: Fri, 13 Jul 2018 19:50:49 +0200
From: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
To: Steven Rostedt <rostedt@...dmis.org>
Cc: linux-rt-users@...r.kernel.org, linux-kernel@...r.kernel.org,
tglx@...utronix.de, Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will.deacon@....com>,
linux-arm-kernel@...ts.infradead.org,
Mike Galbraith <efault@....de>
Subject: [PATCH RT] locallock: add local_lock_bh()
For the ARM64 simd locking it would be easier to have local_lock_bh()
which grabs a local_lock with BH disabled and turns into a
local_bh_disable() on !RT.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
---
obviously required by the previous one…
include/linux/locallock.h | 33 +++++++++++++++++++++++++++++++++
1 file changed, 33 insertions(+)
diff --git a/include/linux/locallock.h b/include/linux/locallock.h
index 921eab83cd34..15aa0dea2bfb 100644
--- a/include/linux/locallock.h
+++ b/include/linux/locallock.h
@@ -47,9 +47,23 @@ static inline void __local_lock(struct local_irq_lock *lv)
lv->nestcnt++;
}
+static inline void __local_lock_bh(struct local_irq_lock *lv) /* BH-disabling variant of __local_lock() */
+{
+ if (lv->owner != current) { /* first acquisition by this task */
+ spin_lock_bh(&lv->lock); /* takes the lock with bottom halves disabled */
+ LL_WARN(lv->owner); /* lock must be unowned at this point */
+ LL_WARN(lv->nestcnt); /* nest count must be zero on fresh acquire */
+ lv->owner = current;
+ }
+ lv->nestcnt++; /* recursive acquire by the owner only bumps the count */
+}
+
#define local_lock(lvar) \
do { __local_lock(&get_local_var(lvar)); } while (0)
+#define local_lock_bh(lvar) \
+ do { __local_lock_bh(&get_local_var(lvar)); } while (0) /* RT: BH variant of local_lock() */
+
#define local_lock_on(lvar, cpu) \
do { __local_lock(&per_cpu(lvar, cpu)); } while (0)
@@ -88,12 +102,29 @@ static inline void __local_unlock(struct local_irq_lock *lv)
spin_unlock(&lv->lock);
}
+static inline void __local_unlock_bh(struct local_irq_lock *lv) /* release counterpart of __local_lock_bh() */
+{
+ LL_WARN(lv->nestcnt == 0); /* unlock without a matching lock */
+ LL_WARN(lv->owner != current); /* unlock attempted by non-owner */
+ if (--lv->nestcnt) /* still nested: keep the lock held */
+ return;
+
+ lv->owner = NULL;
+ spin_unlock_bh(&lv->lock); /* drops the lock and re-enables bottom halves */
+}
+
#define local_unlock(lvar) \
do { \
__local_unlock(this_cpu_ptr(&lvar)); \
put_local_var(lvar); \
} while (0)
+#define local_unlock_bh(lvar) \
+ do { \
+ __local_unlock_bh(this_cpu_ptr(&lvar)); \
+ put_local_var(lvar); \
+ } while (0) /* RT: BH counterpart of local_unlock() */
+
#define local_unlock_on(lvar, cpu) \
do { __local_unlock(&per_cpu(lvar, cpu)); } while (0)
@@ -253,6 +284,8 @@ static inline void local_irq_lock_init(int lvar) { }
#define local_lock(lvar) preempt_disable()
#define local_unlock(lvar) preempt_enable()
+#define local_lock_bh(lvar) local_bh_disable() /* !RT: lock collapses to plain BH disable */
+#define local_unlock_bh(lvar) local_bh_enable() /* !RT: unlock just re-enables BH */
#define local_lock_irq(lvar) local_irq_disable()
#define local_lock_irq_on(lvar, cpu) local_irq_disable()
#define local_unlock_irq(lvar) local_irq_enable()
--
2.18.0
Powered by blists - more mailing lists