Message-Id: <20250317-slub-percpu-caches-v3-1-9d9884d8b643@suse.cz>
Date: Mon, 17 Mar 2025 15:33:02 +0100
From: Vlastimil Babka <vbabka@...e.cz>
To: Suren Baghdasaryan <surenb@...gle.com>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>,
Christoph Lameter <cl@...ux.com>, David Rientjes <rientjes@...gle.com>
Cc: Roman Gushchin <roman.gushchin@...ux.dev>,
Harry Yoo <harry.yoo@...cle.com>, Uladzislau Rezki <urezki@...il.com>,
linux-mm@...ck.org, linux-kernel@...r.kernel.org, rcu@...r.kernel.org,
maple-tree@...ts.infradead.org, vbabka@...e.cz,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Davidlohr Bueso <dave@...olabs.net>, Alexei Starovoitov <ast@...nel.org>
Subject: [PATCH RFC v3 1/8] locking/local_lock: Introduce localtry_lock_t
From: Sebastian Andrzej Siewior <bigeasy@...utronix.de>

In !PREEMPT_RT, local_lock_irqsave() disables interrupts to protect the
critical section, but it doesn't prevent NMIs, so fully reentrant code
cannot use local_lock_irqsave() for exclusive access.

Introduce localtry_lock_t and localtry_lock_irqsave(), which disables
interrupts and sets acquired=1, so a localtry_trylock_irqsave() from NMI
attempting to acquire the same lock will return false.

In PREEMPT_RT, local_lock_irqsave() maps to a preemptible spin_lock().
Map localtry_trylock_irqsave() to a preemptible spin_trylock(). When in
hard IRQ or NMI, return false right away, since spin_trylock() is not
safe due to the explicit locking in the underlying rt_spin_trylock()
implementation. Removing this explicit locking and attempting only
"trylock" is undesired due to PI implications.

Note there is no need to use local_inc for the acquired variable, since
it's a percpu variable with strict nesting scopes.

Acked-by: Davidlohr Bueso <dave@...olabs.net>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Signed-off-by: Vlastimil Babka <vbabka@...e.cz>
Link: https://lore.kernel.org/r/20250222024427.30294-2-alexei.starovoitov@gmail.com
Signed-off-by: Alexei Starovoitov <ast@...nel.org>
---
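[ Not part of the patch: a minimal usage sketch of the new API, using a
  hypothetical per-CPU counter. All names below (counter_pcp, counter,
  counter_add, counter_add_nmi) are made up for illustration only. ]

	struct counter_pcp {
		localtry_lock_t lock;
		u64             value;
	};

	static DEFINE_PER_CPU(struct counter_pcp, counter) = {
		.lock = INIT_LOCALTRY_LOCK(lock),
	};

	/* Task context: may sleep on PREEMPT_RT, disables IRQs otherwise. */
	static void counter_add(u64 delta)
	{
		unsigned long flags;

		localtry_lock_irqsave(&counter.lock, flags);
		this_cpu_ptr(&counter)->value += delta;
		localtry_unlock_irqrestore(&counter.lock, flags);
	}

	/* NMI context: must not spin, so only try to acquire the lock. */
	static bool counter_add_nmi(u64 delta)
	{
		unsigned long flags;

		if (!localtry_trylock_irqsave(&counter.lock, flags))
			return false;	/* interrupted owner holds the lock */
		this_cpu_ptr(&counter)->value += delta;
		localtry_unlock_irqrestore(&counter.lock, flags);
		return true;
	}
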
include/linux/local_lock.h | 70 +++++++++++++++++
include/linux/local_lock_internal.h | 146 ++++++++++++++++++++++++++++++++++++
2 files changed, 216 insertions(+)
diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h
index 091dc0b6bdfb9f4721f94d19828a38fbfa59346c..1a0bc35839e360d5c8170105849c3883463852f8 100644
--- a/include/linux/local_lock.h
+++ b/include/linux/local_lock.h
@@ -51,6 +51,76 @@
#define local_unlock_irqrestore(lock, flags) \
__local_unlock_irqrestore(lock, flags)
+/**
+ * localtry_lock_init - Runtime initialize a lock instance
+ */
+#define localtry_lock_init(lock) __localtry_lock_init(lock)
+
+/**
+ * localtry_lock - Acquire a per CPU local lock
+ * @lock: The lock variable
+ */
+#define localtry_lock(lock) __localtry_lock(lock)
+
+/**
+ * localtry_lock_irq - Acquire a per CPU local lock and disable interrupts
+ * @lock: The lock variable
+ */
+#define localtry_lock_irq(lock) __localtry_lock_irq(lock)
+
+/**
+ * localtry_lock_irqsave - Acquire a per CPU local lock, save and disable
+ * interrupts
+ * @lock: The lock variable
+ * @flags: Storage for interrupt flags
+ */
+#define localtry_lock_irqsave(lock, flags) \
+ __localtry_lock_irqsave(lock, flags)
+
+/**
+ * localtry_trylock - Try to acquire a per CPU local lock.
+ * @lock: The lock variable
+ *
+ * The function can be used in any context such as NMI or HARDIRQ. Due to
+ * locking constraints it will _always_ fail to acquire the lock in NMI or
+ * HARDIRQ context on PREEMPT_RT.
+ */
+#define localtry_trylock(lock) __localtry_trylock(lock)
+
+/**
+ * localtry_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
+ * interrupts if acquired
+ * @lock: The lock variable
+ * @flags: Storage for interrupt flags
+ *
+ * The function can be used in any context such as NMI or HARDIRQ. Due to
+ * locking constraints it will _always_ fail to acquire the lock in NMI or
+ * HARDIRQ context on PREEMPT_RT.
+ */
+#define localtry_trylock_irqsave(lock, flags) \
+ __localtry_trylock_irqsave(lock, flags)
+
+/**
+ * localtry_unlock - Release a per CPU local lock
+ * @lock: The lock variable
+ */
+#define localtry_unlock(lock) __localtry_unlock(lock)
+
+/**
+ * localtry_unlock_irq - Release a per CPU local lock and enable interrupts
+ * @lock: The lock variable
+ */
+#define localtry_unlock_irq(lock) __localtry_unlock_irq(lock)
+
+/**
+ * localtry_unlock_irqrestore - Release a per CPU local lock and restore
+ * interrupt flags
+ * @lock: The lock variable
+ * @flags: Interrupt flags to restore
+ */
+#define localtry_unlock_irqrestore(lock, flags) \
+ __localtry_unlock_irqrestore(lock, flags)
+
DEFINE_GUARD(local_lock, local_lock_t __percpu*,
local_lock(_T),
local_unlock(_T))
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index 8dd71fbbb6d2b6748969438c4642f7d970834871..67bd13d142fac39bc0f8b2c05eaf81717ff480f9 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -15,6 +15,11 @@ typedef struct {
#endif
} local_lock_t;
+typedef struct {
+ local_lock_t llock;
+ unsigned int acquired;
+} localtry_lock_t;
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LOCAL_LOCK_DEBUG_INIT(lockname) \
.dep_map = { \
@@ -31,6 +36,13 @@ static inline void local_lock_acquire(local_lock_t *l)
l->owner = current;
}
+static inline void local_trylock_acquire(local_lock_t *l)
+{
+ lock_map_acquire_try(&l->dep_map);
+ DEBUG_LOCKS_WARN_ON(l->owner);
+ l->owner = current;
+}
+
static inline void local_lock_release(local_lock_t *l)
{
DEBUG_LOCKS_WARN_ON(l->owner != current);
@@ -45,11 +57,13 @@ static inline void local_lock_debug_init(local_lock_t *l)
#else /* CONFIG_DEBUG_LOCK_ALLOC */
# define LOCAL_LOCK_DEBUG_INIT(lockname)
static inline void local_lock_acquire(local_lock_t *l) { }
+static inline void local_trylock_acquire(local_lock_t *l) { }
static inline void local_lock_release(local_lock_t *l) { }
static inline void local_lock_debug_init(local_lock_t *l) { }
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
#define INIT_LOCAL_LOCK(lockname) { LOCAL_LOCK_DEBUG_INIT(lockname) }
+#define INIT_LOCALTRY_LOCK(lockname) { .llock = { LOCAL_LOCK_DEBUG_INIT(lockname.llock) }}
#define __local_lock_init(lock) \
do { \
@@ -118,6 +132,104 @@ do { \
#define __local_unlock_nested_bh(lock) \
local_lock_release(this_cpu_ptr(lock))
+/* localtry_lock_t variants */
+
+#define __localtry_lock_init(lock) \
+do { \
+ __local_lock_init(&(lock)->llock); \
+ WRITE_ONCE((lock)->acquired, 0); \
+} while (0)
+
+#define __localtry_lock(lock) \
+ do { \
+ localtry_lock_t *lt; \
+ preempt_disable(); \
+ lt = this_cpu_ptr(lock); \
local_lock_acquire(&lt->llock); \
+ WRITE_ONCE(lt->acquired, 1); \
+ } while (0)
+
+#define __localtry_lock_irq(lock) \
+ do { \
+ localtry_lock_t *lt; \
+ local_irq_disable(); \
+ lt = this_cpu_ptr(lock); \
local_lock_acquire(&lt->llock); \
+ WRITE_ONCE(lt->acquired, 1); \
+ } while (0)
+
+#define __localtry_lock_irqsave(lock, flags) \
+ do { \
+ localtry_lock_t *lt; \
+ local_irq_save(flags); \
+ lt = this_cpu_ptr(lock); \
local_lock_acquire(&lt->llock); \
+ WRITE_ONCE(lt->acquired, 1); \
+ } while (0)
+
+#define __localtry_trylock(lock) \
+ ({ \
+ localtry_lock_t *lt; \
+ bool _ret; \
+ \
+ preempt_disable(); \
+ lt = this_cpu_ptr(lock); \
+ if (!READ_ONCE(lt->acquired)) { \
+ WRITE_ONCE(lt->acquired, 1); \
local_trylock_acquire(&lt->llock); \
+ _ret = true; \
+ } else { \
+ _ret = false; \
+ preempt_enable(); \
+ } \
+ _ret; \
+ })
+
+#define __localtry_trylock_irqsave(lock, flags) \
+ ({ \
+ localtry_lock_t *lt; \
+ bool _ret; \
+ \
+ local_irq_save(flags); \
+ lt = this_cpu_ptr(lock); \
+ if (!READ_ONCE(lt->acquired)) { \
+ WRITE_ONCE(lt->acquired, 1); \
local_trylock_acquire(&lt->llock); \
+ _ret = true; \
+ } else { \
+ _ret = false; \
+ local_irq_restore(flags); \
+ } \
+ _ret; \
+ })
+
+#define __localtry_unlock(lock) \
+ do { \
+ localtry_lock_t *lt; \
+ lt = this_cpu_ptr(lock); \
+ WRITE_ONCE(lt->acquired, 0); \
local_lock_release(&lt->llock); \
+ preempt_enable(); \
+ } while (0)
+
+#define __localtry_unlock_irq(lock) \
+ do { \
+ localtry_lock_t *lt; \
+ lt = this_cpu_ptr(lock); \
+ WRITE_ONCE(lt->acquired, 0); \
local_lock_release(&lt->llock); \
+ local_irq_enable(); \
+ } while (0)
+
+#define __localtry_unlock_irqrestore(lock, flags) \
+ do { \
+ localtry_lock_t *lt; \
+ lt = this_cpu_ptr(lock); \
+ WRITE_ONCE(lt->acquired, 0); \
local_lock_release(&lt->llock); \
+ local_irq_restore(flags); \
+ } while (0)
+
#else /* !CONFIG_PREEMPT_RT */
/*
@@ -125,8 +237,10 @@ do { \
* critical section while staying preemptible.
*/
typedef spinlock_t local_lock_t;
+typedef spinlock_t localtry_lock_t;
#define INIT_LOCAL_LOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
+#define INIT_LOCALTRY_LOCK(lockname) INIT_LOCAL_LOCK(lockname)
#define __local_lock_init(l) \
do { \
@@ -169,4 +283,36 @@ do { \
spin_unlock(this_cpu_ptr((lock))); \
} while (0)
+/* localtry_lock_t variants */
+
+#define __localtry_lock_init(lock) __local_lock_init(lock)
+#define __localtry_lock(lock) __local_lock(lock)
+#define __localtry_lock_irq(lock) __local_lock(lock)
+#define __localtry_lock_irqsave(lock, flags) __local_lock_irqsave(lock, flags)
+#define __localtry_unlock(lock) __local_unlock(lock)
+#define __localtry_unlock_irq(lock) __local_unlock(lock)
+#define __localtry_unlock_irqrestore(lock, flags) __local_unlock_irqrestore(lock, flags)
+
+#define __localtry_trylock(lock) \
+ ({ \
+ int __locked; \
+ \
+ if (in_nmi() | in_hardirq()) { \
+ __locked = 0; \
+ } else { \
+ migrate_disable(); \
+ __locked = spin_trylock(this_cpu_ptr((lock))); \
+ if (!__locked) \
+ migrate_enable(); \
+ } \
+ __locked; \
+ })
+
+#define __localtry_trylock_irqsave(lock, flags) \
+ ({ \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ __localtry_trylock(lock); \
+ })
+
#endif /* CONFIG_PREEMPT_RT */
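[ Illustration only: since __localtry_trylock() bails out in hard IRQ and
  NMI context on PREEMPT_RT, callers that may run there need a fallback
  path. Hypothetical sketch, names (counter, counter_slowpath) made up: ]

	if (localtry_trylock(&counter.lock)) {
		this_cpu_ptr(&counter)->value++;
		localtry_unlock(&counter.lock);
	} else {
		/* lock unavailable (or RT hardirq/NMI): fall back */
		atomic_long_inc(&counter_slowpath);
	}
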
--
2.48.1