Message-ID: <Yjr/9ATrCDxr/0hq@linutronix.de>
Date: Wed, 23 Mar 2022 12:09:40 +0100
From: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
To: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Borislav Petkov <bp@...en8.de>, Ingo Molnar <mingo@...nel.org>,
Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
Peter Zijlstra <peterz@...radead.org>,
Will Deacon <will@...nel.org>,
Waiman Long <longman@...hat.com>,
Boqun Feng <boqun.feng@...il.com>,
Thomas Gleixner <tglx@...utronix.de>,
Andrew Morton <akpm@...ux-foundation.org>
Subject: [PATCH] locking/local_lock: Pretend to use the per-CPU variable if
not needed.
In the !RT && !LOCKDEP case the per-CPU variables aren't used. The commit
mentioned below tried to avoid this_cpu_ptr() because it generates code and
clobbers registers, neither of which is needed here.
That change left so few references to the variable that llvm assumed it is
unused and emitted a warning.
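For illustration only (not part of the patch; "demo_lock" and "demo()" are
made-up names), after the commit below a lock operation in the !RT &&
!LOCKDEP configuration expanded roughly to:

  #include <linux/local_lock.h>

  static DEFINE_PER_CPU(local_lock_t, demo_lock) = INIT_LOCAL_LOCK(demo_lock);

  static void demo(void)
  {
          local_lock(&demo_lock);
          /*
           * -> preempt_disable();
           *    do { typecheck(local_lock_t *, this_cpu_ptr(&demo_lock)); } while (0);
           *
           * typecheck() only inspects the type of its argument, so
           * demo_lock is never actually referenced and llvm warns that
           * the variable is unused.
           */
          local_unlock(&demo_lock);
  }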
Revert local_lock_*() to its previous static inline implementation to keep
the type checking.
Replace this_cpu_ptr() with __ll_cpu_ptr(), which maps to this_cpu_ptr()
where the pointer is actually needed.
In the !RT && !LOCKDEP case map it to per_cpu_ptr(, 0) instead, which leaves
no code behind and does not trigger the llvm warning, while still ensuring
that the argument is a per-CPU pointer. The assembly output in this case is
unchanged.
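With the patch applied, the same (still made-up) demo_lock example expands
roughly to:

  static void demo(void)
  {
          local_lock(&demo_lock);
          /*
           * -> preempt_disable();
           *    local_lock_acquire(per_cpu_ptr(&demo_lock, 0));
           *
           * local_lock_acquire() is an empty static inline here, so no
           * code is generated, but demo_lock is referenced and type
           * checked, which avoids the llvm warning.
           */
          local_unlock(&demo_lock);
  }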
Fixes: 9983a9d577db4 ("locking/local_lock: Make the empty local_lock_*() function a macro.")
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
---
include/linux/local_lock_internal.h | 21 ++++++++++++---------
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index 6d635e8306d64..e671ead5fbad5 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -24,6 +24,8 @@ typedef struct {
}, \
.owner = NULL,
+#define __ll_cpu_ptr(__ll_cpuptr) (this_cpu_ptr(__ll_cpuptr))
+
static inline void local_lock_acquire(local_lock_t *l)
{
lock_map_acquire(&l->dep_map);
@@ -44,9 +46,10 @@ static inline void local_lock_debug_init(local_lock_t *l)
}
#else /* CONFIG_DEBUG_LOCK_ALLOC */
# define LOCAL_LOCK_DEBUG_INIT(lockname)
-# define local_lock_acquire(__ll) do { typecheck(local_lock_t *, __ll); } while (0)
-# define local_lock_release(__ll) do { typecheck(local_lock_t *, __ll); } while (0)
-# define local_lock_debug_init(__ll) do { typecheck(local_lock_t *, __ll); } while (0)
+# define __ll_cpu_ptr(__ll_cpuptr) per_cpu_ptr(__ll_cpuptr, 0)
+static inline void local_lock_acquire(local_lock_t *l) { }
+static inline void local_lock_release(local_lock_t *l) { }
+static inline void local_lock_debug_init(local_lock_t *l) { }
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
#define INIT_LOCAL_LOCK(lockname) { LOCAL_LOCK_DEBUG_INIT(lockname) }
@@ -65,36 +68,36 @@ do { \
#define __local_lock(lock) \
do { \
preempt_disable(); \
- local_lock_acquire(this_cpu_ptr(lock)); \
+ local_lock_acquire(__ll_cpu_ptr(lock)); \
} while (0)
#define __local_lock_irq(lock) \
do { \
local_irq_disable(); \
- local_lock_acquire(this_cpu_ptr(lock)); \
+ local_lock_acquire(__ll_cpu_ptr(lock)); \
} while (0)
#define __local_lock_irqsave(lock, flags) \
do { \
local_irq_save(flags); \
- local_lock_acquire(this_cpu_ptr(lock)); \
+ local_lock_acquire(__ll_cpu_ptr(lock)); \
} while (0)
#define __local_unlock(lock) \
do { \
- local_lock_release(this_cpu_ptr(lock)); \
+ local_lock_release(__ll_cpu_ptr(lock)); \
preempt_enable(); \
} while (0)
#define __local_unlock_irq(lock) \
do { \
- local_lock_release(this_cpu_ptr(lock)); \
+ local_lock_release(__ll_cpu_ptr(lock)); \
local_irq_enable(); \
} while (0)
#define __local_unlock_irqrestore(lock, flags) \
do { \
- local_lock_release(this_cpu_ptr(lock)); \
+ local_lock_release(__ll_cpu_ptr(lock)); \
local_irq_restore(flags); \
} while (0)
--
2.35.1