Message-ID: <20250916163252.290994801@linutronix.de>
Date: Tue, 16 Sep 2025 18:33:16 +0200 (CEST)
From: Thomas Gleixner <tglx@...utronix.de>
To: LKML <linux-kernel@...r.kernel.org>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>,
Peter Zijlstra <peterz@...radead.org>,
x86@...nel.org,
kernel test robot <lkp@...el.com>,
Russell King <linux@...linux.org.uk>,
linux-arm-kernel@...ts.infradead.org,
Nathan Chancellor <nathan@...nel.org>,
Christophe Leroy <christophe.leroy@...roup.eu>,
Darren Hart <dvhart@...radead.org>,
Davidlohr Bueso <dave@...olabs.net>,
André Almeida <andrealmeid@...lia.com>,
Alexander Viro <viro@...iv.linux.org.uk>,
Christian Brauner <brauner@...nel.org>,
Jan Kara <jack@...e.cz>,
linux-fsdevel@...r.kernel.org
Subject: [patch V2 5/6] x86/futex: Convert to scoped masked user access
Replace the open-coded implementation with the scoped masked user access
mechanism.
No functional change intended.
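
For reference, a rough sketch of how the scoped helper is expected to be
used, pieced together from the open-coded sequence this patch removes.
Only the names visible in the diff below are taken from the series;
everything else is illustrative:

	u32 val;

	scoped_masked_user_rw_access(uaddr, return -EFAULT, {
		/*
		 * The body runs with the user access region open.  The
		 * unsafe_*() accessors branch to the scope provided
		 * scope_fault label on a fault.  user_access_end() is
		 * assumed to be emitted on every exit path - normal
		 * completion, a branch to scope_fault or a return from
		 * within the body - and the fault statement (here
		 * "return -EFAULT") is assumed to run both when the
		 * access cannot be opened and when the body faults.
		 */
		unsafe_get_user(val, uaddr, scope_fault);
	});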
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Cc: x86@...nel.org
---
V2: Convert to scoped masked access
Use RW access functions - Christophe
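
    To illustrate the read/write distinction behind the second note: the
    futex operations both read and modify the user word, hence the RW
    scope.  A minimal, purely illustrative comparison; the read-only
    helper name is an assumption by analogy with the RW helper actually
    used in this patch:

	/* Plain fetch: a read-only scope would suffice (name assumed) */
	scoped_masked_user_read_access(uaddr, return -EFAULT, {
		unsafe_get_user(val, uaddr, scope_fault);
	});

	/* Futex atomics read and write *uaddr, so the RW scope is used */
	scoped_masked_user_rw_access(uaddr, return -EFAULT, {
		unsafe_atomic_op1("xchgl %0, %2", oval, uaddr, oparg, scope_fault);
	});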
---
arch/x86/include/asm/futex.h | 76 ++++++++++++++++++-------------------------
1 file changed, 32 insertions(+), 44 deletions(-)
---
--- a/arch/x86/include/asm/futex.h
+++ b/arch/x86/include/asm/futex.h
@@ -48,37 +48,29 @@ do { \
static __always_inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
u32 __user *uaddr)
{
- if (can_do_masked_user_access())
- uaddr = masked_user_access_begin(uaddr);
- else if (!user_access_begin(uaddr, sizeof(u32)))
- return -EFAULT;
-
- switch (op) {
- case FUTEX_OP_SET:
- unsafe_atomic_op1("xchgl %0, %2", oval, uaddr, oparg, Efault);
- break;
- case FUTEX_OP_ADD:
- unsafe_atomic_op1(LOCK_PREFIX "xaddl %0, %2", oval,
- uaddr, oparg, Efault);
- break;
- case FUTEX_OP_OR:
- unsafe_atomic_op2("orl %4, %3", oval, uaddr, oparg, Efault);
- break;
- case FUTEX_OP_ANDN:
- unsafe_atomic_op2("andl %4, %3", oval, uaddr, ~oparg, Efault);
- break;
- case FUTEX_OP_XOR:
- unsafe_atomic_op2("xorl %4, %3", oval, uaddr, oparg, Efault);
- break;
- default:
- user_access_end();
- return -ENOSYS;
- }
- user_access_end();
+ scoped_masked_user_rw_access(uaddr, return -EFAULT, {
+ switch (op) {
+ case FUTEX_OP_SET:
+ unsafe_atomic_op1("xchgl %0, %2", oval, uaddr, oparg, scope_fault);
+ break;
+ case FUTEX_OP_ADD:
+ unsafe_atomic_op1(LOCK_PREFIX "xaddl %0, %2", oval,
+ uaddr, oparg, scope_fault);
+ break;
+ case FUTEX_OP_OR:
+ unsafe_atomic_op2("orl %4, %3", oval, uaddr, oparg, scope_fault);
+ break;
+ case FUTEX_OP_ANDN:
+ unsafe_atomic_op2("andl %4, %3", oval, uaddr, ~oparg, scope_fault);
+ break;
+ case FUTEX_OP_XOR:
+ unsafe_atomic_op2("xorl %4, %3", oval, uaddr, oparg, scope_fault);
+ break;
+ default:
+ return -ENOSYS;
+ }
+ });
return 0;
-Efault:
- user_access_end();
- return -EFAULT;
}
static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
@@ -86,20 +78,16 @@ static inline int futex_atomic_cmpxchg_i
{
int ret = 0;
- if (can_do_masked_user_access())
- uaddr = masked_user_access_begin(uaddr);
- else if (!user_access_begin(uaddr, sizeof(u32)))
- return -EFAULT;
- asm volatile("\n"
- "1:\t" LOCK_PREFIX "cmpxchgl %3, %2\n"
- "2:\n"
- _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %0) \
- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
- : "r" (newval), "1" (oldval)
- : "memory"
- );
- user_access_end();
- *uval = oldval;
+ scoped_masked_user_rw_access(uaddr, return -EFAULT, {
+ asm volatile("\n"
+ "1:\t" LOCK_PREFIX "cmpxchgl %3, %2\n"
+ "2:\n"
+ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %0) \
+ : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
+ : "r" (newval), "1" (oldval)
+ : "memory");
+ *uval = oldval;
+ });
return ret;
}