Message-Id: <20251023124400.3279-1-david.laight.linux@gmail.com>
Date: Thu, 23 Oct 2025 13:44:00 +0100
From: David Laight <david.laight.linux@...il.com>
To: linux-kernel@...r.kernel.org
Cc: David Laight <david.laight.linux@...il.com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Alexander Viro <viro@...iv.linux.org.uk>,
Christian Brauner <brauner@...nel.org>,
Jan Kara <jack@...e.cz>,
Arnd Bergmann <arnd@...db.de>,
Kees Cook <kees@...nel.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Dave Hansen <dave.hansen@...ux.intel.com>,
Peter Zijlstra <peterz@...radead.org>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
x86@...nel.org,
Christophe Leroy <christophe.leroy@...roup.eu>
Subject: [PATCH 1/1] uaccess: Rename masked_user_access to sanitised_user_access

masked_user_access is a bad name and should probably be reserved for
code that removes high address bits that the hardware ignores (LAM).
The typical implementation is uaddr = min(uaddr, guard_page), which
isn't a 'masking' operation.
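
As an illustration (a sketch with hypothetical names, not the kernel's
actual implementation), the clamp can be written as plain C; the x86
version in the hunk below does the same thing with a cmp followed by
a conditional move:

	/*
	 * min(uaddr, guard_page): any kernel address collapses onto the
	 * base of the unmapped guard page, so a subsequent dereference
	 * faults instead of (speculatively) reading kernel memory.
	 */
	static inline unsigned long
	clamp_user_address(unsigned long uaddr, unsigned long guard_page)
	{
		return uaddr > guard_page ? guard_page : uaddr;
	}
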
Move some of the comments from arch/x86/include/asm/uaccess_64.h to
include/linux/uaccess.h.

Update the few places where it is used.

Signed-off-by: David Laight <david.laight.linux@...il.com>
---
For minimal churn, this rename (or whatever better name emerges from
the ensuing bikeshed discussion) probably needs to be applied after
the scoped_user_XXX_access() patch and before the ppc implementation.
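
For reference, callers of the renamed interface all follow the same
shape; this mirrors the fs/select.c hunk below (val, from and the
Efault label are the caller's own):

	if (can_do_sanitised_user_access())
		from = sanitised_user_access_begin(from);
	else if (!user_read_access_begin(from, sizeof(*from)))
		return -EFAULT;
	unsafe_get_user(val, from, Efault);
	user_read_access_end();

Note that the sanitised path never calls access_ok(): a bad pointer is
clamped so that the unsafe access itself faults rather than being
rejected up front.
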
arch/x86/include/asm/futex.h | 8 ++++----
arch/x86/include/asm/uaccess_64.h | 17 +++++++++--------
fs/select.c | 4 ++--
include/linux/uaccess.h | 22 +++++++++++++++-------
kernel/futex/futex.h | 8 ++++----
lib/strncpy_from_user.c | 4 ++--
lib/strnlen_user.c | 4 ++--
net/core/scm.c | 4 ++--
net/socket.c | 4 ++--
9 files changed, 42 insertions(+), 33 deletions(-)

diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
index 6e2458088800..91228a39474d 100644
--- a/arch/x86/include/asm/futex.h
+++ b/arch/x86/include/asm/futex.h
@@ -48,8 +48,8 @@ do { \
static __always_inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
u32 __user *uaddr)
{
- if (can_do_masked_user_access())
- uaddr = masked_user_access_begin(uaddr);
+ if (can_do_sanitised_user_access())
+ uaddr = sanitised_user_access_begin(uaddr);
else if (!user_access_begin(uaddr, sizeof(u32)))
return -EFAULT;

@@ -86,8 +86,8 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
{
int ret = 0;

- if (can_do_masked_user_access())
- uaddr = masked_user_access_begin(uaddr);
+ if (can_do_sanitised_user_access())
+ uaddr = sanitised_user_access_begin(uaddr);
else if (!user_access_begin(uaddr, sizeof(u32)))
return -EFAULT;
asm volatile("\n"
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index c8a5ae35c871..612a291fffa7 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -57,11 +57,12 @@ static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
likely((__force unsigned long)(x) <= runtime_const_ptr(USER_PTR_MAX))

/*
- * Masking the user address is an alternative to a conditional
- * user_access_begin that can avoid the fencing. This only works
- * for dense accesses starting at the address.
+ * 'Sanitise' kernel addresses to the base of the unmapped page
+ * between user and kernel addresses using only ALU instructions.
+ * This saves a conditional branch and avoids the fence instruction
+ * otherwise needed to suppress speculative reads of kernel memory.
*/
-static inline void __user *mask_user_address(const void __user *ptr)
+static inline void __user *sanitise_user_address(const void __user *ptr)
{
void __user *ret;
asm("cmp %1,%0\n\t"
@@ -71,10 +72,10 @@ static inline void __user *mask_user_address(const void __user *ptr)
"0" (ptr));
return ret;
}
-#define masked_user_access_begin(x) ({ \
- __auto_type __masked_ptr = (x); \
- __masked_ptr = mask_user_address(__masked_ptr); \
- __uaccess_begin(); __masked_ptr; })
+#define sanitised_user_access_begin(x) ({ \
+ __auto_type __sanitised_ptr = (x); \
+ __sanitised_ptr = sanitise_user_address(__sanitised_ptr); \
+ __uaccess_begin(); __sanitised_ptr; })

/*
* User pointers can have tag bits on x86-64. This scheme tolerates
diff --git a/fs/select.c b/fs/select.c
index 082cf60c7e23..687ee8f6437d 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -776,8 +776,8 @@ static inline int get_sigset_argpack(struct sigset_argpack *to,
{
// the path is hot enough for overhead of copy_from_user() to matter
if (from) {
- if (can_do_masked_user_access())
- from = masked_user_access_begin(from);
+ if (can_do_sanitised_user_access())
+ from = sanitised_user_access_begin(from);
else if (!user_read_access_begin(from, sizeof(*from)))
return -EFAULT;
unsafe_get_user(to->p, &from->p, Efault);
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 1beb5b395d81..f49103c04046 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -33,12 +33,20 @@
})
#endif

-#ifdef masked_user_access_begin
- #define can_do_masked_user_access() 1
+/*
+ * Sanitising the user address is an alternative to a conditional
+ * user_access_begin that avoids synchronising (fence) instructions.
+ * Kernel addresses are 'sanitised' to the base of an unmapped page
+ * between user and kernel addresses, so any access then faults.
+ * This works provided the accesses are 'reasonably sequential'.
+ */
+
+#ifdef sanitised_user_access_begin
+ #define can_do_sanitised_user_access() 1
#else
- #define can_do_masked_user_access() 0
- #define masked_user_access_begin(src) NULL
- #define mask_user_address(src) (src)
+ #define can_do_sanitised_user_access() 0
+ #define sanitised_user_access_begin(src) NULL
+ #define sanitise_user_address(src) (src)
#endif

/*
@@ -162,8 +170,8 @@ _inline_copy_from_user(void *to, const void __user *from, unsigned long n)
might_fault();
if (should_fail_usercopy())
goto fail;
- if (can_do_masked_user_access())
- from = mask_user_address(from);
+ if (can_do_sanitised_user_access())
+ from = sanitise_user_address(from);
else {
if (!access_ok(from, n))
goto fail;
diff --git a/kernel/futex/futex.h b/kernel/futex/futex.h
index 2cd57096c38e..ecd568112a19 100644
--- a/kernel/futex/futex.h
+++ b/kernel/futex/futex.h
@@ -302,8 +302,8 @@ static __always_inline int futex_get_value(u32 *dest, u32 __user *from)
{
u32 val;

- if (can_do_masked_user_access())
- from = masked_user_access_begin(from);
+ if (can_do_sanitised_user_access())
+ from = sanitised_user_access_begin(from);
else if (!user_read_access_begin(from, sizeof(*from)))
return -EFAULT;
unsafe_get_user(val, from, Efault);
@@ -317,8 +317,8 @@ static __always_inline int futex_get_value(u32 *dest, u32 __user *from)

static __always_inline int futex_put_value(u32 val, u32 __user *to)
{
- if (can_do_masked_user_access())
- to = masked_user_access_begin(to);
+ if (can_do_sanitised_user_access())
+ to = sanitised_user_access_begin(to);
else if (!user_write_access_begin(to, sizeof(*to)))
return -EFAULT;
unsafe_put_user(val, to, Efault);
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index 6dc234913dd5..cc4220446647 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -123,10 +123,10 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
kasan_check_write(dst, count);
check_object_size(dst, count, false);

- if (can_do_masked_user_access()) {
+ if (can_do_sanitised_user_access()) {
long retval;

- src = masked_user_access_begin(src);
+ src = sanitised_user_access_begin(src);
retval = do_strncpy_from_user(dst, src, count, count);
user_read_access_end();
return retval;
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 6e489f9e90f1..79ad77204574 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -96,10 +96,10 @@ long strnlen_user(const char __user *str, long count)
if (unlikely(count <= 0))
return 0;

- if (can_do_masked_user_access()) {
+ if (can_do_sanitised_user_access()) {
long retval;

- str = masked_user_access_begin(str);
+ str = sanitised_user_access_begin(str);
retval = do_strnlen_user(str, count, count);
user_read_access_end();
return retval;
diff --git a/net/core/scm.c b/net/core/scm.c
index 66eaee783e8b..98df16c8ab13 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -273,8 +273,8 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)

check_object_size(data, cmlen - sizeof(*cm), true);

- if (can_do_masked_user_access())
- cm = masked_user_access_begin(cm);
+ if (can_do_sanitised_user_access())
+ cm = sanitised_user_access_begin(cm);
else if (!user_write_access_begin(cm, cmlen))
goto efault;

diff --git a/net/socket.c b/net/socket.c
index e8892b218708..eac71edd209e 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -280,8 +280,8 @@ static int move_addr_to_user(struct sockaddr_storage *kaddr, int klen,

BUG_ON(klen > sizeof(struct sockaddr_storage));

- if (can_do_masked_user_access())
- ulen = masked_user_access_begin(ulen);
+ if (can_do_sanitised_user_access())
+ ulen = sanitised_user_access_begin(ulen);
else if (!user_access_begin(ulen, 4))
return -EFAULT;

--
2.39.5