Message-ID: <20250813151939.601040635@linutronix.de>
Date: Wed, 13 Aug 2025 17:57:02 +0200 (CEST)
From: Thomas Gleixner <tglx@...utronix.de>
To: LKML <linux-kernel@...r.kernel.org>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>,
 Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
 Peter Zijlstra <peterz@...radead.org>,
 Darren Hart <dvhart@...radead.org>,
 Davidlohr Bueso <dave@...olabs.net>,
 André Almeida <andrealmeid@...lia.com>,
 x86@...nel.org,
 Alexander Viro <viro@...iv.linux.org.uk>,
 Christian Brauner <brauner@...nel.org>,
 Jan Kara <jack@...e.cz>,
 linux-fsdevel@...r.kernel.org
Subject: [patch 1/4] uaccess: Provide common helpers for masked user access

commit 2865baf54077 ("x86: support user address masking instead of
non-speculative conditional") introduced an architecture-specific
optimization of the Spectre-V1 mitigation for unsafe_get/put_user().
Currently only x86_64 supports it.

The required code pattern is:

	if (can_do_masked_user_access())
		dst = masked_user_access_begin(dst);
	else if (!user_write_access_begin(dst, sizeof(*dst)))
		return -EFAULT;
	unsafe_put_user(val, dst, Efault);
	user_write_access_end();
	return 0;
Efault:
	user_write_access_end();
	return -EFAULT;

The futex code already grew an instance of this pattern, and there are
other places which can be optimized the same way when the calling code has
already verified that the user pointer is both aligned and actually points
into user space.

Take the futex code as a template and provide generic inline helpers to
avoid spreading copies of this pattern all over the tree.

This provides get/put_user_masked_uNN(), where NN is the variable size in
bits, i.e. 8, 16, 32 or 64.
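
As an illustration, a caller which has already verified that the pointer
points into user space can use these as drop-in replacements for
__get_user()/__put_user(). A minimal hypothetical sketch, not part of this
patch:

	/* Hypothetical example; uaddr was verified by the caller */
	static int update_user_val(u32 __user *uaddr, u32 newval, u32 *oldval)
	{
		if (get_user_masked_u32(oldval, uaddr))
			return -EFAULT;
		return put_user_masked_u32(newval, uaddr);
	}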

The second set of helpers encapsulates the prologue for larger access
patterns, e.g. multiple consecutive unsafe_get/put_user() invocations:

	if (can_do_masked_user_access())
		dst = masked_user_access_begin(dst);
	else if (!user_write_access_begin(dst, sizeof(*dst)))
		return -EFAULT;
	unsafe_put_user(a, &dst->a, Efault);
	unsafe_put_user(b, &dst->b, Efault);
	user_write_access_end();
	return 0;
Efault:
	user_write_access_end();
	return -EFAULT;

which allows shortening this to:

	if (!user_write_masked_begin(dst))
		return -EFAULT;
	unsafe_put_user(a, &dst->a, Efault);
	...
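
Spelled out in full, the shortened variant of the example above is:

	if (!user_write_masked_begin(dst))
		return -EFAULT;
	unsafe_put_user(a, &dst->a, Efault);
	unsafe_put_user(b, &dst->b, Efault);
	user_write_access_end();
	return 0;
Efault:
	user_write_access_end();
	return -EFAULT;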

Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
---
 include/linux/uaccess.h |   78 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 78 insertions(+)

--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -569,6 +569,84 @@ static inline void user_access_restore(u
 #define user_read_access_end user_access_end
 #endif
 
+/*
+ * Convenience macros to avoid spreading this pattern all over the place
+ */
+#define user_read_masked_begin(src) ({					\
+	bool __ret = true;						\
+									\
+	if (can_do_masked_user_access())				\
+		src = masked_user_access_begin(src);			\
+	else if (!user_read_access_begin(src, sizeof(*src)))		\
+		__ret = false;						\
+	__ret;								\
+})
+
+#define user_write_masked_begin(dst) ({					\
+	bool __ret = true;						\
+									\
+	if (can_do_masked_user_access())				\
+		dst = masked_user_access_begin(dst);			\
+	else if (!user_write_access_begin(dst, sizeof(*dst)))		\
+		__ret = false;						\
+	__ret;								\
+})
+
+/*
+ * get_user_masked_uNN() and put_user_masked_uNN() where NN is the size of
+ * the variable in bits. Supported values are 8, 16, 32 and 64.
+ *
+ * These functions can be used to optimize __get_user() and __put_user()
+ * scenarios, if the architecture supports masked user access. This avoids
+ * the more costly speculation barriers. If the architecture does not
+ * support it, it falls back to user_*_access_begin().
+ *
+ * As with __get/put_user() the user pointer has to be verified by the
+ * caller to be actually in user space.
+ */
+#define GEN_GET_USER_MASKED(type)					\
+	static __always_inline						\
+	int get_user_masked_##type (type *dst, type __user *src)	\
+	{								\
+		type val;						\
+									\
+		if (!user_read_masked_begin(src))			\
+			return -EFAULT;					\
+		unsafe_get_user(val, src, Efault);			\
+		user_read_access_end();					\
+		*dst = val;						\
+		return 0;						\
+	Efault:								\
+		user_read_access_end();					\
+		return -EFAULT;						\
+	}
+
+GEN_GET_USER_MASKED(u8)
+GEN_GET_USER_MASKED(u16)
+GEN_GET_USER_MASKED(u32)
+GEN_GET_USER_MASKED(u64)
+#undef GEN_GET_USER_MASKED
+
+#define GEN_PUT_USER_MASKED(type)					\
+	static __always_inline						\
+	int put_user_masked_##type (type val, type __user *dst)		\
+	{								\
+		if (!user_write_masked_begin(dst))			\
+			return -EFAULT;					\
+		unsafe_put_user(val, dst, Efault);			\
+		user_write_access_end();				\
+		return 0;						\
+	Efault:								\
+		user_write_access_end();				\
+		return -EFAULT;						\
+	}
+
+GEN_PUT_USER_MASKED(u8)
+GEN_PUT_USER_MASKED(u16)
+GEN_PUT_USER_MASKED(u32)
+GEN_PUT_USER_MASKED(u64)
+#undef GEN_PUT_USER_MASKED
+
 #ifdef CONFIG_HARDENED_USERCOPY
 void __noreturn usercopy_abort(const char *name, const char *detail,
 			       bool to_user, unsigned long offset,

