Message-ID: <20250916163252.164475057@linutronix.de>
Date: Tue, 16 Sep 2025 18:33:13 +0200 (CEST)
From: Thomas Gleixner <tglx@...utronix.de>
To: LKML <linux-kernel@...r.kernel.org>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>,
 Peter Zijlstra <peterz@...radead.org>,
 Christophe Leroy <christophe.leroy@...roup.eu>,
 kernel test robot <lkp@...el.com>,
 Russell King <linux@...linux.org.uk>,
 linux-arm-kernel@...ts.infradead.org,
 Nathan Chancellor <nathan@...nel.org>,
 Darren Hart <dvhart@...radead.org>,
 Davidlohr Bueso <dave@...olabs.net>,
 André Almeida <andrealmeid@...lia.com>,
 x86@...nel.org,
 Alexander Viro <viro@...iv.linux.org.uk>,
 Christian Brauner <brauner@...nel.org>,
 Jan Kara <jack@...e.cz>,
 linux-fsdevel@...r.kernel.org
Subject: [patch V2 3/6] uaccess: Provide scoped masked user access regions

User space access regions are tedious and require similar code patterns all
over the place:

	if (!user_read_access_begin(from, sizeof(*from)))
		return -EFAULT;
	unsafe_get_user(val, from, Efault);
	user_read_access_end();
	return 0;
Efault:
	user_read_access_end();
	return -EFAULT;
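
The write side repeats the same shape with the write variants (sketch):

	if (!user_write_access_begin(to, sizeof(*to)))
		return -EFAULT;
	unsafe_put_user(val, to, Efault);
	user_write_access_end();
	return 0;
Efault:
	user_write_access_end();
	return -EFAULT;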

This got worse with the recent addition of masked user access, which
optimizes the speculation prevention:

	if (can_do_masked_user_access())
		from = masked_user_read_access_begin((from));
	else if (!user_read_access_begin(from, sizeof(*from)))
		return -EFAULT;
	unsafe_get_user(val, from, Efault);
	user_read_access_end();
	return 0;
Efault:
	user_read_access_end();
	return -EFAULT;

There have been issues with using the wrong user_*_access_end() variant in
the error path, and other typical copy & paste problems, e.g. using the
wrong fault label in a user accessor, which ends up invoking the wrong
access end variant.
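
For illustration, such a mismatch in the write pattern above would look
like this (sketch):

Efault:
	user_read_access_end();		/* Wrong: region was opened for write */
	return -EFAULT;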

These patterns beg for scopes with automatic cleanup. The resulting outcome
is:
	scoped_masked_user_read_access(from, return -EFAULT,
		scoped_get_user(val, from); );
	return 0;
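
The write side looks the same, using the scoped_put_user() helper (sketch):

	scoped_masked_user_write_access(to, return -EFAULT,
		scoped_put_user(val, to); );
	return 0;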

The scope guarantees that the proper cleanup for the access mode is invoked
in both the success and the failure (fault) path.

The fault label is scope local and always named 'scope_fault:', which
prevents mixing up fault labels between scopes. It is marked __maybe_unused
so that user access code which does not use a fault label, but modifies a
result variable in the exception handler instead, does not cause a 'defined
but not used' warning.
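
A sketch with two independent scopes in one function, each using its own
scope local fault label (pointers and value are made up for illustration):

	scoped_masked_user_read_access(from, return -EFAULT,
		scoped_get_user(val, from); );
	scoped_masked_user_write_access(to, return -EFAULT,
		scoped_put_user(val, to); );
	return 0;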

Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Cc: Christophe Leroy <christophe.leroy@...roup.eu>
---
V2: Remove the shady wrappers around the opening and use scopes with automatic cleanup
---
 include/linux/uaccess.h |  151 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 151 insertions(+)

--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -2,6 +2,7 @@
 #ifndef __LINUX_UACCESS_H__
 #define __LINUX_UACCESS_H__
 
+#include <linux/cleanup.h>
 #include <linux/fault-inject-usercopy.h>
 #include <linux/instrumented.h>
 #include <linux/minmax.h>
@@ -35,9 +36,17 @@
 
 #ifdef masked_user_access_begin
  #define can_do_masked_user_access() 1
+# ifndef masked_user_write_access_begin
+#  define masked_user_write_access_begin masked_user_access_begin
+# endif
+# ifndef masked_user_read_access_begin
+#  define masked_user_read_access_begin masked_user_access_begin
+# endif
 #else
  #define can_do_masked_user_access() 0
  #define masked_user_access_begin(src) NULL
+ #define masked_user_read_access_begin(src) NULL
+ #define masked_user_write_access_begin(src) NULL
  #define mask_user_address(src) (src)
 #endif
 
@@ -569,6 +578,148 @@ static inline void user_access_restore(u
 #define user_read_access_end user_access_end
 #endif
 
+/* Define RW variant so the below _mode macro expansion works */
+#define masked_user_rw_access_begin(u)	masked_user_access_begin(u)
+#define user_rw_access_begin(u, s)	user_access_begin(u, s)
+#define user_rw_access_end()		user_access_end()
+
+/* Scoped user access */
+#define USER_ACCESS_GUARD(_mode)					\
+static __always_inline void __user *					\
+class_masked_user_##_mode##_begin(void __user *ptr)			\
+{									\
+	return ptr;							\
+}									\
+									\
+static __always_inline void						\
+class_masked_user_##_mode##_end(void __user *ptr)			\
+{									\
+	user_##_mode##_access_end();					\
+}									\
+									\
+DEFINE_CLASS(masked_user_ ##_mode## _access, void __user *,		\
+	     class_masked_user_##_mode##_end(_T),			\
+	     class_masked_user_##_mode##_begin(ptr), void __user *ptr)	\
+									\
+static __always_inline class_masked_user_##_mode##_access_t		\
+class_masked_user_##_mode##_access_ptr(void __user *scope)		\
+{									\
+	return scope;							\
+}									\
+
+USER_ACCESS_GUARD(read)
+USER_ACCESS_GUARD(write)
+USER_ACCESS_GUARD(rw)
+#undef USER_ACCESS_GUARD
+
+/**
+ * __scoped_masked_user_access - Open a scope for masked user access
+ * @_mode:	The mode of the access class (read, write, rw)
+ * @_uptr:	The pointer to access user space memory
+ * @_ecode:	Code to inject for the failure case
+ * @_code:	The code to inject inside the scope
+ *
+ * When the scope is left, user_##@_mode##_access_end() is invoked if the
+ * corresponding access begin for @_mode succeeded.
+ *
+ * The user access functions inside the scope must use a fault label, which
+ * is inside the scope. __scoped_masked_user_access() provides the default
+ * label 'scope_fault:', which is placed right before @_ecode. This label is
+ * scope local, so multiple masked user scopes can be in one function.
+ *
+ * The user access helpers scoped_put_user() and scoped_get_user() use the
+ * 'scope_fault' label automatically.
+ *
+ *  A single line statement in the scope::
+ *
+ *	scoped_masked_user_read_access(ptr, return false,
+ *		scoped_get_user(val, ptr););
+ *
+ *  Multi-line statement::
+ *
+ *	scoped_masked_user_rw_access(ptr, return false, {
+ *		scoped_get_user(rval, &ptr->rval);
+ *		scoped_put_user(wval, &ptr->wval);
+ *	});
+ */
+#define __scoped_masked_user_access(_mode, _uptr, _ecode, _code)	\
+do {									\
+	unsigned long size = sizeof(*(_uptr));				\
+	typeof((_uptr)) _tmpptr = (_uptr);				\
+	bool proceed = true;						\
+									\
+	/*								\
+	 * Must be outside the CLASS scope below to handle the fail	\
+	 * of the non-masked access_begin() case correctly.		\
+	 */								\
+	if (can_do_masked_user_access())				\
+		_tmpptr = masked_user_##_mode##_access_begin((_uptr));	\
+	else								\
+		proceed = user_##_mode##_access_begin((_uptr), size);	\
+									\
+	if (!proceed) {							\
+		_ecode;							\
+	} else {							\
+		__label__ scope_fault;					\
+		CLASS(masked_user_##_mode##_access, scope) (_tmpptr);	\
+		/* Force modified pointer usage in @_code */		\
+		const typeof((_uptr)) _uptr = _tmpptr;			\
+									\
+		_code;							\
+		if (0) {						\
+		scope_fault:						\
+			__maybe_unused;					\
+			_ecode;						\
+		}							\
+	}								\
+} while (0)
+
+/**
+ * scoped_masked_user_read_access - Open a scope for masked user read access
+ * @_usrc:	Pointer to the user space address to read from
+ * @_ecode:	Code to inject for the failure case
+ *
+ * For further information see __scoped_masked_user_access() above.
+ */
+#define scoped_masked_user_read_access(_usrc, _ecode, _code)	\
+	__scoped_masked_user_access(read, (_usrc), _ecode, ({_code}))
+
+/**
+ * scoped_masked_user_write_access - Open a scope for masked user write access
+ * @_udst:	Pointer to the user space address to write to
+ * @_ecode:	Code to inject for the failure case
+ *
+ * For further information see __scoped_masked_user_access() above.
+ */
+#define scoped_masked_user_write_access(_udst, _ecode, _code)	\
+	__scoped_masked_user_access(write, (_udst), _ecode, ({_code}))
+
+/**
+ * scoped_masked_user_rw_access - Open a scope for masked user read/write access
+ * @_uptr:	Pointer to the user space address to read from and write to
+ * @_ecode:	Code to inject for the failure case
+ *
+ * For further information see __scoped_masked_user_access() above.
+ */
+#define scoped_masked_user_rw_access(_uptr, _ecode, _code)	\
+	__scoped_masked_user_access(rw, (_uptr), _ecode, ({_code}))
+
+/**
+ * scoped_get_user - Read user memory from within a scoped masked access section
+ * @_dst:	The destination variable to store the read value
+ * @_usrc:	Pointer to the user space address to read from
+ */
+#define scoped_get_user(_dst, _usrc)				\
+	unsafe_get_user((_dst), (_usrc), scope_fault)
+
+/**
+ * scoped_put_user - Write user memory from within a scoped masked access section
+ * @_val:	Value to write
+ * @_udst:	Pointer to the user space address to write to
+ */
+#define scoped_put_user(_val, _udst)				\
+	unsafe_put_user((_val), (_udst), scope_fault)
+
 #ifdef CONFIG_HARDENED_USERCOPY
 void __noreturn usercopy_abort(const char *name, const char *detail,
 			       bool to_user, unsigned long offset,

