lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20220126173358.2951879-2-scgl@linux.ibm.com>
Date:   Wed, 26 Jan 2022 18:33:57 +0100
From:   Janis Schoetterl-Glausch <scgl@...ux.ibm.com>
To:     Arnd Bergmann <arnd@...db.de>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Heiko Carstens <hca@...ux.ibm.com>
Cc:     Janis Schoetterl-Glausch <scgl@...ux.ibm.com>,
        Alexander Viro <viro@...iv.linux.org.uk>,
        Kees Cook <keescook@...omium.org>,
        Christian Borntraeger <borntraeger@...ux.ibm.com>,
        linux-kernel@...r.kernel.org
Subject: [RFC PATCH 1/2] uaccess: Add mechanism for key checked access to user memory

KVM on s390 needs a mechanism to do accesses to guest memory
that honors storage key protection.

On s390 each physical page is associated with 4 access control bits.
On access, these are compared with an access key, which is either
provided by the instruction or taken from the CPU state.
Based on that comparison, the access either succeeds or is prevented.

KVM on s390 needs to be able to emulate this behavior, for example during
instruction emulation, when it makes accesses on behalf of the guest.
Introduce ...copy_{from,to}_user_key functions KVM can use to achieve
this. These differ from their non key counterparts by having an
additional key argument, and delegating to raw_copy_{from,to}_user_key
instead of raw_copy_{from,to}_user. Otherwise they are the same.
If they were to be maintained in architecture specific code they would
be prone to going out of sync with their non key counterparts.
To prevent this, add them to include/linux/uaccess.h.
In order to allow use of ...copy_{from,to}_user_key from common code,
the key argument is ignored on architectures that do not provide
raw_copy_{from,to}_user_key and the functions become functionally
identical to ...copy_{from,to}_user.

Signed-off-by: Janis Schoetterl-Glausch <scgl@...ux.ibm.com>
---
 include/linux/uaccess.h | 107 ++++++++++++++++++++++++++++++++++++++++
 lib/usercopy.c          |  33 +++++++++++++
 2 files changed, 140 insertions(+)

diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index ac0394087f7d..cba64cd23193 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -93,6 +93,11 @@ static inline void force_uaccess_end(mm_segment_t oldfs)
  * Biarch ones should also provide raw_copy_in_user() - similar to the above,
  * but both source and destination are __user pointers (affected by set_fs()
  * as usual) and both source and destination can trigger faults.
+ *
+ * Architectures can also provide raw_copy_{from,to}_user_key variants that take
+ * an additional key argument that can be used for additional memory protection
+ * checks. If these variants are not provided, ...copy_{from,to}_user_key are
+ * identical to their non key counterparts.
  */
 
 static __always_inline __must_check unsigned long
@@ -201,6 +206,108 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 	return n;
 }
 
+/*
+ * ...copy_{from,to}_user_key variants
+ * must be kept in sync with their non key counterparts.
+ */
+#ifndef raw_copy_from_user_key
+/*
+ * Fallback for architectures that do not provide raw_copy_from_user_key:
+ * the key argument is ignored and the copy degenerates to a plain
+ * raw_copy_from_user, making the _key API usable from common code.
+ */
+static __always_inline unsigned long __must_check
+raw_copy_from_user_key(void *to, const void __user *from, unsigned long n,
+		       unsigned long key)
+{
+	return raw_copy_from_user(to, from, n);
+}
+#endif
+/*
+ * Key-checked counterpart of __copy_from_user; must be kept in sync with
+ * it. @key is passed through to raw_copy_from_user_key for the additional
+ * memory protection check. Returns the number of bytes NOT copied.
+ */
+static __always_inline __must_check unsigned long
+__copy_from_user_key(void *to, const void __user *from, unsigned long n,
+		     unsigned long key)
+{
+	might_fault();
+	if (should_fail_usercopy())
+		return n;
+	instrument_copy_from_user(to, from, n);
+	check_object_size(to, n, false);
+	return raw_copy_from_user_key(to, from, n, key);
+}
+
+#ifdef INLINE_COPY_FROM_USER_KEY
+/*
+ * Key-checked counterpart of _copy_from_user; must be kept in sync with
+ * it. On a partial copy the uncopied tail of @to is zeroed so callers
+ * never see uninitialized kernel memory.
+ */
+static inline __must_check unsigned long
+_copy_from_user_key(void *to, const void __user *from, unsigned long n,
+		    unsigned long key)
+{
+	unsigned long res = n;
+	might_fault();
+	if (!should_fail_usercopy() && likely(access_ok(from, n))) {
+		instrument_copy_from_user(to, from, n);
+		res = raw_copy_from_user_key(to, from, n, key);
+	}
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
+}
+#else
+/* Out-of-line version defined in lib/usercopy.c. */
+extern __must_check unsigned long
+_copy_from_user_key(void *, const void __user *, unsigned long, unsigned long);
+#endif
+
+#ifndef raw_copy_to_user_key
+/*
+ * Fallback for architectures that do not provide raw_copy_to_user_key:
+ * the key argument is ignored and the copy degenerates to a plain
+ * raw_copy_to_user, making the _key API usable from common code.
+ */
+static __always_inline unsigned long __must_check
+raw_copy_to_user_key(void __user *to, const void *from, unsigned long n,
+		     unsigned long key)
+{
+	return raw_copy_to_user(to, from, n);
+}
+#endif
+
+/*
+ * Key-checked counterpart of __copy_to_user; must be kept in sync with
+ * it. @key is passed through to raw_copy_to_user_key for the additional
+ * memory protection check. Returns the number of bytes NOT copied.
+ */
+static __always_inline __must_check unsigned long
+__copy_to_user_key(void __user *to, const void *from, unsigned long n,
+		   unsigned long key)
+{
+	might_fault();
+	if (should_fail_usercopy())
+		return n;
+	instrument_copy_to_user(to, from, n);
+	check_object_size(from, n, true);
+	return raw_copy_to_user_key(to, from, n, key);
+}
+
+#ifdef INLINE_COPY_TO_USER_KEY
+/*
+ * Key-checked counterpart of _copy_to_user; must be kept in sync with
+ * it. Returns the number of bytes NOT copied (n on a failed access_ok).
+ */
+static inline __must_check unsigned long
+_copy_to_user_key(void __user *to, const void *from, unsigned long n,
+		  unsigned long key)
+{
+	might_fault();
+	if (should_fail_usercopy())
+		return n;
+	if (access_ok(to, n)) {
+		instrument_copy_to_user(to, from, n);
+		n = raw_copy_to_user_key(to, from, n, key);
+	}
+	return n;
+}
+#else
+/* Out-of-line version defined in lib/usercopy.c. */
+extern __must_check unsigned long
+_copy_to_user_key(void __user *, const void *, unsigned long, unsigned long);
+#endif
+
+/*
+ * Key-checked counterpart of copy_from_user; must be kept in sync with
+ * it. @key is used for the additional memory protection check.
+ * Returns the number of bytes that could NOT be copied (0 on success).
+ *
+ * Delegate to _copy_from_user_key (not _copy_from_user), otherwise the
+ * key argument is silently dropped and no key check ever happens.
+ */
+static __always_inline unsigned long __must_check
+copy_from_user_key(void *to, const void __user *from, unsigned long n,
+		   unsigned long key)
+{
+	if (likely(check_copy_size(to, n, false)))
+		n = _copy_from_user_key(to, from, n, key);
+	return n;
+}
+
+/*
+ * Key-checked counterpart of copy_to_user; must be kept in sync with
+ * it. @key is used for the additional memory protection check.
+ * Returns the number of bytes that could NOT be copied (0 on success).
+ *
+ * Delegate to _copy_to_user_key (not _copy_to_user), otherwise the
+ * key argument is silently dropped and no key check ever happens.
+ */
+static __always_inline unsigned long __must_check
+copy_to_user_key(void __user *to, const void *from, unsigned long n,
+		 unsigned long key)
+{
+	if (likely(check_copy_size(from, n, true)))
+		n = _copy_to_user_key(to, from, n, key);
+	return n;
+}
+
 #ifndef copy_mc_to_kernel
 /*
  * Without arch opt-in this generic copy_mc_to_kernel() will not handle
diff --git a/lib/usercopy.c b/lib/usercopy.c
index 7413dd300516..c13394d0f306 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -37,6 +37,39 @@ unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
 EXPORT_SYMBOL(_copy_to_user);
 #endif
 
+#ifndef INLINE_COPY_FROM_USER_KEY
+/*
+ * Out-of-line _copy_from_user_key; must be kept in sync with the inline
+ * variant in include/linux/uaccess.h and with _copy_from_user above.
+ * On a partial copy the uncopied tail of @to is zeroed.
+ */
+unsigned long _copy_from_user_key(void *to, const void __user *from,
+				  unsigned long n, unsigned long key)
+{
+	unsigned long res = n;
+	might_fault();
+	if (!should_fail_usercopy() && likely(access_ok(from, n))) {
+		instrument_copy_from_user(to, from, n);
+		res = raw_copy_from_user_key(to, from, n, key);
+	}
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
+}
+EXPORT_SYMBOL(_copy_from_user_key);
+#endif
+
+#ifndef INLINE_COPY_TO_USER_KEY
+/*
+ * Out-of-line _copy_to_user_key; must be kept in sync with the inline
+ * variant in include/linux/uaccess.h and with _copy_to_user above.
+ * Returns the number of bytes NOT copied.
+ */
+unsigned long _copy_to_user_key(void __user *to, const void *from,
+				unsigned long n, unsigned long key)
+{
+	might_fault();
+	if (should_fail_usercopy())
+		return n;
+	if (likely(access_ok(to, n))) {
+		instrument_copy_to_user(to, from, n);
+		n = raw_copy_to_user_key(to, from, n, key);
+	}
+	return n;
+}
+EXPORT_SYMBOL(_copy_to_user_key);
+#endif
+
 /**
  * check_zeroed_user: check if a userspace buffer only contains zero bytes
  * @from: Source address, in userspace.
-- 
2.32.0

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ