Message-Id: <20200520172145.23284-23-pbonzini@redhat.com>
Date: Wed, 20 May 2020 13:21:43 -0400
From: Paolo Bonzini <pbonzini@...hat.com>
To: linux-kernel@...r.kernel.org, kvm@...r.kernel.org
Cc: vkuznets@...hat.com, Joerg Roedel <jroedel@...e.de>
Subject: [PATCH 22/24] uaccess: add memzero_user

This will be used by KVM. Add it to lib/ so that everyone can use it.
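As an illustration only (the helper below and its arguments are hypothetical
and not part of this patch), a caller passes a userspace pointer plus a length
and propagates the return value, which is 0 on success and -EFAULT if the
access faults:

	/*
	 * Hypothetical caller, for illustration only: copy a result to
	 * userspace, then zero the rest of the buffer so userspace does
	 * not see stale bytes past the reported length.
	 */
	static int fill_user_buffer(void __user *ubuf, size_t ubuf_size,
				    const void *result, size_t result_size)
	{
		if (result_size > ubuf_size)
			return -EINVAL;

		if (copy_to_user(ubuf, result, result_size))
			return -EFAULT;

		/* Zero the tail [result_size, ubuf_size); a zero-length tail is a no-op. */
		return memzero_user(ubuf + result_size, ubuf_size - result_size);
	}
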
Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
---
 include/linux/uaccess.h |  1 +
 lib/usercopy.c          | 63 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 64 insertions(+)
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 67f016010aad..bd8c85b50e67 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -232,6 +232,7 @@ __copy_from_user_inatomic_nocache(void *to, const void __user *from,
#endif /* ARCH_HAS_NOCACHE_UACCESS */
extern __must_check int check_zeroed_user(const void __user *from, size_t size);
+extern __must_check int memzero_user(void __user *from, size_t size);
/**
* copy_struct_from_user: copy a struct from userspace
diff --git a/lib/usercopy.c b/lib/usercopy.c
index cbb4d9ec00f2..82997862bf02 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -33,6 +33,69 @@ unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
EXPORT_SYMBOL(_copy_to_user);
#endif
+/**
+ * memzero_user: write zero bytes to a userspace buffer
+ * @from: Address of the buffer to zero, in userspace.
+ * @size: Size of buffer.
+ *
+ * This is effectively shorthand for "memset(from, 0, size)" for
+ * userspace addresses.
+ *
+ * Returns:
+ * * 0: zeroes have been written to the buffer.
+ * * -EFAULT: access to userspace failed.
+ */
+int memzero_user(void __user *from, size_t size)
+{
+	unsigned long val = 0;
+	unsigned long mask = 0;
+	uintptr_t align = (uintptr_t) from % sizeof(unsigned long);
+
+	if (unlikely(size == 0))
+		return 0;
+
+	from -= align;
+	size += align;
+
+	if (!user_access_begin(from, ALIGN(size, sizeof(unsigned long))))
+		return -EFAULT;
+
+	if (align) {
+		unsafe_get_user(val, (unsigned long __user *) from, err_fault);
+		/* Prepare a mask to keep the first "align" bytes. */
+		mask = aligned_byte_mask(align);
+	}
+
+	if (size >= sizeof(unsigned long)) {
+		/* The mask only applies to the first full word. */
+		val &= mask;
+		mask = 0;
+		do {
+			unsafe_put_user(val, (unsigned long __user *) from, err_fault);
+			from += sizeof(unsigned long);
+			size -= sizeof(unsigned long);
+			val = 0;
+		} while (size >= sizeof(unsigned long));
+
+		if (!size)
+			goto done;
+		unsafe_get_user(val, (unsigned long __user *) from, err_fault);
+	}
+
+	/* Bytes after the first "size" have to be kept too. */
+	mask |= ~aligned_byte_mask(size);
+	val &= mask;
+	unsafe_put_user(val, (unsigned long __user *) from, err_fault);
+
+done:
+	user_access_end();
+	return 0;
+err_fault:
+	user_access_end();
+	return -EFAULT;
+}
+EXPORT_SYMBOL(memzero_user);
+
/**
* check_zeroed_user: check if a userspace buffer only contains zero bytes
* @from: Source address, in userspace.
--
2.18.2