Message-Id: <20181219213338.26619-2-igor.stoppa@huawei.com>
Date: Wed, 19 Dec 2018 23:33:27 +0200
From: Igor Stoppa <igor.stoppa@...il.com>
To: Andy Lutomirski <luto@...capital.net>,
	Matthew Wilcox <willy@...radead.org>,
	Peter Zijlstra <peterz@...radead.org>,
	Dave Hansen <dave.hansen@...ux.intel.com>,
	Mimi Zohar <zohar@...ux.vnet.ibm.com>
Cc: igor.stoppa@...wei.com, Nadav Amit <nadav.amit@...il.com>,
	Kees Cook <keescook@...omium.org>,
	linux-integrity@...r.kernel.org,
	kernel-hardening@...ts.openwall.com, linux-mm@...ck.org,
	linux-kernel@...r.kernel.org
Subject: [PATCH 01/12] x86_64: memset_user()

Create an x86_64-specific version of memset() for user space, based on
clear_user().

This will be used to implement wr_memset() in the __wr_after_init
scenario, where write-rare variables have an alternate mapping that is
used for writing (a usage sketch is included below the diffstat).

Signed-off-by: Igor Stoppa <igor.stoppa@...wei.com>
CC: Andy Lutomirski <luto@...capital.net>
CC: Nadav Amit <nadav.amit@...il.com>
CC: Matthew Wilcox <willy@...radead.org>
CC: Peter Zijlstra <peterz@...radead.org>
CC: Kees Cook <keescook@...omium.org>
CC: Dave Hansen <dave.hansen@...ux.intel.com>
CC: Mimi Zohar <zohar@...ux.vnet.ibm.com>
CC: linux-integrity@...r.kernel.org
CC: kernel-hardening@...ts.openwall.com
CC: linux-mm@...ck.org
CC: linux-kernel@...r.kernel.org
---
arch/x86/include/asm/uaccess_64.h | 6 ++++
arch/x86/lib/usercopy_64.c | 54 +++++++++++++++++++++++++++++++
2 files changed, 60 insertions(+)
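
Not part of the patch, just for illustration: a minimal sketch of how
wr_memset() could sit on top of memset_user(), assuming a hypothetical
helper __wr_alias() that returns the temporary, writable user-space
alias of a write-rare kernel object:

	static int wr_memset(void *dst, int c, __kernel_size_t n)
	{
		/* __wr_alias() is hypothetical, not part of this series */
		void __user *alias = __wr_alias(dst);
		unsigned long left;

		/* memset_user() returns the number of bytes NOT set */
		left = memset_user(alias, c, n);
		return left ? -EFAULT : 0;
	}
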
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index a9d637bc301d..f194bfce4866 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -213,4 +213,10 @@ copy_user_handle_tail(char *to, char *from, unsigned len);
unsigned long
mcsafe_handle_tail(char *to, char *from, unsigned len);

+unsigned long __must_check
+memset_user(void __user *mem, int c, unsigned long len);
+
+unsigned long __must_check
+__memset_user(void __user *mem, int c, unsigned long len);
+
#endif /* _ASM_X86_UACCESS_64_H */
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 1bd837cdc4b1..84f8f8a20b30 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -9,6 +9,60 @@
#include <linux/uaccess.h>
#include <linux/highmem.h>

+/*
+ * Memset Userspace
+ */
+
+unsigned long __memset_user(void __user *addr, int c, unsigned long size)
+{
+	long __d0;
+	unsigned long pattern = 0;
+	int i;
+
+	for (i = 0; i < 8; i++)
+		pattern = (pattern << 8) | (0xFF & c);
+	might_fault();
+	/* no memory constraint: gcc doesn't know about this memory */
+	stac();
+	asm volatile(
+		"	movq %[val], %%rdx\n"
+		"	testq %[size8],%[size8]\n"
+		"	jz 4f\n"
+		"0:	mov %%rdx,(%[dst])\n"
+		"	addq $8,%[dst]\n"
+		"	decl %%ecx ; jnz 0b\n"
+		"4:	movq %[size1],%%rcx\n"
+		"	testl %%ecx,%%ecx\n"
+		"	jz 2f\n"
+		"1:	movb %%dl,(%[dst])\n"
+		"	incq %[dst]\n"
+		"	decl %%ecx ; jnz 1b\n"
+		"2:\n"
+		".section .fixup,\"ax\"\n"
+		"3:	lea 0(%[size1],%[size8],8),%[size8]\n"
+		"	jmp 2b\n"
+		".previous\n"
+		_ASM_EXTABLE_UA(0b, 3b)
+		_ASM_EXTABLE_UA(1b, 2b)
+		: [size8] "=&c"(size), [dst] "=&D" (__d0)
+		: [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
+		  [val] "ri"(pattern)
+		: "rdx");
+
+	clac();
+	return size;
+}
+EXPORT_SYMBOL(__memset_user);
+
+unsigned long memset_user(void __user *to, int c, unsigned long n)
+{
+	if (access_ok(VERIFY_WRITE, to, n))
+		return __memset_user(to, c, n);
+	return n;
+}
+EXPORT_SYMBOL(memset_user);
+
+
/*
* Zero Userspace
*/
--
2.19.1