Message-Id: <1485426179-13681-13-git-send-email-mingo@kernel.org>
Date: Thu, 26 Jan 2017 11:22:57 +0100
From: Ingo Molnar <mingo@...nel.org>
To: linux-kernel@...r.kernel.org
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Andy Lutomirski <luto@...capital.net>,
Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>,
Fenghua Yu <fenghua.yu@...el.com>,
"H . Peter Anvin" <hpa@...or.com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Oleg Nesterov <oleg@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Rik van Riel <riel@...hat.com>,
Thomas Gleixner <tglx@...utronix.de>,
Yu-cheng Yu <yu-cheng.yu@...el.com>
Subject: [PATCH 12/14] x86/fpu: Remove 'ubuf' parameter from the copy_kernel_to_xstate() API
copy_kernel_to_xstate() is only called for the kernel-buffer (kbuf) case of xstateregs_set(); the user-space case goes through copy_user_to_xstate(). The 'ubuf' parameter and the __copy_from_user() fallback branches are therefore dead code: remove them.

No change in functionality.
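For reference, the prototype change (taken from the xstate.h hunk below) is from:

	int copy_kernel_to_xstate(const void *kbuf, const void __user *ubuf, struct xregs_state *xsave);

to:

	int copy_kernel_to_xstate(const void *kbuf, struct xregs_state *xsave);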
Cc: Andy Lutomirski <luto@...nel.org>
Cc: Borislav Petkov <bp@...en8.de>
Cc: Dave Hansen <dave.hansen@...ux.intel.com>
Cc: Fenghua Yu <fenghua.yu@...el.com>
Cc: H. Peter Anvin <hpa@...or.com>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Oleg Nesterov <oleg@...hat.com>
Cc: Rik van Riel <riel@...hat.com>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Yu-cheng Yu <yu-cheng.yu@...el.com>
Signed-off-by: Ingo Molnar <mingo@...nel.org>
---
 arch/x86/include/asm/fpu/xstate.h |  2 +-
 arch/x86/kernel/fpu/regset.c      |  2 +-
 arch/x86/kernel/fpu/xstate.c      | 17 +++--------------
 3 files changed, 5 insertions(+), 16 deletions(-)
diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h
index 79af79dbcab6..f10889bc0c88 100644
--- a/arch/x86/include/asm/fpu/xstate.h
+++ b/arch/x86/include/asm/fpu/xstate.h
@@ -50,6 +50,6 @@ const void *get_xsave_field_ptr(int xstate_field);
 int using_compacted_format(void);
 int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset, unsigned int size);
 int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int offset, unsigned int size);
-int copy_kernel_to_xstate(const void *kbuf, const void __user *ubuf, struct xregs_state *xsave);
+int copy_kernel_to_xstate(const void *kbuf, struct xregs_state *xsave);
 int copy_user_to_xstate(const void *kbuf, const void __user *ubuf, struct xregs_state *xsave);
 #endif
diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c
index ea9e53d2bd8a..ffcbed521fd9 100644
--- a/arch/x86/kernel/fpu/regset.c
+++ b/arch/x86/kernel/fpu/regset.c
@@ -135,7 +135,7 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
 
 	if (boot_cpu_has(X86_FEATURE_XSAVES)) {
 		if (kbuf)
-			ret = copy_kernel_to_xstate(kbuf, ubuf, xsave);
+			ret = copy_kernel_to_xstate(kbuf, xsave);
 		else
 			ret = copy_user_to_xstate(kbuf, ubuf, xsave);
 	} else {
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index aed9b2cb4d3d..3b89b2784665 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -1088,8 +1088,7 @@ int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned i
  * there we check the CPU has XSAVES and a whole standard-sized buffer
  * exists.
  */
-int copy_kernel_to_xstate(const void *kbuf, const void __user *ubuf,
-		     struct xregs_state *xsave)
+int copy_kernel_to_xstate(const void *kbuf, struct xregs_state *xsave)
 {
 	unsigned int offset, size;
 	int i;
@@ -1099,12 +1098,7 @@ int copy_kernel_to_xstate(const void *kbuf, const void __user *ubuf,
 	offset = offsetof(struct xregs_state, header);
 	size = sizeof(xfeatures);
 
-	if (kbuf) {
-		memcpy(&xfeatures, kbuf + offset, size);
-	} else {
-		if (__copy_from_user(&xfeatures, ubuf + offset, size))
-			return -EFAULT;
-	}
+	memcpy(&xfeatures, kbuf + offset, size);
 
 	/*
 	 * Reject if the user sets any disabled or supervisor features:
@@ -1123,12 +1117,7 @@ int copy_kernel_to_xstate(const void *kbuf, const void __user *ubuf,
 		offset = xstate_offsets[i];
 		size = xstate_sizes[i];
 
-		if (kbuf) {
-			memcpy(dst, kbuf + offset, size);
-		} else {
-			if (__copy_from_user(dst, ubuf + offset, size))
-				return -EFAULT;
-		}
+		memcpy(dst, kbuf + offset, size);
 		}
 	}
 
--
2.7.4