Message-ID: <20210830162545.766864034@linutronix.de>
Date: Mon, 30 Aug 2021 18:27:33 +0200 (CEST)
From: Thomas Gleixner <tglx@...utronix.de>
To: LKML <linux-kernel@...r.kernel.org>
Cc: x86@...nel.org, Al Viro <viro@...iv.linux.org.uk>,
Linus Torvalds <torvalds@...ux-foundation.org>
Subject: [patch 08/10] x86/fpu/signal: Change return type of
__fpu_restore_sig() to boolean
Now that fpu__restore_sig() returns a boolean, get rid of the individual
error codes in __fpu_restore_sig() as well.
Suggested-by: Al Viro <viro@...iv.linux.org.uk>
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
---
arch/x86/kernel/fpu/signal.c | 41 ++++++++++++++++++++---------------------
1 file changed, 20 insertions(+), 21 deletions(-)
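
Editorial note (not part of the patch): the conversion below follows the usual
0/-errno-to-bool pattern, where all failure codes collapse into "false" and the
caller's negation disappears. A minimal standalone sketch of that pattern is
shown here for illustration; the helper names (do_restore, restore_state_old,
restore_state_new) are made up and do not exist in the kernel.

#include <stdbool.h>
#include <stdio.h>
#include <errno.h>

/* hypothetical low-level operation: 0 on success, -errno on failure */
static int do_restore(int fail)
{
	return fail ? -EFAULT : 0;
}

/* old style: propagate the raw error code to the caller */
static int restore_state_old(int fail)
{
	int ret = do_restore(fail);

	if (ret)
		return ret;
	return 0;
}

/* new style: collapse every failure into 'false' */
static bool restore_state_new(int fail)
{
	if (do_restore(fail))
		return false;
	return true;
}

int main(void)
{
	/* old caller had to invert the error code */
	bool success_old = !restore_state_old(0);
	/* new caller reads straight through */
	bool success_new = restore_state_new(0);

	printf("old: %d, new: %d\n", success_old, success_new);
	return 0;
}
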
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -310,8 +310,8 @@ static int restore_fpregs_from_user(void
return 0;
}
-static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
- bool ia32_fxstate)
+static bool __fpu_restore_sig(void __user *buf, void __user *buf_fx,
+ bool ia32_fxstate)
{
int state_size = fpu_kernel_xstate_size;
struct task_struct *tsk = current;
@@ -319,14 +319,14 @@ static int __fpu_restore_sig(void __user
struct user_i387_ia32_struct env;
u64 user_xfeatures = 0;
bool fx_only = false;
- int ret;
+ bool success;
+
if (use_xsave()) {
struct _fpx_sw_bytes fx_sw_user;
- ret = check_xstate_in_sigframe(buf_fx, &fx_sw_user);
- if (unlikely(ret))
- return ret;
+ if (check_xstate_in_sigframe(buf_fx, &fx_sw_user))
+ return false;
fx_only = !fx_sw_user.magic1;
state_size = fx_sw_user.xstate_size;
@@ -342,8 +342,8 @@ static int __fpu_restore_sig(void __user
* faults. If it does, fall back to the slow path below, going
* through the kernel buffer with the enabled pagefault handler.
*/
- return restore_fpregs_from_user(buf_fx, user_xfeatures, fx_only,
- state_size);
+ return !restore_fpregs_from_user(buf_fx, user_xfeatures, fx_only,
+ state_size);
}
/*
@@ -351,9 +351,8 @@ static int __fpu_restore_sig(void __user
* to be ignored for histerical raisins. The legacy state is folded
* in once the larger state has been copied.
*/
- ret = __copy_from_user(&env, buf, sizeof(env));
- if (ret)
- return ret;
+ if (__copy_from_user(&env, buf, sizeof(env)))
+ return false;
/*
* By setting TIF_NEED_FPU_LOAD it is ensured that our xstate is
@@ -380,17 +379,16 @@ static int __fpu_restore_sig(void __user
fpregs_unlock();
if (use_xsave() && !fx_only) {
- ret = copy_sigframe_from_user_to_xstate(&fpu->state.xsave, buf_fx);
- if (ret)
- return ret;
+ if (copy_sigframe_from_user_to_xstate(&fpu->state.xsave, buf_fx))
+ return false;
} else {
if (__copy_from_user(&fpu->state.fxsave, buf_fx,
sizeof(fpu->state.fxsave)))
- return -EFAULT;
+ return false;
/* Reject invalid MXCSR values. */
if (fpu->state.fxsave.mxcsr & ~mxcsr_feature_mask)
- return -EINVAL;
+ return false;
/* Enforce XFEATURE_MASK_FPSSE when XSAVE is enabled */
if (use_xsave())
@@ -414,17 +412,18 @@ static int __fpu_restore_sig(void __user
u64 mask = user_xfeatures | xfeatures_mask_supervisor();
fpu->state.xsave.header.xfeatures &= mask;
- ret = os_xrstor_safe(&fpu->state.xsave, xfeatures_mask_all);
+ success = !os_xrstor_safe(&fpu->state.xsave, xfeatures_mask_all);
} else {
- ret = fxrstor_safe(&fpu->state.fxsave);
+ success = !fxrstor_safe(&fpu->state.fxsave);
}
- if (likely(!ret))
+ if (likely(success))
fpregs_mark_activate();
fpregs_unlock();
- return ret;
+ return success;
}
+
static inline int xstate_sigframe_size(void)
{
return use_xsave() ? fpu_user_xstate_size + FP_XSTATE_MAGIC2_SIZE :
@@ -468,7 +467,7 @@ bool fpu__restore_sig(void __user *buf,
sizeof(struct user_i387_ia32_struct),
NULL, buf);
} else {
- success = !__fpu_restore_sig(buf, buf_fx, ia32_fxstate);
+ success = __fpu_restore_sig(buf, buf_fx, ia32_fxstate);
}
out: