Message-Id: <1476734984-13839-3-git-send-email-riel@redhat.com>
Date: Mon, 17 Oct 2016 16:09:43 -0400
From: riel@...hat.com
To: linux-kernel@...r.kernel.org
Cc: mingo@...nel.org, bp@...en8.de, torvalds@...ux-foundation.org,
luto@...nel.org, dave.hansen@...el.linux.com, tglx@...utronix.de,
hpa@...or.com
Subject: [PATCH RFC 2/3] x86/fpu: prepare misc FPU state handling code for lazy FPU loading
From: Rik van Riel <riel@...hat.com>
The patch that defers loading of FPU state until return to userspace
can result in a task's FPU state not being loaded when kernel code
paths want to access it.
Wrap those code paths in a loop that ensures the FPU state is valid before
any operations begin, and still valid afterwards.
Right now this code does nothing, since the FPU state of a task is loaded
at context switch time and will always be valid.
After the patch that defers loading of FPU state, the condition at the end
of the loop will ensure that the operation is restarted if the task was
context switched away during the operation.
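In sketch form, with do_fpu_operation() as a made-up stand-in for the
actual work, each wrapped code path looks roughly like this:

    do {
        /* Load this task's FPU state into the registers if necessary. */
        make_fpregs_active();

        /* May fault, sleep, and get context switched away. */
        err = do_fpu_operation();
    } while (unlikely(!fpregs_active()));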
Some of these loops involve copying FPU state to or from user space
memory. The potential for page faults means this code cannot run with
preemption disabled, making a retry loop the easiest option.
Sufficiently fast operations to or from kernel memory can simply be
run with preemption disabled.
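A rough example of such a fast path, with write_pkru() standing in for
a quick, non-faulting register access (the wrapper function itself is
illustrative only):

    static void example_set_pkru_bits(u32 new_bits)
    {
        preempt_disable();
        /* The FPU state stays loaded until preemption is re-enabled. */
        make_fpregs_active();
        write_pkru(read_pkru() | new_bits);
        preempt_enable();
    }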
Signed-off-by: Rik van Riel <riel@...hat.com>
---
arch/x86/kernel/fpu/core.c | 2 ++
arch/x86/kernel/fpu/signal.c | 28 +++++++++++++++++-----------
arch/x86/kernel/fpu/xstate.c | 5 +++++
arch/x86/mm/pkeys.c | 11 +++++++----
4 files changed, 31 insertions(+), 15 deletions(-)
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 30f11ab6c07e..34ba9d47c20f 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -490,11 +490,13 @@ void fpu__clear(struct fpu *fpu)
/* FPU state will be reallocated lazily at the first use. */
fpu__drop(fpu);
} else {
+ preempt_disable();
if (!fpu->fpstate_active) {
fpu__activate_curr(fpu);
user_fpu_begin();
}
copy_init_fpstate_to_fpregs();
+ preempt_enable();
}
}
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 83c23c230b4c..15128532aa81 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -121,12 +121,16 @@ static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf)
{
int err;
- if (use_xsave())
- err = copy_xregs_to_user(buf);
- else if (use_fxsr())
- err = copy_fxregs_to_user((struct fxregs_state __user *) buf);
- else
- err = copy_fregs_to_user((struct fregs_state __user *) buf);
+ do {
+ make_fpregs_active();
+
+ if (use_xsave())
+ err = copy_xregs_to_user(buf);
+ else if (use_fxsr())
+ err = copy_fxregs_to_user((struct fxregs_state __user *) buf);
+ else
+ err = copy_fregs_to_user((struct fregs_state __user *) buf);
+ } while (unlikely(!fpregs_active()));
if (unlikely(err) && __clear_user(buf, fpu_user_xstate_size))
err = -EFAULT;
@@ -350,11 +354,13 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
* For 64-bit frames and 32-bit fsave frames, restore the user
* state to the registers directly (with exceptions handled).
*/
- user_fpu_begin();
- if (copy_user_to_fpregs_zeroing(buf_fx, xfeatures, fx_only)) {
- fpu__clear(fpu);
- return -1;
- }
+ do {
+ user_fpu_begin();
+ if (copy_user_to_fpregs_zeroing(buf_fx, xfeatures, fx_only)) {
+ fpu__clear(fpu);
+ return -1;
+ }
+ } while (unlikely(!fpregs_active()));
}
return 0;
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 76bc2a1a3a79..798cfb242b18 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -896,6 +896,9 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
/* Shift the bits in to the correct place in PKRU for pkey: */
new_pkru_bits <<= pkey_shift;
+ preempt_disable();
+ make_fpregs_active();
+
/* Get old PKRU and mask off any old bits in place: */
old_pkru = read_pkru();
old_pkru &= ~((PKRU_AD_BIT|PKRU_WD_BIT) << pkey_shift);
@@ -903,6 +906,8 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
/* Write old part along with new part: */
write_pkru(old_pkru | new_pkru_bits);
+ preempt_enable();
+
return 0;
}
diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c
index e8c474451928..9eba07353404 100644
--- a/arch/x86/mm/pkeys.c
+++ b/arch/x86/mm/pkeys.c
@@ -32,10 +32,13 @@ int __execute_only_pkey(struct mm_struct *mm)
* can make fpregs inactive.
*/
preempt_disable();
- if (fpregs_active() &&
- !__pkru_allows_read(read_pkru(), PKEY_DEDICATED_EXECUTE_ONLY)) {
- preempt_enable();
- return PKEY_DEDICATED_EXECUTE_ONLY;
+ if (fpstate_active()) {
+ make_fpregs_active();
+ if (!__pkru_allows_read(read_pkru(),
+ PKEY_DEDICATED_EXECUTE_ONLY)) {
+ preempt_enable();
+ return PKEY_DEDICATED_EXECUTE_ONLY;
+ }
}
preempt_enable();
ret = arch_set_user_pkey_access(current, PKEY_DEDICATED_EXECUTE_ONLY,
--
2.7.4