Message-ID: <bbf53506-43df-78cc-c954-ed4e8384b1d2@redhat.com>
Date: Tue, 12 Oct 2021 19:30:58 +0200
From: Paolo Bonzini <pbonzini@...hat.com>
To: Thomas Gleixner <tglx@...utronix.de>,
LKML <linux-kernel@...r.kernel.org>
Cc: x86@...nel.org, "Chang S. Bae" <chang.seok.bae@...el.com>,
Dave Hansen <dave.hansen@...ux.intel.com>,
Arjan van de Ven <arjan@...ux.intel.com>,
kvm@...r.kernel.org
Subject: Re: [patch 15/31] x86/fpu: Rework copy_xstate_to_uabi_buf()
On 12/10/21 02:00, Thomas Gleixner wrote:
> Prepare for replacing KVM's xstate-to-user copy function by extending
> copy_xstate_to_uabi_buf() with a pkru argument that lets the caller hand
> in the PKRU value. This is required for KVM because the guest PKRU is
> not accessible via current. Fix up all call sites accordingly.
>
> Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
> ---
> arch/x86/kernel/fpu/xstate.c | 34 ++++++++++++++++++++++++++--------
> arch/x86/kernel/fpu/xstate.h | 3 +++
> 2 files changed, 29 insertions(+), 8 deletions(-)
>
> --- a/arch/x86/kernel/fpu/xstate.c
> +++ b/arch/x86/kernel/fpu/xstate.c
> @@ -940,9 +940,10 @@ static void copy_feature(bool from_xstat
> }
>
> /**
> - * copy_xstate_to_uabi_buf - Copy kernel saved xstate to a UABI buffer
> + * __copy_xstate_to_uabi_buf - Copy kernel saved xstate to a UABI buffer
> * @to: membuf descriptor
> - * @tsk: The task from which to copy the saved xstate
> + * @xsave: The xsave from which to copy
> + * @pkru_val: The PKRU value to store in the PKRU component
> * @copy_mode: The requested copy mode
> *
> * Converts from kernel XSAVE or XSAVES compacted format to UABI conforming
> @@ -951,11 +952,10 @@ static void copy_feature(bool from_xstat
> *
> * It supports partial copy but @to.pos always starts from zero.
> */
> -void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
> - enum xstate_copy_mode copy_mode)
> +void __copy_xstate_to_uabi_buf(struct membuf to, struct xregs_state *xsave,
> + u32 pkru_val, enum xstate_copy_mode copy_mode)
> {
> const unsigned int off_mxcsr = offsetof(struct fxregs_state, mxcsr);
> - struct xregs_state *xsave = &tsk->thread.fpu.state.xsave;
> struct xregs_state *xinit = &init_fpstate.xsave;
> struct xstate_header header;
> unsigned int zerofrom;
> @@ -1033,10 +1033,9 @@ void copy_xstate_to_uabi_buf(struct memb
> struct pkru_state pkru = {0};
> /*
> * PKRU is not necessarily up to date in the
> - * thread's XSAVE buffer. Fill this part from the
> - * per-thread storage.
> + * XSAVE buffer. Use the provided value.
> */
> - pkru.pkru = tsk->thread.pkru;
> + pkru.pkru = pkru_val;
> membuf_write(&to, &pkru, sizeof(pkru));
> } else {
> copy_feature(header.xfeatures & BIT_ULL(i), &to,
> @@ -1056,6 +1055,25 @@ void copy_xstate_to_uabi_buf(struct memb
> membuf_zero(&to, to.left);
> }
>
> +/**
> + * copy_xstate_to_uabi_buf - Copy kernel saved xstate to a UABI buffer
> + * @to: membuf descriptor
> + * @tsk: The task from which to copy the saved xstate
> + * @copy_mode: The requested copy mode
> + *
> + * Converts from kernel XSAVE or XSAVES compacted format to UABI conforming
> + * format, i.e. from the kernel internal hardware dependent storage format
> + * to the requested @mode. UABI XSTATE is always uncompacted!
> + *
> + * It supports partial copy but @to.pos always starts from zero.
> + */
> +void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
> + enum xstate_copy_mode copy_mode)
> +{
> + __copy_xstate_to_uabi_buf(to, &tsk->thread.fpu.state.xsave,
> + tsk->thread.pkru, copy_mode);
> +}
> +
> static int copy_from_buffer(void *dst, unsigned int offset, unsigned int size,
> const void *kbuf, const void __user *ubuf)
> {
> --- a/arch/x86/kernel/fpu/xstate.h
> +++ b/arch/x86/kernel/fpu/xstate.h
> @@ -15,4 +15,7 @@ static inline void xstate_init_xcomp_bv(
> xsave->header.xcomp_bv = mask | XCOMP_BV_COMPACTED_FORMAT;
> }
>
> +extern void __copy_xstate_to_uabi_buf(struct membuf to, struct xregs_state *xsave,
> + u32 pkru_val, enum xstate_copy_mode copy_mode);
> +
> #endif
>
Reviewed-by: Paolo Bonzini <pbonzini@...hat.com>
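
For reference, the point of the new @pkru_val argument is that KVM keeps
the guest's PKRU in vcpu->arch.pkru rather than in current->thread.pkru,
so the generic helper can no longer fetch it on its own. A later patch in
this series wires KVM up to the new __copy_xstate_to_uabi_buf(); the
sketch below is illustrative only (the wrapper name and the exact shape
of the guest FPU access are approximations, not the literal follow-up
patch):

	/*
	 * Illustrative KVM-side user of the new helper. The guest PKRU
	 * lives in vcpu->arch.pkru, not in current->thread.pkru, which
	 * is why it is handed in explicitly. The wrapper name here is
	 * hypothetical.
	 */
	static void kvm_copy_guest_xsave_to_uabi(struct kvm_vcpu *vcpu,
						 struct kvm_xsave *guest_xsave)
	{
		struct membuf mb = {
			.p	= guest_xsave->region,
			.left	= sizeof(guest_xsave->region),
		};

		/* Convert the compacted in-kernel format to UABI layout. */
		__copy_xstate_to_uabi_buf(mb, &vcpu->arch.guest_fpu->state.xsave,
					  vcpu->arch.pkru, XSTATE_COPY_XSAVE);
	}

With a caller along these lines, KVM's own xstate-to-user copy routine
can be dropped in favor of the common code, which is what "replacing the
KVM copy xstate to user function" in the changelog refers to.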