Message-ID: <73e6ddee-5ca9-64ea-d8d3-fabe046691fd@redhat.com>
Date: Wed, 31 Jul 2019 15:14:54 +0200
From: Paolo Bonzini <pbonzini@...hat.com>
To: Vitaly Kuznetsov <vkuznets@...hat.com>, kvm@...r.kernel.org
Cc: linux-kernel@...r.kernel.org,
Radim Krčmář <rkrcmar@...hat.com>,
Joerg Roedel <joro@...tes.org>,
Jim Mattson <jmattson@...gle.com>
Subject: Re: [PATCH RFC 4/5] x86: KVM: add xsetbv to the emulator
On 31/07/19 15:07, Vitaly Kuznetsov wrote:
> Paolo Bonzini <pbonzini@...hat.com> writes:
>
>> On 20/06/19 13:02, Vitaly Kuznetsov wrote:
>>> To avoid hardcoding the xsetbv instruction length to '3', we need to
>>> support decoding it in the emulator.
>>>
>>> Signed-off-by: Vitaly Kuznetsov <vkuznets@...hat.com>
>>
>> Can you also emulate it properly? The code from QEMU's
>> target/i386/fpu_helper.c can help. :)
>>
>
> (Had a chance to get back to this just now, sorry)
>
> Assuming __kvm_set_xcr() is also a correct implementation, would the
> code below do the job? (I'm just trying to figure out why you suggested
> I take a look at QEMU's variant):
>
Yes, I had forgotten about __kvm_set_xcr.
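
For the record, __kvm_set_xcr() in arch/x86/kvm/x86.c already performs
the architectural checks that QEMU's helper does.  From memory it is
roughly the following (a simplified sketch, not the exact upstream
code; some checks, such as the MPX and AVX-512 bit-consistency ones,
are omitted here):

  static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
  {
          u64 xcr0 = xcr;
          u64 old_xcr0 = vcpu->arch.xcr0;
          u64 valid_bits;

          /* Only XCR0 (index 0) is supported. */
          if (index != XCR_XFEATURE_ENABLED_MASK)
                  return 1;

          /* x87 state must always be enabled; AVX requires SSE. */
          if (!(xcr0 & XFEATURE_MASK_FP))
                  return 1;
          if ((xcr0 & XFEATURE_MASK_YMM) && !(xcr0 & XFEATURE_MASK_SSE))
                  return 1;

          /* Reject bits that the guest's CPUID does not advertise. */
          valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
          if (xcr0 & ~valid_bits)
                  return 1;

          vcpu->arch.xcr0 = xcr0;

          /* Toggling dynamic xfeatures changes CPUID leaf 0xD. */
          if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
                  kvm_update_cpuid(vcpu);
          return 0;
  }

A nonzero return maps onto the #GP(0) you inject in em_xsetbv(), which
is what XSETBV raises architecturally for an invalid XCR index or value.
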
Paolo
> diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
> index feab24cac610..77cf6c11f66b 100644
> --- a/arch/x86/include/asm/kvm_emulate.h
> +++ b/arch/x86/include/asm/kvm_emulate.h
> @@ -229,7 +229,7 @@ struct x86_emulate_ops {
> int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt,
> const char *smstate);
> void (*post_leave_smm)(struct x86_emulate_ctxt *ctxt);
> -
> + int (*set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr);
> };
>
> typedef u32 __attribute__((vector_size(16))) sse128_t;
> @@ -429,6 +429,7 @@ enum x86_intercept {
> x86_intercept_ins,
> x86_intercept_out,
> x86_intercept_outs,
> + x86_intercept_xsetbv,
>
> nr_x86_intercepts
> };
> diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
> index 718f7d9afedc..f9e843dd992a 100644
> --- a/arch/x86/kvm/emulate.c
> +++ b/arch/x86/kvm/emulate.c
> @@ -4156,6 +4156,20 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
> return rc;
> }
>
> +static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
> +{
> + u32 eax, ecx, edx;
> +
> + eax = reg_read(ctxt, VCPU_REGS_RAX);
> + edx = reg_read(ctxt, VCPU_REGS_RDX);
> + ecx = reg_read(ctxt, VCPU_REGS_RCX);
> +
> + if (ctxt->ops->set_xcr(ctxt, ecx, ((u64)edx << 32) | eax))
> + return emulate_gp(ctxt, 0);
> +
> + return X86EMUL_CONTINUE;
> +}
> +
> static bool valid_cr(int nr)
> {
> switch (nr) {
> @@ -4409,6 +4423,12 @@ static const struct opcode group7_rm1[] = {
> N, N, N, N, N, N,
> };
>
> +static const struct opcode group7_rm2[] = {
> + N,
> + II(ImplicitOps | Priv, em_xsetbv, xsetbv),
> + N, N, N, N, N, N,
> +};
> +
> static const struct opcode group7_rm3[] = {
> DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
> II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
> @@ -4498,7 +4518,8 @@ static const struct group_dual group7 = { {
> }, {
> EXT(0, group7_rm0),
> EXT(0, group7_rm1),
> - N, EXT(0, group7_rm3),
> + EXT(0, group7_rm2),
> + EXT(0, group7_rm3),
> II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
> II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
> EXT(0, group7_rm7),
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index c6d951cbd76c..9512cc38dfe9 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -6068,6 +6068,11 @@ static void emulator_post_leave_smm(struct x86_emulate_ctxt *ctxt)
> kvm_smm_changed(emul_to_vcpu(ctxt));
> }
>
> +static int emulator_set_xcr(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr)
> +{
> + return __kvm_set_xcr(emul_to_vcpu(ctxt), index, xcr);
> +}
> +
> static const struct x86_emulate_ops emulate_ops = {
> .read_gpr = emulator_read_gpr,
> .write_gpr = emulator_write_gpr,
> @@ -6109,6 +6114,7 @@ static const struct x86_emulate_ops emulate_ops = {
> .set_hflags = emulator_set_hflags,
> .pre_leave_smm = emulator_pre_leave_smm,
> .post_leave_smm = emulator_post_leave_smm,
> + .set_xcr = emulator_set_xcr,
> };
>
> static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
>
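
One more observation on the decode side, if I read the tables right:
xsetbv is 0f 01 /2 with mod=11b and rm=1, i.e. the byte sequence
0f 01 d1:

  /*
   * ModRM 0xd1 = 11 010 001b -> mod = 3, reg = 2, rm = 1, so the mod=3
   * row of group7 dispatches reg=2 (previously N) to group7_rm2, whose
   * entry 1 is the new em_xsetbv.  Entry 0 (0f 01 d0) would be xgetbv,
   * which stays unhandled (N) here.
   */

Also note that the Priv flag on the II() entry makes the emulator raise
#GP(0) at CPL > 0, which stands in for the get_cpl() check that
kvm_set_xcr() normally does before calling __kvm_set_xcr().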