[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date: Tue, 2 Feb 2021 18:24:30 +0100
From: Paolo Bonzini <pbonzini@...hat.com>
To: Sean Christopherson <seanjc@...gle.com>
Cc: linux-kernel@...r.kernel.org, kvm@...r.kernel.org
Subject: Re: [PATCH 1/3] KVM: x86: move kvm_inject_gp up from kvm_set_xcr to
callers
On 02/02/21 18:19, Sean Christopherson wrote:
> On Tue, Feb 02, 2021, Paolo Bonzini wrote:
>> Push the injection of #GP up to the callers, so that they can just use
>> kvm_complete_insn_gp.
>
> The SVM and VMX code is identical, IMO we should push all the code to x86.c
> instead of shuffling it around.
>
> I'd also like to change svm_exit_handlers to take @vcpu instead of @svm so that
> SVM can invoke common handlers directly.
>
> If you agree, I'll send a proper series to do the above, plus whatever other
> cleanups I find, e.g. INVD, WBINVD, etc...
Yes, why not. There are a lot of things that are only slightly different
between VMX and SVM for no particular reason.
Paolo
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index fa7b2df6422b..bf917efde35c 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1530,7 +1530,7 @@ int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
> unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
> void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
> void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
> -int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
> +int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu);
>
> int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
> int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index 687876211ebe..842a74d88f1b 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -2334,14 +2334,7 @@ static int wbinvd_interception(struct vcpu_svm *svm)
>
> static int xsetbv_interception(struct vcpu_svm *svm)
> {
> - u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
> - u32 index = kvm_rcx_read(&svm->vcpu);
> -
> - if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
> - return kvm_skip_emulated_instruction(&svm->vcpu);
> - }
> -
> - return 1;
> + return kvm_emulate_xsetbv(&svm->vcpu);
> }
>
> static int rdpru_interception(struct vcpu_svm *svm)
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index cf0c397dc3eb..474a169835de 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -5218,16 +5218,6 @@ static int handle_wbinvd(struct kvm_vcpu *vcpu)
> return kvm_emulate_wbinvd(vcpu);
> }
>
> -static int handle_xsetbv(struct kvm_vcpu *vcpu)
> -{
> - u64 new_bv = kvm_read_edx_eax(vcpu);
> - u32 index = kvm_rcx_read(vcpu);
> -
> - if (kvm_set_xcr(vcpu, index, new_bv) == 0)
> - return kvm_skip_emulated_instruction(vcpu);
> - return 1;
> -}
> -
> static int handle_apic_access(struct kvm_vcpu *vcpu)
> {
> if (likely(fasteoi)) {
> @@ -5689,7 +5679,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
> [EXIT_REASON_APIC_WRITE] = handle_apic_write,
> [EXIT_REASON_EOI_INDUCED] = handle_apic_eoi_induced,
> [EXIT_REASON_WBINVD] = handle_wbinvd,
> - [EXIT_REASON_XSETBV] = handle_xsetbv,
> + [EXIT_REASON_XSETBV] = kvm_emulate_xsetbv,
> [EXIT_REASON_TASK_SWITCH] = handle_task_switch,
> [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check,
> [EXIT_REASON_GDTR_IDTR] = handle_desc,
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 14fb8a138ec3..ef630f8d8bd2 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -984,16 +984,17 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
> return 0;
> }
>
> -int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
> +int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu)
> {
> if (static_call(kvm_x86_get_cpl)(vcpu) != 0 ||
> - __kvm_set_xcr(vcpu, index, xcr)) {
> + __kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) {
> kvm_inject_gp(vcpu, 0);
> return 1;
> }
> - return 0;
> +
> + return kvm_skip_emulated_instruction(vcpu);
> }
> -EXPORT_SYMBOL_GPL(kvm_set_xcr);
> +EXPORT_SYMBOL_GPL(kvm_emulate_xsetbv);
>
> bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
> {
>
>
Powered by blists - more mailing lists