[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <ce45a9cb-44a4-4a3d-bc81-608a0871ae6d@intel.com>
Date: Thu, 28 Nov 2024 11:08:08 +0800
From: Xiaoyao Li <xiaoyao.li@...el.com>
To: Sean Christopherson <seanjc@...gle.com>,
Paolo Bonzini <pbonzini@...hat.com>
Cc: kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
Tom Lendacky <thomas.lendacky@....com>, Binbin Wu
<binbin.wu@...ux.intel.com>, Isaku Yamahata <isaku.yamahata@...el.com>,
Kai Huang <kai.huang@...el.com>
Subject: Re: [PATCH v4 5/6] KVM: x86: Always complete hypercall via function
callback
On 11/28/2024 8:43 AM, Sean Christopherson wrote:
> Finish "emulation" of KVM hypercalls by function callback, even when the
> hypercall is handled entirely within KVM, i.e. doesn't require an exit to
> userspace, and refactor __kvm_emulate_hypercall()'s return value to *only*
> communicate whether or not KVM should exit to userspace or resume the
> guest.
>
> (Ab)Use vcpu->run->hypercall.ret to propagate the return value to the
> callback, purely to avoid having to add a trampoline for every completion
> callback.
>
> Using the function return value for KVM's control flow eliminates the
> multiplexed return value, where '0' for KVM_HC_MAP_GPA_RANGE (and only
> that hypercall) means "exit to userspace".
>
> Note, the unnecessary extra indirect call and thus potential retpoline
> will be eliminated in the near future by converting the intermediate layer
> to a macro.
>
> Suggested-by: Binbin Wu <binbin.wu@...ux.intel.com>
> Suggested-by: Kai Huang <kai.huang@...el.com>
> Signed-off-by: Sean Christopherson <seanjc@...gle.com>
> ---
> arch/x86/kvm/x86.c | 29 ++++++++++++-----------------
> arch/x86/kvm/x86.h | 10 ++++++----
> 2 files changed, 18 insertions(+), 21 deletions(-)
>
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 11434752b467..39be2a891ab4 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -9982,10 +9982,11 @@ static int complete_hypercall_exit(struct kvm_vcpu *vcpu)
> return kvm_skip_emulated_instruction(vcpu);
> }
>
> -unsigned long __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
> - unsigned long a0, unsigned long a1,
> - unsigned long a2, unsigned long a3,
> - int op_64_bit, int cpl)
> +int __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
> + unsigned long a0, unsigned long a1,
> + unsigned long a2, unsigned long a3,
> + int op_64_bit, int cpl,
> + int (*complete_hypercall)(struct kvm_vcpu *))
> {
> unsigned long ret;
>
> @@ -10061,7 +10062,7 @@ unsigned long __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
> vcpu->run->hypercall.flags |= KVM_EXIT_HYPERCALL_LONG_MODE;
>
> WARN_ON_ONCE(vcpu->run->hypercall.flags & KVM_EXIT_HYPERCALL_MBZ);
> - vcpu->arch.complete_userspace_io = complete_hypercall_exit;
> + vcpu->arch.complete_userspace_io = complete_hypercall;
> /* stat is incremented on completion. */
> return 0;
> }
> @@ -10071,13 +10072,15 @@ unsigned long __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
> }
>
> out:
> - return ret;
> + vcpu->run->hypercall.ret = ret;
> + complete_hypercall(vcpu);
> + return 1;
shouldn't it be
return complete_hypercall(vcpu);
?
Originally, kvm_emulate_hypercall() returned the result of
kvm_skip_emulated_instruction(). Now it becomes
kvm_skip_emulated_instruction();
return 1;
I didn't dig into whether kvm_skip_emulated_instruction() always returns
1 in this case. But logically,
return complete_hypercall(vcpu);
looks more reasonable.
> }
> EXPORT_SYMBOL_GPL(__kvm_emulate_hypercall);
>
> int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
> {
> - unsigned long nr, a0, a1, a2, a3, ret;
> + unsigned long nr, a0, a1, a2, a3;
> int op_64_bit;
> int cpl;
>
> @@ -10095,16 +10098,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
> op_64_bit = is_64_bit_hypercall(vcpu);
> cpl = kvm_x86_call(get_cpl)(vcpu);
>
> - ret = __kvm_emulate_hypercall(vcpu, nr, a0, a1, a2, a3, op_64_bit, cpl);
> - if (nr == KVM_HC_MAP_GPA_RANGE && !ret)
> - /* MAP_GPA tosses the request to the user space. */
> - return 0;
> -
> - if (!op_64_bit)
> - ret = (u32)ret;
> - kvm_rax_write(vcpu, ret);
> -
> - return kvm_skip_emulated_instruction(vcpu);
> + return __kvm_emulate_hypercall(vcpu, nr, a0, a1, a2, a3, op_64_bit, cpl,
> + complete_hypercall_exit);
> }
> EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
>
> diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
> index 6db13b696468..28adc8ea04bf 100644
> --- a/arch/x86/kvm/x86.h
> +++ b/arch/x86/kvm/x86.h
> @@ -617,10 +617,12 @@ static inline bool user_exit_on_hypercall(struct kvm *kvm, unsigned long hc_nr)
> return kvm->arch.hypercall_exit_enabled & BIT(hc_nr);
> }
>
> -unsigned long __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
> - unsigned long a0, unsigned long a1,
> - unsigned long a2, unsigned long a3,
> - int op_64_bit, int cpl);
> +int __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
> + unsigned long a0, unsigned long a1,
> + unsigned long a2, unsigned long a3,
> + int op_64_bit, int cpl,
> + int (*complete_hypercall)(struct kvm_vcpu *));
> +
> int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
>
> #endif
Powered by blists - more mailing lists