[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <d57ad4d4-db52-9e50-dd9d-667f9fc24625@oracle.com>
Date: Mon, 21 Nov 2022 16:31:06 +0000
From: Liam Merwick <liam.merwick@...cle.com>
To: Maxim Levitsky <mlevitsk@...hat.com>, kvm@...r.kernel.org
Cc: Paolo Bonzini <pbonzini@...hat.com>,
Thomas Gleixner <tglx@...utronix.de>,
linux-kernel@...r.kernel.org,
Chenyi Qiang <chenyi.qiang@...el.com>,
Yang Zhong <yang.zhong@...el.com>, x86@...nel.org,
Shuah Khan <shuah@...nel.org>,
Dave Hansen <dave.hansen@...ux.intel.com>,
"H. Peter Anvin" <hpa@...or.com>,
Colton Lewis <coltonlewis@...gle.com>,
Borislav Petkov <bp@...en8.de>, Peter Xu <peterx@...hat.com>,
Sean Christopherson <seanjc@...gle.com>,
Jim Mattson <jmattson@...gle.com>,
linux-kselftest@...r.kernel.org, Ingo Molnar <mingo@...hat.com>,
Wei Wang <wei.w.wang@...el.com>,
David Matlack <dmatlack@...gle.com>, stable@...r.kernel.org,
Liam Merwick <liam.merwick@...cle.com>
Subject: Re: [PATCH v2 3/9] KVM: x86: add kvm_leave_nested
On 03/11/2022 14:13, Maxim Levitsky wrote:
> Add kvm_leave_nested(), which wraps the call to nested_ops->leave_nested()
> in a function.
>
> Cc: stable@...r.kernel.org
> Signed-off-by: Maxim Levitsky <mlevitsk@...hat.com>
Reviewed-by: Liam Merwick <liam.merwick@...cle.com>
> ---
> arch/x86/kvm/svm/nested.c | 3 ---
> arch/x86/kvm/vmx/nested.c | 3 ---
> arch/x86/kvm/x86.c | 8 +++++++-
> 3 files changed, 7 insertions(+), 7 deletions(-)
>
> diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
> index b74da40c1fc40c..bcc4f6620f8aec 100644
> --- a/arch/x86/kvm/svm/nested.c
> +++ b/arch/x86/kvm/svm/nested.c
> @@ -1147,9 +1147,6 @@ void svm_free_nested(struct vcpu_svm *svm)
> svm->nested.initialized = false;
> }
>
> -/*
> - * Forcibly leave nested mode in order to be able to reset the VCPU later on.
> - */
> void svm_leave_nested(struct kvm_vcpu *vcpu)
> {
> struct vcpu_svm *svm = to_svm(vcpu);
> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> index 61a2e551640a08..1ebe141a0a015f 100644
> --- a/arch/x86/kvm/vmx/nested.c
> +++ b/arch/x86/kvm/vmx/nested.c
> @@ -6441,9 +6441,6 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
> return kvm_state.size;
> }
>
> -/*
> - * Forcibly leave nested mode in order to be able to reset the VCPU later on.
> - */
> void vmx_leave_nested(struct kvm_vcpu *vcpu)
> {
> if (is_guest_mode(vcpu)) {
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index cd9eb13e2ed7fc..316ab1d5317f92 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -627,6 +627,12 @@ static void kvm_queue_exception_vmexit(struct kvm_vcpu *vcpu, unsigned int vecto
> ex->payload = payload;
> }
>
> +/* Forcibly leave the nested mode in cases like a vCPU reset */
> +static void kvm_leave_nested(struct kvm_vcpu *vcpu)
> +{
> + kvm_x86_ops.nested_ops->leave_nested(vcpu);
> +}
> +
> static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
> unsigned nr, bool has_error, u32 error_code,
> bool has_payload, unsigned long payload, bool reinject)
> @@ -5193,7 +5199,7 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
> if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
> #ifdef CONFIG_KVM_SMM
> if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
> - kvm_x86_ops.nested_ops->leave_nested(vcpu);
> + kvm_leave_nested(vcpu);
> kvm_smm_changed(vcpu, events->smi.smm);
> }
>
Powered by blists - more mailing lists