Message-ID: <ZWjUZPXCF2U9azWT@google.com>
Date: Thu, 30 Nov 2023 10:28:52 -0800
From: Sean Christopherson <seanjc@...gle.com>
To: Vitaly Kuznetsov <vkuznets@...hat.com>
Cc: kvm@...r.kernel.org, Paolo Bonzini <pbonzini@...hat.com>,
Maxim Levitsky <mlevitsk@...hat.com>,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH 10/14] KVM: x86: Make Hyper-V emulation optional
On Thu, Nov 30, 2023, Vitaly Kuznetsov wrote:
> Sean Christopherson <seanjc@...gle.com> writes:
>
> ...
>
> >
> >> static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
> >> {
> >> @@ -3552,11 +3563,13 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
> >> if (!nested_vmx_check_permission(vcpu))
> >> return 1;
> >>
> >> +#ifdef CONFIG_KVM_HYPERV
> >> evmptrld_status = nested_vmx_handle_enlightened_vmptrld(vcpu, launch);
> >> if (evmptrld_status == EVMPTRLD_ERROR) {
> >> kvm_queue_exception(vcpu, UD_VECTOR);
> >> return 1;
> >> }
> >> +#endif
> >>
> >> kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
> >
> > This fails to build with CONFIG_KVM_HYPERV=n && CONFIG_KVM_WERROR=y:
> >
> > arch/x86/kvm/vmx/nested.c:3577:9: error: variable 'evmptrld_status' is uninitialized when used here [-Werror,-Wuninitialized]
> > if (CC(evmptrld_status == EVMPTRLD_VMFAIL))
> > ^~~~~~~~~~~~~~~
> >
> > Sadly, simply wrapping with an #ifdef also fails because then evmptrld_status
> > becomes unused. I'd really prefer to avoid having to tag it __maybe_unused, and
> > adding more #ifdefs would also be ugly. Any ideas?
>
> A couple of ideas:
>
> - we can try putting all eVMPTR logic under 'if (1)' just to create a
> block where we can define evmptrld_status (see the sketch after the
> diff below). Not sure this is nicer than another #ifdef wrapping
> evmptrld_status, and I'm not sure what to do with
> kvm_pmu_trigger_event() -- can it just go above
> nested_vmx_handle_enlightened_vmptrld()?
>
> - we can add a helper, e.g. 'evmptr_is_vmfail()', and make it just return
> 'false' when !CONFIG_KVM_HYPERV (also sketched after the diff below).
>
> - rewrite this as a switch to avoid the need for having the local
> variable, (untested)
>
> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> index c5ec0ef51ff7..b26ce7d596e9 100644
> --- a/arch/x86/kvm/vmx/nested.c
> +++ b/arch/x86/kvm/vmx/nested.c
> @@ -3553,22 +3553,23 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
> enum nvmx_vmentry_status status;
> struct vcpu_vmx *vmx = to_vmx(vcpu);
> u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
> - enum nested_evmptrld_status evmptrld_status;
>
> if (!nested_vmx_check_permission(vcpu))
> return 1;
>
> - evmptrld_status = nested_vmx_handle_enlightened_vmptrld(vcpu, launch);
> - if (evmptrld_status == EVMPTRLD_ERROR) {
> + switch (nested_vmx_handle_enlightened_vmptrld(vcpu, launch)) {
> + case EVMPTRLD_ERROR:
> kvm_queue_exception(vcpu, UD_VECTOR);
> return 1;
> + case EVMPTRLD_VMFAIL:
> + trace_kvm_nested_vmenter_failed("evmptrld_status", 0);
> + return nested_vmx_failInvalid(vcpu);
> + default:
> + break;
> }
>
> kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
>
> - if (CC(evmptrld_status == EVMPTRLD_VMFAIL))
> - return nested_vmx_failInvalid(vcpu);
> -
> if (CC(!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) &&
> vmx->nested.current_vmptr == INVALID_GPA))
> return nested_vmx_failInvalid(vcpu);
>
> Unfortunately, I had to open code CC() ;-(
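>
> For completeness, the 'if (1)' variant would look roughly like this
> (equally untested, and it assumes kvm_pmu_trigger_event() can simply
> move above the block):
>
> #ifdef CONFIG_KVM_HYPERV
> 	/* assumes kvm_pmu_trigger_event() was moved above this block */
> 	if (1) {
> 		enum nested_evmptrld_status evmptrld_status;
>
> 		evmptrld_status = nested_vmx_handle_enlightened_vmptrld(vcpu, launch);
> 		if (evmptrld_status == EVMPTRLD_ERROR) {
> 			kvm_queue_exception(vcpu, UD_VECTOR);
> 			return 1;
> 		}
>
> 		if (CC(evmptrld_status == EVMPTRLD_VMFAIL))
> 			return nested_vmx_failInvalid(vcpu);
> 	}
> #endif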
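>
> And one way to cut the helper variant, assuming the local and the
> nested_vmx_handle_enlightened_vmptrld() call stay under #ifdef
> CONFIG_KVM_HYPERV; the stub has to be a macro so the expansion never
> references the compiled-out local (also untested):
>
> #ifdef CONFIG_KVM_HYPERV
> static inline bool evmptr_is_vmfail(enum nested_evmptrld_status status)
> {
> 	return status == EVMPTRLD_VMFAIL;
> }
> #else
> /* A macro, not a function, so the unused local is dropped at preprocessing. */
> #define evmptr_is_vmfail(status) false
> #endif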
>
> Or maybe just another "#ifdef" is not so ugly after all? :-)
Ah, just have nested_vmx_handle_enlightened_vmptrld() return EVMPTRLD_DISABLED
for its "stub", e.g. with some other tangentially related de-stubbing:
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 4777d867419c..5a27a2ebbb32 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -2000,6 +2000,7 @@ static void copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
struct kvm_vcpu *vcpu, bool from_launch)
{
+#ifdef CONFIG_KVM_HYPERV
struct vcpu_vmx *vmx = to_vmx(vcpu);
bool evmcs_gpa_changed = false;
u64 evmcs_gpa;
@@ -2081,11 +2082,10 @@ static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
}
return EVMPTRLD_SUCCEEDED;
+#else
+ return EVMPTRLD_DISABLED;
+#endif
}
-#else /* CONFIG_KVM_HYPERV */
-static inline void copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx, u32 hv_clean_fields) {}
-static inline void copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx) {}
-#endif /* CONFIG_KVM_HYPERV */
void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu)
{
@@ -3191,8 +3191,6 @@ static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
return true;
}
-#else
-static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu) { return true; }
#endif
static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
@@ -3285,6 +3283,7 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
+#ifdef CONFIG_KVM_HYPERV
/*
* Note: nested_get_evmcs_page() also updates 'vp_assist_page' copy
* in 'struct kvm_vcpu_hv' in case eVMCS is in use, this is mandatory
@@ -3301,7 +3300,7 @@ static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
return false;
}
-
+#endif
if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
return false;
@@ -3564,7 +3563,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
if (!nested_vmx_check_permission(vcpu))
return 1;
-#ifdef CONFIG_KVM_HYPERV
evmptrld_status = nested_vmx_handle_enlightened_vmptrld(vcpu, launch);
if (evmptrld_status == EVMPTRLD_ERROR) {
kvm_queue_exception(vcpu, UD_VECTOR);
@@ -4743,6 +4741,7 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
WARN_ON_ONCE(vmx->nested.nested_run_pending);
if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
+#ifdef CONFIG_KVM_HYPERV
/*
* KVM_REQ_GET_NESTED_STATE_PAGES is also used to map
* Enlightened VMCS after migration and we still need to
@@ -4750,6 +4749,7 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
* the first L2 run.
*/
(void)nested_get_evmcs_page(vcpu);
+#endif
}
/* Service pending TLB flush requests for L2 before switching to L1. */
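
For reference, nested_evmptrld_status already has a value for the
"eVMCS not in use" case (quoting the enum from memory, so double-check
against the tree):

enum nested_evmptrld_status {
	EVMPTRLD_DISABLED,
	EVMPTRLD_SUCCEEDED,
	EVMPTRLD_VMFAIL,
	EVMPTRLD_ERROR,
};

i.e. the !CONFIG_KVM_HYPERV stub returning EVMPTRLD_DISABLED takes the
exact same path as enlightened VMCS simply being disabled at runtime, so
evmptrld_status is always initialized and the EVMPTRLD_VMFAIL check just
never fires.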