Message-ID: <9d0d09da-3920-16d6-11ae-51b864171b66@redhat.com>
Date: Mon, 4 May 2020 15:12:36 +0200
From: Paolo Bonzini <pbonzini@...hat.com>
To: Alexander Graf <graf@...zon.com>,
Sean Christopherson <sean.j.christopherson@...el.com>
Cc: Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org,
KarimAllah Raslan <karahmed@...zon.de>
Subject: Re: [PATCH v2] KVM: nVMX: Skip IBPB when switching between vmcs01 and
 vmcs02

On 04/05/20 14:01, Alexander Graf wrote:
> I like the WARN_ON :). It should be almost free during execution, but
> helps us catch problems early.

Yes, it's nice. I didn't mind the "buddy" argument either, but if we're
going to get a bool I prefer positive logic, so I'd like to squash this:

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index b57420f3dd8f..299393750a18 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -304,7 +304,13 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
prev = vmx->loaded_vmcs;
WARN_ON_ONCE(prev->cpu != cpu || prev->vmcs != per_cpu(current_vmcs, cpu));
vmx->loaded_vmcs = vmcs;
- vmx_vcpu_load_vmcs(vcpu, cpu, true);
+
+ /*
+ * This is the same guest from our point of view, so no
+ * indirect branch prediction barrier is needed. The L1
+ * guest can protect itself with retpolines, IBPB or IBRS.
+ */
+ vmx_vcpu_load_vmcs(vcpu, cpu, false);
vmx_sync_vmcs_host_state(vmx, prev);
put_cpu();

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 669e14947ba9..0f9c8d2dd7f6 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1311,7 +1311,7 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
pi_set_on(pi_desc);
}

-void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu, bool nested_switch)
+void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu, bool need_ibpb)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
@@ -1336,7 +1336,7 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu, bool nested_switch)
if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
vmcs_load(vmx->loaded_vmcs->vmcs);
- if (!nested_switch)
+ if (need_ibpb)
indirect_branch_prediction_barrier();
}

@@ -1378,7 +1378,7 @@ void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);

- vmx_vcpu_load_vmcs(vcpu, cpu, false);
+ vmx_vcpu_load_vmcs(vcpu, cpu, true);

vmx_vcpu_pi_load(vcpu, cpu);

diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index fa61dc802183..e584ee9b3e94 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -320,7 +320,7 @@ struct kvm_vmx {
};

bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
-void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu, bool nested_switch);
+void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu, bool need_ibpb);
void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
int allocate_vpid(void);
void free_vpid(int vpid);
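
As an aside, for anyone wondering what the skipped barrier actually
costs: indirect_branch_prediction_barrier() boils down to an IBPB
command written to the IA32_PRED_CMD MSR. Here is a minimal sketch of
that operation; the sketch_* names are made up for illustration, and
the real kernel helper additionally gates the write on CPU feature
support via alternatives patching:

/*
 * Illustrative sketch only: an IBPB barrier on x86 amounts to writing
 * PRED_CMD_IBPB to the IA32_PRED_CMD MSR (index 0x49, bit 0). The
 * sketch_* names are hypothetical; the kernel's real helper also
 * checks that the CPU actually supports IBPB before issuing the write.
 */
#define SKETCH_MSR_IA32_PRED_CMD	0x00000049
#define SKETCH_PRED_CMD_IBPB		(1ULL << 0)

static inline void sketch_wrmsr(unsigned int msr, unsigned long long val)
{
	/* WRMSR takes the MSR index in ECX and the value in EDX:EAX. */
	asm volatile("wrmsr"
		     :
		     : "c" (msr), "a" ((unsigned int)val),
		       "d" ((unsigned int)(val >> 32))
		     : "memory");
}

static inline void sketch_ibpb(void)
{
	/* Flush indirect branch predictions; a relatively costly MSR write. */
	sketch_wrmsr(SKETCH_MSR_IA32_PRED_CMD, SKETCH_PRED_CMD_IBPB);
}

That one MSR write is what the nested switch now avoids, since vmcs01
and vmcs02 belong to the same guest from the host's point of view.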
Paolo