Message-Id: <20220311154943.2299191-14-vkuznets@redhat.com>
Date: Fri, 11 Mar 2022 16:49:25 +0100
From: Vitaly Kuznetsov <vkuznets@...hat.com>
To: kvm@...r.kernel.org, Paolo Bonzini <pbonzini@...hat.com>
Cc: Sean Christopherson <seanjc@...gle.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Michael Kelley <mikelley@...rosoft.com>,
linux-hyperv@...r.kernel.org,
Siddharth Chandrasekaran <sidcha@...zon.de>,
linux-kernel@...r.kernel.org
Subject: [PATCH 13/31] KVM: x86: hyper-v: Direct TLB flush

Handle Direct TLB flush requests from L2 by going through all vCPUs
and checking whether there are vCPUs running the same VM_ID and with a
VP_ID specified in the request. Upon finishing the flush, perform a
synthetic VM-exit from L2 to L1 when the "TLB lock count" in the
partition assist page is non-zero.
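
Concretely, the check at hypercall completion distills to the sketch
below (simplified from the kvm_hv_hypercall_complete() hunk in this
patch; the handling around the instruction skip is omitted):

	if (hv_result_success(result) && is_guest_mode(vcpu) &&
	    kvm_hv_is_tlb_flush_hcall(vcpu)) {
		struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
		u32 tlb_lock_count = 0;

		/* "TLB lock count" lives in the partition assist page. */
		if (kvm_read_guest(vcpu->kvm, hv_vcpu->nested.pa_page_gpa,
				   &tlb_lock_count, sizeof(tlb_lock_count)))
			kvm_inject_gp(vcpu, 0);

		/* L1 still holds the TLB lock: inject the synthetic exit. */
		if (tlb_lock_count)
			kvm_x86_ops.nested_ops->post_hv_direct_flush(vcpu);
	}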

Note, while checking the VM_ID/VP_ID of running vCPUs seems to be a
bit racy, we count on the fact that KVM flushes the whole L2 VPID upon
transition. Also, a KVM_REQ_HV_TLB_FLUSH request needs to be made upon
every transition between L1 and L2 to make sure all pending requests
are always processed.
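
The transition side is not part of this patch; in sketch form, the
idea is simply the following (hypothetical helper name, the real hook
lives in the nVMX/nSVM transition code elsewhere in this series):

	/* On every L1 <-> L2 switch, don't leave flushes pending. */
	static void nested_transition_flush_hv_tlb(struct kvm_vcpu *vcpu)
	{
		if (to_hv_vcpu(vcpu))
			kvm_make_request(KVM_REQ_HV_TLB_FLUSH, vcpu);
	}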

Note, nVMX/nSVM code does not handle VMCALL/VMMCALL from L2 yet.

Signed-off-by: Vitaly Kuznetsov <vkuznets@...hat.com>
---
arch/x86/kvm/hyperv.c | 65 ++++++++++++++++++++++++++++++++++++-------
arch/x86/kvm/trace.h | 21 ++++++++------
2 files changed, 68 insertions(+), 18 deletions(-)
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index b88e44a126b8..865b4d9a26fe 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -34,6 +34,7 @@
#include <linux/eventfd.h>
#include <asm/apicdef.h>
+#include <asm/mshyperv.h>
#include <trace/events/kvm.h>
#include "trace.h"
@@ -1863,8 +1864,8 @@ static inline int hv_tlb_flush_ring_free(struct kvm_vcpu_hv *hv_vcpu,
return read_idx - write_idx - 1;
}
-static void hv_tlb_flush_ring_enqueue(struct kvm_vcpu *vcpu, bool flush_all,
- u64 *entries, int count)
+static void hv_tlb_flush_ring_enqueue(struct kvm_vcpu *vcpu, bool direct,
+ bool flush_all, u64 *entries, int count)
{
struct kvm_vcpu_hv_tlbflush_ring *tlb_flush_ring;
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
@@ -1875,7 +1876,7 @@ static void hv_tlb_flush_ring_enqueue(struct kvm_vcpu *vcpu, bool flush_all,
if (!hv_vcpu)
return;
- tlb_flush_ring = &hv_vcpu->tlb_flush_ring[0];
+ tlb_flush_ring = direct ? &hv_vcpu->tlb_flush_ring[1] : &hv_vcpu->tlb_flush_ring[0];
spin_lock_irqsave(&tlb_flush_ring->write_lock, flags);
@@ -2010,7 +2011,8 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
}
trace_kvm_hv_flush_tlb(flush.processor_mask,
- flush.address_space, flush.flags);
+ flush.address_space, flush.flags,
+ is_guest_mode(vcpu));
valid_bank_mask = BIT_ULL(0);
sparse_banks[0] = flush.processor_mask;
@@ -2041,7 +2043,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask,
flush_ex.hv_vp_set.format,
flush_ex.address_space,
- flush_ex.flags);
+ flush_ex.flags, is_guest_mode(vcpu));
valid_bank_mask = flush_ex.hv_vp_set.valid_bank_mask;
all_cpus = flush_ex.hv_vp_set.format !=
@@ -2080,19 +2082,45 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
* vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
* analyze it here, flush TLB regardless of the specified address space.
*/
- if (all_cpus) {
+ if (all_cpus && !is_guest_mode(vcpu)) {
kvm_for_each_vcpu(i, v, kvm)
- hv_tlb_flush_ring_enqueue(v, all_addr, entries, hc->rep_cnt);
+ hv_tlb_flush_ring_enqueue(v, false, all_addr, entries, hc->rep_cnt);
kvm_make_all_cpus_request(kvm, KVM_REQ_HV_TLB_FLUSH);
- } else {
+ } else if (!is_guest_mode(vcpu)) {
sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, vcpu_mask);
for_each_set_bit(i, vcpu_mask, KVM_MAX_VCPUS) {
v = kvm_get_vcpu(kvm, i);
if (!v)
continue;
- hv_tlb_flush_ring_enqueue(v, all_addr, entries, hc->rep_cnt);
+ hv_tlb_flush_ring_enqueue(v, false, all_addr, entries, hc->rep_cnt);
+ }
+
+ kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH, vcpu_mask);
+ } else {
+ struct kvm_vcpu_hv *hv_v;
+
+ bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);
+
+ kvm_for_each_vcpu(i, v, kvm) {
+ hv_v = to_hv_vcpu(v);
+
+ /*
+ * TLB is fully flushed on L2 VM change: either by KVM
+ * (on an eVMPTR switch) or by L1 hypervisor (in case it
+ * re-purposes the active eVMCS for a different VM/VP).
+ */
+ if (!hv_v || hv_v->nested.vm_id != hv_vcpu->nested.vm_id)
+ continue;
+
+ if (!all_cpus &&
+ !hv_is_vp_in_sparse_set(hv_v->nested.vp_id, valid_bank_mask,
+ sparse_banks))
+ continue;
+
+ __set_bit(i, vcpu_mask);
+ hv_tlb_flush_ring_enqueue(v, true, all_addr, entries, hc->rep_cnt);
}
kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH, vcpu_mask);
@@ -2280,10 +2308,27 @@ static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
{
+ int ret;
+
trace_kvm_hv_hypercall_done(result);
kvm_hv_hypercall_set_result(vcpu, result);
++vcpu->stat.hypercalls;
- return kvm_skip_emulated_instruction(vcpu);
+ ret = kvm_skip_emulated_instruction(vcpu);
+
+ if (unlikely(hv_result_success(result) && is_guest_mode(vcpu)
+ && kvm_hv_is_tlb_flush_hcall(vcpu))) {
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+ u32 tlb_lock_count = 0;
+
+ if (unlikely(kvm_read_guest(vcpu->kvm, hv_vcpu->nested.pa_page_gpa,
+ &tlb_lock_count, sizeof(tlb_lock_count))))
+ kvm_inject_gp(vcpu, 0);
+
+ if (tlb_lock_count)
+ kvm_x86_ops.nested_ops->post_hv_direct_flush(vcpu);
+ }
+
+ return ret;
}
static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 193f5ba930d1..82e1646b9433 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -1477,38 +1477,41 @@ TRACE_EVENT(kvm_hv_timer_state,
* Tracepoint for kvm_hv_flush_tlb.
*/
TRACE_EVENT(kvm_hv_flush_tlb,
- TP_PROTO(u64 processor_mask, u64 address_space, u64 flags),
- TP_ARGS(processor_mask, address_space, flags),
+ TP_PROTO(u64 processor_mask, u64 address_space, u64 flags, bool direct),
+ TP_ARGS(processor_mask, address_space, flags, direct),
TP_STRUCT__entry(
__field(u64, processor_mask)
__field(u64, address_space)
__field(u64, flags)
+ __field(bool, direct)
),
TP_fast_assign(
__entry->processor_mask = processor_mask;
__entry->address_space = address_space;
__entry->flags = flags;
+ __entry->direct = direct;
),
- TP_printk("processor_mask 0x%llx address_space 0x%llx flags 0x%llx",
+ TP_printk("processor_mask 0x%llx address_space 0x%llx flags 0x%llx %s",
__entry->processor_mask, __entry->address_space,
- __entry->flags)
+ __entry->flags, __entry->direct ? "(direct)" : "")
);
/*
* Tracepoint for kvm_hv_flush_tlb_ex.
*/
TRACE_EVENT(kvm_hv_flush_tlb_ex,
- TP_PROTO(u64 valid_bank_mask, u64 format, u64 address_space, u64 flags),
- TP_ARGS(valid_bank_mask, format, address_space, flags),
+ TP_PROTO(u64 valid_bank_mask, u64 format, u64 address_space, u64 flags, bool direct),
+ TP_ARGS(valid_bank_mask, format, address_space, flags, direct),
TP_STRUCT__entry(
__field(u64, valid_bank_mask)
__field(u64, format)
__field(u64, address_space)
__field(u64, flags)
+ __field(bool, direct)
),
TP_fast_assign(
@@ -1516,12 +1519,14 @@ TRACE_EVENT(kvm_hv_flush_tlb_ex,
__entry->format = format;
__entry->address_space = address_space;
__entry->flags = flags;
+ __entry->direct = direct;
),
TP_printk("valid_bank_mask 0x%llx format 0x%llx "
- "address_space 0x%llx flags 0x%llx",
+ "address_space 0x%llx flags 0x%llx %s",
__entry->valid_bank_mask, __entry->format,
- __entry->address_space, __entry->flags)
+ __entry->address_space, __entry->flags,
+ __entry->direct ? "(direct)" : "")
);
/*
--
2.35.1