Message-ID: <20180910083806.65177-14-Tianyu.Lan@microsoft.com>
Date: Mon, 10 Sep 2018 08:39:46 +0000
From: Tianyu Lan <Tianyu.Lan@...rosoft.com>
To: unlisted-recipients:; (no To-header on input)
CC: Tianyu Lan <Tianyu.Lan@...rosoft.com>,
"pbonzini@...hat.com" <pbonzini@...hat.com>,
"rkrcmar@...hat.com" <rkrcmar@...hat.com>,
"tglx@...utronix.de" <tglx@...utronix.de>,
"mingo@...hat.com" <mingo@...hat.com>,
"hpa@...or.com" <hpa@...or.com>, "x86@...nel.org" <x86@...nel.org>,
"kvm@...r.kernel.org" <kvm@...r.kernel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"Michael Kelley (EOSG)" <Michael.H.Kelley@...rosoft.com>,
KY Srinivasan <kys@...rosoft.com>,
vkuznets <vkuznets@...hat.com>,
Jork Loeser <Jork.Loeser@...rosoft.com>
Subject: [PATCH 13/13] KVM/VMX: Add hv tlb range flush support

Register the tlb_remote_flush_with_range callback and hook it up to the
Hyper-V TLB range flush interface (hyperv_flush_guest_mapping_range()), so
that only the requested range of EPT mappings is flushed instead of the
whole address space when a range is supplied.

Signed-off-by: Lan Tianyu <Tianyu.Lan@...rosoft.com>
---
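
Not part of the patch: a minimal user-space sketch of the dispatch this
change introduces, i.e. flush a specific GFN range when one is supplied and
otherwise fall back to a full EPT flush. The kvm_tlb_range layout
(start_gfn/pages) and the hyperv_flush_guest_mapping*() helpers below are
illustrative stand-ins for the kernel definitions, not the real ones.

/*
 * Sketch only: mirrors the shape of __hv_remote_flush_tlb_with_range()
 * from the patch below, with mocked Hyper-V flush helpers.
 */
#include <stdio.h>
#include <stdint.h>

struct kvm_tlb_range {
	uint64_t start_gfn;	/* first guest frame to flush (assumed layout) */
	uint64_t pages;		/* number of pages in the range (assumed layout) */
};

/* Stand-in for the full-EPT flush hypercall wrapper. */
static int hyperv_flush_guest_mapping(uint64_t ept_pointer)
{
	printf("full flush of EPT %#llx\n", (unsigned long long)ept_pointer);
	return 0;
}

/* Stand-in for the ranged flush wrapper added earlier in this series. */
static int hyperv_flush_guest_mapping_range(uint64_t ept_pointer,
					    struct kvm_tlb_range *range)
{
	printf("range flush of EPT %#llx: gfn %#llx, %llu pages\n",
	       (unsigned long long)ept_pointer,
	       (unsigned long long)range->start_gfn,
	       (unsigned long long)range->pages);
	return 0;
}

/* Same dispatch shape as __hv_remote_flush_tlb_with_range(). */
static int flush_one(uint64_t ept_pointer, struct kvm_tlb_range *range)
{
	if (range)
		return hyperv_flush_guest_mapping_range(ept_pointer, range);
	else
		return hyperv_flush_guest_mapping(ept_pointer);
}

int main(void)
{
	struct kvm_tlb_range range = { .start_gfn = 0x1000, .pages = 16 };

	flush_one(0xdeadbeef, &range);	/* ranged flush */
	flush_one(0xdeadbeef, NULL);	/* falls back to a full flush */
	return 0;
}
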
arch/x86/kvm/vmx.c | 34 +++++++++++++++++++++++++++-------
1 file changed, 27 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 3f9ccbafd0d8..ae904693b14f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1555,7 +1555,19 @@ static void check_ept_pointer_match(struct kvm *kvm)
 		to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH;
 }
 
-static int vmx_hv_remote_flush_tlb(struct kvm *kvm)
+static inline int __hv_remote_flush_tlb_with_range(struct kvm *kvm,
+		struct kvm_vcpu *vcpu, struct kvm_tlb_range *range)
+{
+	u64 ept_pointer = to_vmx(vcpu)->ept_pointer;
+
+	if (range)
+		return hyperv_flush_guest_mapping_range(ept_pointer, range);
+	else
+		return hyperv_flush_guest_mapping(ept_pointer);
+}
+
+static int hv_remote_flush_tlb_with_range(struct kvm *kvm,
+		struct kvm_tlb_range *range)
 {
 	struct kvm_vcpu *vcpu;
 	int ret = -ENOTSUPP, i;
@@ -1567,16 +1579,21 @@ static int vmx_hv_remote_flush_tlb(struct kvm *kvm)
 
 	if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) {
 		kvm_for_each_vcpu(i, vcpu, kvm)
-			ret |= hyperv_flush_guest_mapping(
-				to_vmx(kvm_get_vcpu(kvm, i))->ept_pointer);
+			ret |= __hv_remote_flush_tlb_with_range(
+					kvm, vcpu, range);
 	} else {
-		ret = hyperv_flush_guest_mapping(
-			to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer);
+		ret = __hv_remote_flush_tlb_with_range(kvm,
+				kvm_get_vcpu(kvm, 0), range);
 	}
 
 	spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
 	return ret;
 }
+
+static int hv_remote_flush_tlb(struct kvm *kvm)
+{
+	return hv_remote_flush_tlb_with_range(kvm, NULL);
+}
 
 #else /* !IS_ENABLED(CONFIG_HYPERV) */
 static inline void evmcs_write64(unsigned long field, u64 value) {}
 static inline void evmcs_write32(unsigned long field, u32 value) {}
@@ -7918,8 +7935,11 @@ static __init int hardware_setup(void)
 
 #if IS_ENABLED(CONFIG_HYPERV)
 	if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH
-	    && enable_ept)
-		kvm_x86_ops->tlb_remote_flush = vmx_hv_remote_flush_tlb;
+	    && enable_ept) {
+		kvm_x86_ops->tlb_remote_flush = hv_remote_flush_tlb;
+		kvm_x86_ops->tlb_remote_flush_with_range =
+				hv_remote_flush_tlb_with_range;
+	}
 #endif
 
 	if (!cpu_has_vmx_ple()) {
--
2.14.4