Message-Id: <cfe03ace30dc441f12d11851933e041f2d7548c0.1543481993.git.yi.z.zhang@linux.intel.com>
Date: Fri, 30 Nov 2018 16:09:28 +0800
From: Zhang Yi <yi.z.zhang@...ux.intel.com>
To: pbonzini@...hat.com, mdontu@...defender.com, ncitu@...defender.com
Cc: rkrcmar@...hat.com, linux-kernel@...r.kernel.org,
kvm@...r.kernel.org, Zhang Yi <yi.z.zhang@...ux.intel.com>
Subject: [RFC PATCH V2 11/11] KVM: VMX: set up SPP page structure on SPP miss
We should also set up the SPP page structure when we catch an SPP miss.
In some cases, such as vCPU hotplug, the SPP page table has to be
updated from the SPP miss handler.
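
For reference, the access_map used below is the per-gfn 32-bit sub-page
write-permission bitmap returned by gfn_to_subpage_wp_info(). The sketch
below only illustrates how such a bitmap could be interpreted; the helper
name spp_subpage_writable() and the assumption of one bit per 128-byte
sub-page of a 4KB page are illustrative, not part of this patch.

/*
 * Sketch only: test whether a guest-physical address falls in a
 * writable sub-page, assuming bit N of the bitmap covers the N-th
 * 128-byte sub-page (bit 0 -> bytes 0-127, bit 1 -> bytes 128-255, ...).
 */
static bool spp_subpage_writable(u32 access_map, gpa_t gpa)
{
	unsigned int subpage = (gpa & ~PAGE_MASK) / 128;

	return access_map & (1u << subpage);
}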
Signed-off-by: Zhang Yi <yi.z.zhang@...ux.intel.com>
---
arch/x86/include/asm/kvm_host.h | 2 ++
arch/x86/kvm/mmu.c | 12 ++++++++++++
arch/x86/kvm/vmx.c | 8 ++++++++
3 files changed, 22 insertions(+)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ce6d258..a09ea39 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1406,6 +1406,8 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u64 error_code,
int kvm_mmu_setup_spp_structure(struct kvm_vcpu *vcpu,
u32 access_map, gfn_t gfn);
+int kvm_mmu_get_spp_access_map(struct kvm *kvm, u32 *access_map, gfn_t gfn);
+
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 287ee62..01cf85e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4299,6 +4299,17 @@ static void mmu_spp_spte_set(u64 *sptep, u64 new_spte)
__set_spte(sptep, new_spte);
}
+int kvm_mmu_get_spp_access_map(struct kvm *kvm, u32 *access_map, gfn_t gfn)
+{
+ struct kvm_memory_slot *slot;
+
+ slot = gfn_to_memslot(kvm, gfn);
+ *access_map = *gfn_to_subpage_wp_info(slot, gfn);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_get_spp_access_map);
+
int kvm_mmu_setup_spp_structure(struct kvm_vcpu *vcpu,
u32 access_map, gfn_t gfn)
{
@@ -4344,6 +4355,7 @@ int kvm_mmu_setup_spp_structure(struct kvm_vcpu *vcpu,
spin_unlock(&kvm->mmu_lock);
return -EFAULT;
}
+EXPORT_SYMBOL_GPL(kvm_mmu_setup_spp_structure);
int kvm_mmu_get_subpages(struct kvm *kvm, struct kvm_subpage *spp_info)
{
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index b660812..b0ab645 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -9706,6 +9706,9 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
static int handle_spp(struct kvm_vcpu *vcpu)
{
unsigned long exit_qualification;
+ gpa_t gpa;
+ gfn_t gfn;
+ u32 map;
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
@@ -9732,6 +9735,11 @@ static int handle_spp(struct kvm_vcpu *vcpu)
* SPP table here.
*/
pr_debug("SPP: %s: SPPT Miss!!!\n", __func__);
+
+ gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
+ gfn = gpa >> PAGE_SHIFT;
+ kvm_mmu_get_spp_access_map(vcpu->kvm, &map, gfn);
+ kvm_mmu_setup_spp_structure(vcpu, map, gfn);
return 1;
}
--
2.7.4