[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <1557758315-12667-23-git-send-email-alexandre.chartre@oracle.com>
Date: Mon, 13 May 2019 16:38:30 +0200
From: Alexandre Chartre <alexandre.chartre@...cle.com>
To: pbonzini@...hat.com, rkrcmar@...hat.com, tglx@...utronix.de,
mingo@...hat.com, bp@...en8.de, hpa@...or.com,
dave.hansen@...ux.intel.com, luto@...nel.org, peterz@...radead.org,
kvm@...r.kernel.org, x86@...nel.org, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Cc: konrad.wilk@...cle.com, jan.setjeeilers@...cle.com,
liran.alon@...cle.com, jwadams@...gle.com,
alexandre.chartre@...cle.com
Subject: [RFC KVM 22/27] kvm/isolation: initialize the KVM page table with vmx cpu data
Map the vmx vcpu data into the KVM address space when a vmx vcpu is
created, and unmap it when the vcpu is freed.
Signed-off-by: Alexandre Chartre <alexandre.chartre@...cle.com>
---
arch/x86/kvm/vmx/vmx.c | 65 ++++++++++++++++++++++++++++++++++++++++++++++++
1 files changed, 65 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 5b52e8c..cbbaf58 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6564,10 +6564,69 @@ static void vmx_vm_free(struct kvm *kvm)
vfree(to_kvm_vmx(kvm));
}
+/*
+ * Undo vmx_map_vcpu(): remove the vmx vcpu data mappings from the KVM
+ * address space — the vcpu_vmx structure itself, the PML page (only
+ * when PML is enabled), the guest MSRs page, the vmcs01 VMCS and MSR
+ * bitmap, and the vcpu's pio_data and APIC pages.
+ *
+ * NOTE(review): this also serves as the error-unwind path of
+ * vmx_map_vcpu(), so it can run on ranges that were never mapped —
+ * presumably kvm_clear_range_mapping() tolerates that; confirm.
+ */
+static void vmx_unmap_vcpu(struct vcpu_vmx *vmx)
+{
+ pr_debug("unmapping vmx %p", vmx);
+
+ /* Clear each range previously copied into the KVM page table. */
+ kvm_clear_range_mapping(vmx);
+ if (enable_pml)
+ kvm_clear_range_mapping(vmx->pml_pg);
+ kvm_clear_range_mapping(vmx->guest_msrs);
+ kvm_clear_range_mapping(vmx->vmcs01.vmcs);
+ kvm_clear_range_mapping(vmx->vmcs01.msr_bitmap);
+ kvm_clear_range_mapping(vmx->vcpu.arch.pio_data);
+ kvm_clear_range_mapping(vmx->vcpu.arch.apic);
+}
+
+/*
+ * Map the vmx vcpu data into the KVM address space by copying the
+ * corresponding PTEs into the KVM page table: the vcpu_vmx structure,
+ * the PML page (only when PML is enabled), the guest MSRs page, the
+ * vmcs01 VMCS (vmcs_config.order pages) and MSR bitmap, and the vcpu's
+ * pio_data and APIC pages.
+ *
+ * Returns 0 on success. On failure, unwinds any mappings already
+ * established via vmx_unmap_vcpu() and returns the error from the
+ * failing kvm_copy_ptes() call.
+ */
+static int vmx_map_vcpu(struct vcpu_vmx *vmx)
+{
+ int rv;
+
+ pr_debug("mapping vmx %p", vmx);
+
+ /* The vcpu_vmx structure itself. */
+ rv = kvm_copy_ptes(vmx, sizeof(struct vcpu_vmx));
+ if (rv)
+ goto out_unmap_vcpu;
+
+ /* Page Modification Logging buffer, only present when PML is on. */
+ if (enable_pml) {
+ rv = kvm_copy_ptes(vmx->pml_pg, PAGE_SIZE);
+ if (rv)
+ goto out_unmap_vcpu;
+ }
+
+ rv = kvm_copy_ptes(vmx->guest_msrs, PAGE_SIZE);
+ if (rv)
+ goto out_unmap_vcpu;
+
+ /* VMCS region size is vmcs_config.order pages. */
+ rv = kvm_copy_ptes(vmx->vmcs01.vmcs, PAGE_SIZE << vmcs_config.order);
+ if (rv)
+ goto out_unmap_vcpu;
+
+ rv = kvm_copy_ptes(vmx->vmcs01.msr_bitmap, PAGE_SIZE);
+ if (rv)
+ goto out_unmap_vcpu;
+
+ rv = kvm_copy_ptes(vmx->vcpu.arch.pio_data, PAGE_SIZE);
+ if (rv)
+ goto out_unmap_vcpu;
+
+ rv = kvm_copy_ptes(vmx->vcpu.arch.apic, sizeof(struct kvm_lapic));
+ if (rv)
+ goto out_unmap_vcpu;
+
+ return 0;
+
+out_unmap_vcpu:
+ /* Clears every range, including ones not yet mapped — see
+ * vmx_unmap_vcpu(). */
+ vmx_unmap_vcpu(vmx);
+ return rv;
+}
+
static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ if (kvm_isolation())
+ vmx_unmap_vcpu(vmx);
if (enable_pml)
vmx_destroy_pml_buffer(vmx);
free_vpid(vmx->vpid);
@@ -6679,6 +6738,12 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
vmx->ept_pointer = INVALID_PAGE;
+ if (kvm_isolation()) {
+ err = vmx_map_vcpu(vmx);
+ if (err)
+ goto free_vmcs;
+ }
+
return &vmx->vcpu;
free_vmcs:
--
1.7.1
Powered by blists - more mailing lists