Message-Id: <1523829198-13236-9-git-send-email-karahmed@amazon.de>
Date: Sun, 15 Apr 2018 23:53:14 +0200
From: KarimAllah Ahmed <karahmed@...zon.de>
To: linux-kernel@...r.kernel.org, kvm@...r.kernel.org, pbonzini@...hat.com, rkrcmar@...hat.com
Cc: KarimAllah Ahmed <karahmed@...zon.de>
Subject: [PATCH v2 08/12] KVM/nVMX: Use kvm_vcpu_map when mapping the posted interrupt descriptor table

Use kvm_vcpu_map when mapping the posted interrupt descriptor table,
since kvm_vcpu_gpa_to_page() and kmap() only work for guest memory that
is backed by a "struct page".

One additional semantic change is that the lifecycle of the host virtual
mapping has changed a bit: it now has the same lifetime as the pinning
of the interrupt descriptor table page on the host side.

Signed-off-by: KarimAllah Ahmed <karahmed@...zon.de>
---
v1 -> v2:
- Do not change the lifecycle of the mapping (pbonzini)
---
 arch/x86/kvm/vmx.c | 45 +++++++++++++++------------------------------
 1 file changed, 15 insertions(+), 30 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index b55053a..3dd8bb2 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -493,7 +493,7 @@ struct nested_vmx {
 	 */
 	struct page *apic_access_page;
 	struct kvm_host_map virtual_apic_map;
-	struct page *pi_desc_page;
+	struct kvm_host_map pi_desc_map;
 	struct kvm_host_map msr_bitmap_map;
 
 	struct pi_desc *pi_desc;
@@ -7807,12 +7807,8 @@ static void free_nested(struct vcpu_vmx *vmx)
 		vmx->nested.apic_access_page = NULL;
 	}
 	kvm_vcpu_unmap(&vmx->nested.virtual_apic_map);
-	if (vmx->nested.pi_desc_page) {
-		kunmap(vmx->nested.pi_desc_page);
-		kvm_release_page_dirty(vmx->nested.pi_desc_page);
-		vmx->nested.pi_desc_page = NULL;
-		vmx->nested.pi_desc = NULL;
-	}
+	kvm_vcpu_unmap(&vmx->nested.pi_desc_map);
+	vmx->nested.pi_desc = NULL;
 
 	free_loaded_vmcs(&vmx->nested.vmcs02);
 }
@@ -10413,24 +10409,16 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 	}
 
 	if (nested_cpu_has_posted_intr(vmcs12)) {
-		if (vmx->nested.pi_desc_page) { /* shouldn't happen */
-			kunmap(vmx->nested.pi_desc_page);
-			kvm_release_page_dirty(vmx->nested.pi_desc_page);
-			vmx->nested.pi_desc_page = NULL;
+		map = &vmx->nested.pi_desc_map;
+
+		if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) {
+			vmx->nested.pi_desc =
+				(struct pi_desc *)(((void *)map->hva) +
+				offset_in_page(vmcs12->posted_intr_desc_addr));
+			vmcs_write64(POSTED_INTR_DESC_ADDR, pfn_to_hpa(map->pfn) +
+				offset_in_page(vmcs12->posted_intr_desc_addr));
 		}
-		page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->posted_intr_desc_addr);
-		if (is_error_page(page))
-			return;
-		vmx->nested.pi_desc_page = page;
-		vmx->nested.pi_desc = kmap(vmx->nested.pi_desc_page);
-		vmx->nested.pi_desc =
-			(struct pi_desc *)((void *)vmx->nested.pi_desc +
-			(unsigned long)(vmcs12->posted_intr_desc_addr &
-			(PAGE_SIZE - 1)));
-		vmcs_write64(POSTED_INTR_DESC_ADDR,
-			page_to_phys(vmx->nested.pi_desc_page) +
-			(unsigned long)(vmcs12->posted_intr_desc_addr &
-			(PAGE_SIZE - 1)));
+
 	}
 	if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
 		vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
@@ -12067,13 +12055,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 		kvm_release_page_dirty(vmx->nested.apic_access_page);
 		vmx->nested.apic_access_page = NULL;
 	}
+	kvm_vcpu_unmap(&vmx->nested.virtual_apic_map);
-	if (vmx->nested.pi_desc_page) {
-		kunmap(vmx->nested.pi_desc_page);
-		kvm_release_page_dirty(vmx->nested.pi_desc_page);
-		vmx->nested.pi_desc_page = NULL;
-		vmx->nested.pi_desc = NULL;
-	}
+	kvm_vcpu_unmap(&vmx->nested.pi_desc_map);
+	vmx->nested.pi_desc = NULL;
 
 	/*
 	 * We are now running in L2, mmu_notifier will force to reload the
-- 
2.7.4
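
For readers without the rest of the series handy, the map/unmap pattern
this patch applies can be summarized with the sketch below. It is not
part of the patch: map_pi_desc() is a hypothetical helper, and the
kvm_vcpu_map()/kvm_vcpu_unmap() signatures are assumed from how the
diff above uses them (kvm_vcpu_map() takes a gfn and returns 0 on
success; kvm_vcpu_unmap() takes only the map).

/*
 * Hypothetical helper (not in the patch) showing the kvm_vcpu_map()
 * pattern from the diff above. Unlike kvm_vcpu_gpa_to_page() + kmap(),
 * kvm_vcpu_map() also works for guest memory that has no "struct page"
 * backing.
 */
static struct pi_desc *map_pi_desc(struct kvm_vcpu *vcpu, gpa_t gpa,
				   struct kvm_host_map *map)
{
	/* Assumed semantics: 0 on success, as in the diff above. */
	if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), map))
		return NULL;

	/* The mapping is page-granular; re-apply the sub-page offset. */
	return (struct pi_desc *)((void *)map->hva + offset_in_page(gpa));
}

Tearing the mapping down is then a single kvm_vcpu_unmap(map) plus
clearing the cached pointer, replacing the old kunmap() +
kvm_release_page_dirty() pair. This is the lifecycle change noted in
the commit message: the host virtual mapping now lives exactly as long
as the page stays pinned.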