Date:   Mon, 21 Nov 2016 16:07:03 +0100
From:   Paolo Bonzini <pbonzini@...hat.com>
To:     Brijesh Singh <brijesh.singh@....com>, kvm@...r.kernel.org
Cc:     Thomas.Lendacky@....com, rkrcmar@...hat.com, joro@...tes.org,
        x86@...nel.org, linux-kernel@...r.kernel.org, mingo@...hat.com,
        hpa@...or.com, tglx@...utronix.de, bp@...e.de
Subject: Re: [PATCH v1 3/3] kvm: svm: Use the hardware provided GPA instead of
 page walk



On 14/11/2016 23:16, Brijesh Singh wrote:
> From: Tom Lendacky <thomas.lendacky@....com>
> 
> When a guest causes an NPF which requires emulation, KVM sometimes walks
> the guest page tables to translate the GVA to a GPA. This is unnecessary
> most of the time on AMD hardware since the hardware provides the GPA in
> EXITINFO2.
> 
> The only exceptions are string operations that use a rep prefix and
> instructions that access two memory locations. With rep, the GPA will only
> be the value from the initial NPF, and with two memory operands we won't
> know which of the two addresses is reported in EXITINFO2.
> 
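For context (and not part of this patch): on the SVM side the faulting
guest-physical address is already delivered in EXITINFO2, so the existing
page-fault intercept gets it essentially for free. A rough sketch, with an
illustrative function name rather than the exact code:

/*
 * Rough sketch of the AMD page-fault intercept: on a nested page
 * fault, exit_info_2 in the VMCB control area already holds the
 * faulting guest-physical address that this series wants to reuse
 * instead of re-walking the guest page tables.
 */
static int npf_interception_sketch(struct vcpu_svm *svm)
{
	u64 fault_address = svm->vmcb->control.exit_info_2; /* GPA on #NPF */
	u64 error_code = svm->vmcb->control.exit_info_1;

	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
				  NULL, 0);
}
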
> Signed-off-by: Tom Lendacky <thomas.lendacky@....com>
> Reviewed-by: Borislav Petkov <bp@...e.de>
> Signed-off-by: Brijesh Singh <brijesh.singh@....com>
> ---
>  arch/x86/include/asm/kvm_emulate.h |    3 +++
>  arch/x86/include/asm/kvm_host.h    |    3 +++
>  arch/x86/kvm/svm.c                 |    9 ++++++++-
>  arch/x86/kvm/x86.c                 |   17 ++++++++++++++++-
>  4 files changed, 30 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
> index e9cd7be..2d1ac09 100644
> --- a/arch/x86/include/asm/kvm_emulate.h
> +++ b/arch/x86/include/asm/kvm_emulate.h
> @@ -344,6 +344,9 @@ struct x86_emulate_ctxt {
>  	struct read_cache mem_read;
>  };
>  
> +/* String operation identifier (matches the definition in emulate.c) */
> +#define CTXT_STRING_OP	(1 << 13)
> +
>  /* Repeat String Operation Prefix */
>  #define REPE_PREFIX	0xf3
>  #define REPNE_PREFIX	0xf2
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 77cb3f9..fd5b1c8 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -668,6 +668,9 @@ struct kvm_vcpu_arch {
>  
>  	int pending_ioapic_eoi;
>  	int pending_external_vector;
> +
> +	/* GPA available (AMD only) */
> +	bool gpa_available;
>  };
>  
>  struct kvm_lpage_info {
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index 5e64e656..b442c5a 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -275,6 +275,9 @@ static int avic;
>  module_param(avic, int, S_IRUGO);
>  #endif
>  
> +/* EXITINFO2 contains valid GPA */
> +static bool gpa_avail = true;
> +
>  /* AVIC VM ID bit masks and lock */
>  static DECLARE_BITMAP(avic_vm_id_bitmap, AVIC_VM_ID_NR);
>  static DEFINE_SPINLOCK(avic_vm_id_lock);
> @@ -1055,8 +1058,10 @@ static __init int svm_hardware_setup(void)
>  			goto err;
>  	}
>  
> -	if (!boot_cpu_has(X86_FEATURE_NPT))
> +	if (!boot_cpu_has(X86_FEATURE_NPT)) {
>  		npt_enabled = false;
> +		gpa_avail = false;

This is not necessary, since you will never have exit_code ==
SVM_EXIT_NPF && !gpa_avail.
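
In other words this hunk can simply be dropped, keeping the original code:

	if (!boot_cpu_has(X86_FEATURE_NPT))
		npt_enabled = false;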

> +	}
>  
>  	if (npt_enabled && !npt) {
>  		printk(KERN_INFO "kvm: Nested Paging disabled\n");
> @@ -4192,6 +4197,8 @@ static int handle_exit(struct kvm_vcpu *vcpu)
>  		vcpu->arch.cr0 = svm->vmcb->save.cr0;
>  	if (npt_enabled)
>  		vcpu->arch.cr3 = svm->vmcb->save.cr3;
> +	if (gpa_avail)
> +		vcpu->arch.gpa_available = (exit_code == SVM_EXIT_NPF);

The "if", and the body moved just before

	return svm_exit_handlers[exit_code](svm);

I'll try doing a similar patch for Intel too, for better testing.
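
Concretely, something like this at the very end of handle_exit() (just a
sketch of the suggested placement, assuming the module-level gpa_avail flag
goes away entirely):

	/* Right before dispatching to the exit handler: */
	vcpu->arch.gpa_available = (exit_code == SVM_EXIT_NPF);

	return svm_exit_handlers[exit_code](svm);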

>  
>  	if (unlikely(svm->nested.exit_required)) {
>  		nested_svm_vmexit(svm);
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index d02aeff..c290794 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -4420,7 +4420,19 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
>  		return 1;
>  	}
>  
> -	*gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
> +	/*
> +	 * If the exit was due to an NPF we may already have a GPA.
> +	 * If the GPA is present, use it to avoid the GVA-to-GPA table
> +	 * walk. Note that this cannot be used for string operations,
> +	 * since a string operation using rep will only have the initial
> +	 * GPA from when the NPF occurred.
> +	 */
> +	if (vcpu->arch.gpa_available &&
> +	    !(vcpu->arch.emulate_ctxt.d & CTXT_STRING_OP))
> +		*gpa = exception->address;
> +	else
> +		*gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access,
> +						       exception);
>  
>  	if (*gpa == UNMAPPED_GVA)
>  		return -1;
> @@ -5542,6 +5554,9 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
>  	}
>  
>  restart:
> +	/* Save the faulting GPA (cr2) in the address field */
> +	ctxt->exception.address = cr2;
> +
>  	r = x86_emulate_insn(ctxt);
>  
>  	if (r == EMULATION_INTERCEPTED)
> 
