Date:   Thu, 13 Aug 2020 18:40:14 -0700
From:   Sean Christopherson <sean.j.christopherson@...el.com>
To:     Vitaly Kuznetsov <vkuznets@...hat.com>
Cc:     kvm@...r.kernel.org, Paolo Bonzini <pbonzini@...hat.com>,
        Wanpeng Li <wanpengli@...cent.com>,
        Jim Mattson <jmattson@...gle.com>,
        Peter Xu <peterx@...hat.com>, Michael Tsirkin <mst@...hat.com>,
        Julia Suvorova <jsuvorov@...hat.com>,
        Andy Lutomirski <luto@...nel.org>,
        Andrew Jones <drjones@...hat.com>, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2 1/3] KVM: x86: move kvm_vcpu_gfn_to_memslot() out of
 try_async_pf()

On Fri, Aug 07, 2020 at 04:12:30PM +0200, Vitaly Kuznetsov wrote:
> No functional change intended. Slot flags will need to be analyzed
> prior to try_async_pf() when KVM_MEM_PCI_HOLE is implemented.

Why?  Wouldn't it be just as easy, and arguably more appropriate, to add
KVM_PFN_ERR_PCI_HOLE and update handle_abnormal_pfn() accordingly?
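
E.g. something along these lines (completely untested; KVM_PFN_ERR_PCI_HOLE,
its "+ 3" value and the RET_PF_EMULATE handling are just placeholders to
illustrate the idea, not a concrete proposal):

	/* include/linux/kvm_host.h, next to the existing KVM_PFN_ERR_* values */
	#define KVM_PFN_ERR_PCI_HOLE	(KVM_PFN_ERR_MASK + 3)

	/*
	 * Have the pfn lookup return it for a KVM_MEM_PCI_HOLE slot, then
	 * catch it in handle_abnormal_pfn() alongside the other error pfns:
	 */
	if (unlikely(pfn == KVM_PFN_ERR_PCI_HOLE)) {
		/*
		 * Emulate the access as a PCI hole, e.g. reads return
		 * all-ones and writes are dropped.
		 */
		*ret_val = RET_PF_EMULATE;
		return true;
	}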

> Signed-off-by: Vitaly Kuznetsov <vkuznets@...hat.com>
> ---
>  arch/x86/kvm/mmu/mmu.c         | 14 ++++++++------
>  arch/x86/kvm/mmu/paging_tmpl.h |  7 +++++--
>  2 files changed, 13 insertions(+), 8 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 862bf418214e..fef6956393f7 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -4042,11 +4042,10 @@ static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
>  				  kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
>  }
>  
> -static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
> -			 gpa_t cr2_or_gpa, kvm_pfn_t *pfn, bool write,
> -			 bool *writable)
> +static bool try_async_pf(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
> +			 bool prefault, gfn_t gfn, gpa_t cr2_or_gpa,
> +			 kvm_pfn_t *pfn, bool write, bool *writable)
>  {
> -	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
>  	bool async;
>  
>  	/* Don't expose private memslots to L2. */
> @@ -4082,7 +4081,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
>  	bool exec = error_code & PFERR_FETCH_MASK;
>  	bool lpage_disallowed = exec && is_nx_huge_page_enabled();
>  	bool map_writable;
> -
> +	struct kvm_memory_slot *slot;
>  	gfn_t gfn = gpa >> PAGE_SHIFT;
>  	unsigned long mmu_seq;
>  	kvm_pfn_t pfn;
> @@ -4104,7 +4103,10 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
>  	mmu_seq = vcpu->kvm->mmu_notifier_seq;
>  	smp_rmb();
>  
> -	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
> +	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
> +
> +	if (try_async_pf(vcpu, slot, prefault, gfn, gpa, &pfn, write,
> +			 &map_writable))
>  		return RET_PF_RETRY;
>  
>  	if (handle_abnormal_pfn(vcpu, is_tdp ? 0 : gpa, gfn, pfn, ACC_ALL, &r))
> diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
> index 0172a949f6a7..5c6a895f67c3 100644
> --- a/arch/x86/kvm/mmu/paging_tmpl.h
> +++ b/arch/x86/kvm/mmu/paging_tmpl.h
> @@ -779,6 +779,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
>  	int write_fault = error_code & PFERR_WRITE_MASK;
>  	int user_fault = error_code & PFERR_USER_MASK;
>  	struct guest_walker walker;
> +	struct kvm_memory_slot *slot;
>  	int r;
>  	kvm_pfn_t pfn;
>  	unsigned long mmu_seq;
> @@ -833,8 +834,10 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
>  	mmu_seq = vcpu->kvm->mmu_notifier_seq;
>  	smp_rmb();
>  
> -	if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
> -			 &map_writable))
> +	slot = kvm_vcpu_gfn_to_memslot(vcpu, walker.gfn);
> +
> +	if (try_async_pf(vcpu, slot, prefault, walker.gfn, addr, &pfn,
> +			 write_fault, &map_writable))
>  		return RET_PF_RETRY;
>  
>  	if (handle_abnormal_pfn(vcpu, addr, walker.gfn, pfn, walker.pte_access, &r))
> -- 
> 2.25.4
> 
