lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <YoPRuh2T/9RvWih4@google.com>
Date:   Tue, 17 May 2022 16:47:54 +0000
From:   David Matlack <dmatlack@...gle.com>
To:     Lai Jiangshan <jiangshanlai@...il.com>
Cc:     linux-kernel@...r.kernel.org, kvm@...r.kernel.org,
        Paolo Bonzini <pbonzini@...hat.com>,
        Sean Christopherson <seanjc@...gle.com>,
        Lai Jiangshan <jiangshan.ljs@...group.com>,
        Vitaly Kuznetsov <vkuznets@...hat.com>,
        Wanpeng Li <wanpengli@...cent.com>,
        Jim Mattson <jmattson@...gle.com>,
        Joerg Roedel <joro@...tes.org>,
        Thomas Gleixner <tglx@...utronix.de>,
        Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
        Dave Hansen <dave.hansen@...ux.intel.com>, x86@...nel.org,
        "H. Peter Anvin" <hpa@...or.com>
Subject: Re: [PATCH V2 5/7] KVM: X86/MMU: Remove the check of the return
 value of to_shadow_page()

On Tue, May 03, 2022 at 11:07:33PM +0800, Lai Jiangshan wrote:
> From: Lai Jiangshan <jiangshan.ljs@...group.com>
> 
> Remove the check of the return value of to_shadow_page() in
> mmu_free_root_page(), kvm_mmu_free_guest_mode_roots(), is_unsync_root()
> and is_tdp_mmu() because it cannot return NULL.
> 
> Remove the check of the return value of to_shadow_page() in
> is_page_fault_stale() and is_obsolete_root() because it cannot return
> NULL and the obsoleting of the special shadow page is already handled
> in a different way.
> 
> When the obsoleting process is done, all the obsoleted shadow pages are
> already unlinked from the special pages with the help of the parent rmap
> of the children, and the special pages become theoretically valid again.
> The special shadow page can be freed if is_obsolete_sp() returns true,
> or be reused if is_obsolete_sp() returns false.
> 
> Signed-off-by: Lai Jiangshan <jiangshan.ljs@...group.com>

Reviewed-by: David Matlack <dmatlack@...gle.com>

> ---
>  arch/x86/kvm/mmu/mmu.c     | 44 +++-----------------------------------
>  arch/x86/kvm/mmu/tdp_mmu.h |  7 +-----
>  2 files changed, 4 insertions(+), 47 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 6f626d7e8ebb..bcb3e2730277 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -3318,8 +3318,6 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
>  		return;
>  
>  	sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
> -	if (WARN_ON(!sp))
> -		return;
>  
>  	if (is_tdp_mmu_page(sp))
>  		kvm_tdp_mmu_put_root(kvm, sp, false);
> @@ -3422,8 +3420,7 @@ void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
>  		if (!VALID_PAGE(root_hpa))
>  			continue;
>  
> -		if (!to_shadow_page(root_hpa) ||
> -			to_shadow_page(root_hpa)->role.guest_mode)
> +		if (to_shadow_page(root_hpa)->role.guest_mode)
>  			roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
>  	}
>  
> @@ -3673,13 +3670,6 @@ static bool is_unsync_root(hpa_t root)
>  	smp_rmb();
>  	sp = to_shadow_page(root);
>  
> -	/*
> -	 * PAE roots (somewhat arbitrarily) aren't backed by shadow pages, the
> -	 * PDPTEs for a given PAE root need to be synchronized individually.
> -	 */
> -	if (WARN_ON_ONCE(!sp))
> -		return false;
> -
>  	if (sp->unsync || sp->unsync_children)
>  		return true;
>  
> @@ -3975,21 +3965,7 @@ static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
>  static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
>  				struct kvm_page_fault *fault, int mmu_seq)
>  {
> -	struct kvm_mmu_page *sp = to_shadow_page(vcpu->arch.mmu->root.hpa);
> -
> -	/* Special roots, e.g. pae_root, are not backed by shadow pages. */
> -	if (sp && is_obsolete_sp(vcpu->kvm, sp))
> -		return true;
> -
> -	/*
> -	 * Roots without an associated shadow page are considered invalid if
> -	 * there is a pending request to free obsolete roots.  The request is
> -	 * only a hint that the current root _may_ be obsolete and needs to be
> -	 * reloaded, e.g. if the guest frees a PGD that KVM is tracking as a
> -	 * previous root, then __kvm_mmu_prepare_zap_page() signals all vCPUs
> -	 * to reload even if no vCPU is actively using the root.
> -	 */
> -	if (!sp && kvm_test_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu))
> +	if (is_obsolete_sp(vcpu->kvm, to_shadow_page(vcpu->arch.mmu->root.hpa)))
>  		return true;
>  
>  	return fault->slot &&
> @@ -5094,24 +5070,10 @@ void kvm_mmu_unload(struct kvm_vcpu *vcpu)
>  
>  static bool is_obsolete_root(struct kvm *kvm, hpa_t root_hpa)
>  {
> -	struct kvm_mmu_page *sp;
> -
>  	if (!VALID_PAGE(root_hpa))
>  		return false;
>  
> -	/*
> -	 * When freeing obsolete roots, treat roots as obsolete if they don't
> -	 * have an associated shadow page.  This does mean KVM will get false
> -	 * positives and free roots that don't strictly need to be freed, but
> -	 * such false positives are relatively rare:
> -	 *
> -	 *  (a) only PAE paging and nested NPT has roots without shadow pages
> -	 *  (b) remote reloads due to a memslot update obsoletes _all_ roots
> -	 *  (c) KVM doesn't track previous roots for PAE paging, and the guest
> -	 *      is unlikely to zap an in-use PGD.
> -	 */
> -	sp = to_shadow_page(root_hpa);
> -	return !sp || is_obsolete_sp(kvm, sp);
> +	return is_obsolete_sp(kvm, to_shadow_page(root_hpa));
>  }
>  
>  static void __kvm_mmu_free_obsolete_roots(struct kvm *kvm, struct kvm_mmu *mmu)
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
> index c163f7cc23ca..5779a2a7161e 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.h
> +++ b/arch/x86/kvm/mmu/tdp_mmu.h
> @@ -78,13 +78,8 @@ static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
>  	if (WARN_ON(!VALID_PAGE(hpa)))
>  		return false;
>  
> -	/*
> -	 * A NULL shadow page is legal when shadowing a non-paging guest with
> -	 * PAE paging, as the MMU will be direct with root_hpa pointing at the
> -	 * pae_root page, not a shadow page.
> -	 */
>  	sp = to_shadow_page(hpa);
> -	return sp && is_tdp_mmu_page(sp) && sp->root_count;
> +	return is_tdp_mmu_page(sp) && sp->root_count;
>  }
>  #else
>  static inline int kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return 0; }
> -- 
> 2.19.1.6.gb485710b
> 

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ