lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <56C7029F.8010106@redhat.com>
Date:	Fri, 19 Feb 2016 12:55:11 +0100
From:	Paolo Bonzini <pbonzini@...hat.com>
To:	Xiao Guangrong <guangrong.xiao@...ux.intel.com>
Cc:	gleb@...nel.org, mtosatti@...hat.com, kvm@...r.kernel.org,
	linux-kernel@...r.kernel.org, kai.huang@...ux.intel.com,
	jike.song@...el.com
Subject: Re: [PATCH v3 10/11] KVM: MMU: clear write-flooding on the fast path
 of tracked page



On 14/02/2016 12:31, Xiao Guangrong wrote:
> If the page fault is caused by a write access on a write-tracked page, the
> real shadow page walking is skipped, and we lose the chance to clear the
> write-flooding count for the page structure the current vcpu is using
> 
> Fix it by locklessly walking the shadow page table to clear write flooding
> on the shadow page structure outside of mmu-lock. To make this safe, the
> count is changed to atomic_t

Should this be moved earlier in the series, so that the issue never
surfaces?

Paolo

> Signed-off-by: Xiao Guangrong <guangrong.xiao@...ux.intel.com>
> ---
>  arch/x86/include/asm/kvm_host.h |  2 +-
>  arch/x86/kvm/mmu.c              | 22 ++++++++++++++++++++--
>  arch/x86/kvm/paging_tmpl.h      |  4 +++-
>  3 files changed, 24 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 282bc2f..254d103 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -277,7 +277,7 @@ struct kvm_mmu_page {
>  #endif
>  
>  	/* Number of writes since the last time traversal visited this page.  */
> -	int write_flooding_count;
> +	atomic_t write_flooding_count;
>  };
>  
>  struct kvm_pio_request {
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 4986615..f924e6c 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -2073,7 +2073,7 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
>  
>  static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
>  {
> -	sp->write_flooding_count = 0;
> +	atomic_set(&sp->write_flooding_count,  0);
>  }
>  
>  static void clear_sp_write_flooding_count(u64 *spte)
> @@ -3407,6 +3407,23 @@ static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
>  	return false;
>  }
>  
> +static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
> +{
> +	struct kvm_shadow_walk_iterator iterator;
> +	u64 spte;
> +
> +	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
> +		return;
> +
> +	walk_shadow_page_lockless_begin(vcpu);
> +	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
> +		clear_sp_write_flooding_count(iterator.sptep);
> +		if (!is_shadow_present_pte(spte))
> +			break;
> +	}
> +	walk_shadow_page_lockless_end(vcpu);
> +}
> +
>  static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
>  				u32 error_code, bool prefault)
>  {
> @@ -4236,7 +4253,8 @@ static bool detect_write_flooding(struct kvm_mmu_page *sp)
>  	if (sp->role.level == PT_PAGE_TABLE_LEVEL)
>  		return false;
>  
> -	return ++sp->write_flooding_count >= 3;
> +	atomic_inc(&sp->write_flooding_count);
> +	return atomic_read(&sp->write_flooding_count) >= 3;
>  }
>  
>  /*
> diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
> index c3a30c2..5985156 100644
> --- a/arch/x86/kvm/paging_tmpl.h
> +++ b/arch/x86/kvm/paging_tmpl.h
> @@ -735,8 +735,10 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
>  		return 0;
>  	}
>  
> -	if (page_fault_handle_page_track(vcpu, error_code, walker.gfn))
> +	if (page_fault_handle_page_track(vcpu, error_code, walker.gfn)) {
> +		shadow_page_table_clear_flood(vcpu, addr);
>  		return 1;
> +	}
>  
>  	vcpu->arch.write_fault_to_shadow_pgtable = false;
>  
> 

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ