Date:   Wed, 26 Sep 2018 07:40:19 -0700
From:   Sean Christopherson <sean.j.christopherson@...el.com>
To:     Vitaly Kuznetsov <vkuznets@...hat.com>
Cc:     kvm@...r.kernel.org, Paolo Bonzini <pbonzini@...hat.com>,
        Radim Krčmář <rkrcmar@...hat.com>,
        Jim Mattson <jmattson@...gle.com>,
        Liran Alon <liran.alon@...cle.com>,
        linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2 6/9] x86/kvm/mmu: make space for source data caching
 in struct kvm_mmu

On Tue, Sep 25, 2018 at 07:58:41PM +0200, Vitaly Kuznetsov wrote:
> In preparation for avoiding MMU reconfiguration, we need space to
> cache source data. As this partially intersects with kvm_mmu_page_role,
> create a 64-bit union, kvm_mmu_role, holding both the base and extended
> data. No functional change.
> 
> Signed-off-by: Vitaly Kuznetsov <vkuznets@...hat.com>
> ---
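
For reference, the 64-bit union described in the changelog (the kvm_host.h
hunk is trimmed from this reply) pairs the existing 32-bit page role with a
new 32-bit extended part. A minimal sketch, consistent with the BUILD_BUG_ON
assertions further down; field names here are illustrative, the real layout
is in the header hunk:

	union kvm_mmu_extended_role {
		u32 word;
		/* cached source data (e.g. CR0/CR4 state) lives here */
	};

	union kvm_mmu_role {
		u64 as_u64;	/* whole-role view, for cheap compares */
		struct {
			union kvm_mmu_page_role base;	 /* existing role */
			union kvm_mmu_extended_role ext; /* new extension */
		};
	};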

One nit below, other than that...

Reviewed-by: Sean Christopherson <sean.j.christopherson@...el.com>

> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index e59e5f49c8c2..bb1ef0f68f8e 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -2359,7 +2359,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
>  	int collisions = 0;
>  	LIST_HEAD(invalid_list);
>  
> -	role = vcpu->arch.mmu->base_role;
> +	role = vcpu->arch.mmu->mmu_role.base;
>  	role.level = level;
>  	role.direct = direct;
>  	if (role.direct)
> @@ -4407,7 +4407,8 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
>  void
>  reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
>  {
> -	bool uses_nx = context->nx || context->base_role.smep_andnot_wp;
> +	bool uses_nx = context->nx ||
> +		context->mmu_role.base.smep_andnot_wp;
>  	struct rsvd_bits_validate *shadow_zero_check;
>  	int i;
>  
> @@ -4726,7 +4727,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
>  {
>  	struct kvm_mmu *context = vcpu->arch.mmu;
>  
> -	context->base_role.word = mmu_base_role_mask.word &
> +	context->mmu_role.base.word = mmu_base_role_mask.word &
>  				  kvm_calc_tdp_mmu_root_page_role(vcpu).word;
>  	context->page_fault = tdp_page_fault;
>  	context->sync_page = nonpaging_sync_page;
> @@ -4807,7 +4808,7 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
>  	else
>  		paging32_init_context(vcpu, context);
>  
> -	context->base_role.word = mmu_base_role_mask.word &
> +	context->mmu_role.base.word = mmu_base_role_mask.word &
>  				  kvm_calc_shadow_mmu_root_page_role(vcpu).word;
>  	reset_shadow_zero_bits_mask(vcpu, context);
>  }
> @@ -4816,7 +4817,7 @@ EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
>  static union kvm_mmu_page_role
>  kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty)
>  {
> -	union kvm_mmu_page_role role = vcpu->arch.mmu->base_role;
> +	union kvm_mmu_page_role role = vcpu->arch.mmu->mmu_role.base;
>  
>  	role.level = PT64_ROOT_4LEVEL;
>  	role.direct = false;
> @@ -4846,7 +4847,8 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
>  	context->update_pte = ept_update_pte;
>  	context->root_level = PT64_ROOT_4LEVEL;
>  	context->direct_map = false;
> -	context->base_role.word = root_page_role.word & mmu_base_role_mask.word;
> +	context->mmu_role.base.word =
> +		root_page_role.word & mmu_base_role_mask.word;
>  	context->get_pdptr = kvm_pdptr_read;
>  
>  	update_permission_bitmask(vcpu, context, true);
> @@ -5161,10 +5163,13 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
>  
>  		local_flush = true;
>  		while (npte--) {
> +			unsigned int base_role =

Nit: should this be a u32 to match mmu_role.base.word?
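
I.e., something like:

	u32 base_role = vcpu->arch.mmu->mmu_role.base.word;

'word' is a u32 (per the BUILD_BUG_ON below), and the value only feeds the
XOR/mask compare, so the types would then line up exactly.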

> +				vcpu->arch.mmu->mmu_role.base.word;
> +
>  			entry = *spte;
>  			mmu_page_zap_pte(vcpu->kvm, sp, spte);
>  			if (gentry &&
> -			      !((sp->role.word ^ vcpu->arch.mmu->base_role.word)
> +			      !((sp->role.word ^ base_role)
>  			      & mmu_base_role_mask.word) && rmap_can_add(vcpu))
>  				mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
>  			if (need_remote_flush(entry, *spte))
> @@ -5861,6 +5866,16 @@ int kvm_mmu_module_init(void)
>  {
>  	int ret = -ENOMEM;
>  
> +	/*
> +	 * MMU roles use union aliasing, which is, generally speaking,
> +	 * undefined behavior. However, we know how current compilers behave,
> +	 * and the status quo is unlikely to change. The guards below will
> +	 * let us know if that assumption ever becomes false.
> +	 */
> +	BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32));
> +	BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32));
> +	BUILD_BUG_ON(sizeof(union kvm_mmu_role) != sizeof(u64));
> +
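
The aliasing these guards protect is the pattern of filling the role through
the 32-bit base/ext views and then comparing through the u64 view, e.g.
(kvm_calc_mmu_role() is a hypothetical helper, field names as in the sketch
above):

	union kvm_mmu_role old = vcpu->arch.mmu->mmu_role;
	union kvm_mmu_role new = kvm_calc_mmu_role(vcpu); /* fills .base and .ext */

	if (new.as_u64 == old.as_u64)
		return; /* cached source data unchanged, skip reconfiguration */

If a compiler ever padded one of the unions, the sizeof checks would fire at
build time instead of the u64 compare silently reading indeterminate bytes.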
>  	kvm_mmu_reset_all_pte_masks();
>  
>  	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 93ff08136fc1..c56a80c15c4f 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -9321,7 +9321,7 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
>  
>  		kvm_mmu_unload(vcpu);
>  		mmu->ept_ad = accessed_dirty;
> -		mmu->base_role.ad_disabled = !accessed_dirty;
> +		mmu->mmu_role.base.ad_disabled = !accessed_dirty;
>  		vmcs12->ept_pointer = address;
>  		/*
>  		 * TODO: Check what's the correct approach in case
> -- 
> 2.17.1
> 
