Message-ID: <9810c96a-2156-4653-8055-701c0744528c@linux.intel.com>
Date:   Wed, 22 Nov 2023 10:18:47 +0800
From:   Binbin Wu <binbin.wu@...ux.intel.com>
To:     Isaku Yamahata <isaku.yamahata@...ux.intel.com>
Cc:     isaku.yamahata@...el.com, kvm@...r.kernel.org,
        linux-kernel@...r.kernel.org, isaku.yamahata@...il.com,
        Paolo Bonzini <pbonzini@...hat.com>, erdemaktas@...gle.com,
        Sean Christopherson <seanjc@...gle.com>,
        Sagi Shahar <sagis@...gle.com>,
        David Matlack <dmatlack@...gle.com>,
        Kai Huang <kai.huang@...el.com>,
        Zhi Wang <zhi.wang.linux@...il.com>, chen.bo@...el.com,
        hang.yuan@...el.com, tina.zhang@...el.com,
        Xiaoyao Li <xiaoyao.li@...el.com>
Subject: Re: [PATCH v6 11/16] KVM: x86/tdp_mmu: Split the large page when zap
 leaf



On 11/21/2023 7:00 PM, Isaku Yamahata wrote:
> On Tue, Nov 21, 2023 at 05:57:28PM +0800,
> Binbin Wu <binbin.wu@...ux.intel.com> wrote:
>
>>> diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
>>> index 7873e9ee82ad..a209a67decae 100644
>>> --- a/arch/x86/kvm/mmu/tdp_mmu.c
>>> +++ b/arch/x86/kvm/mmu/tdp_mmu.c
>>> @@ -964,6 +964,14 @@ bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
>>>    	return true;
>>>    }
>>> +
>>> +static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
>>> +						       struct tdp_iter *iter,
>>> +						       bool shared);
>>> +
>>> +static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
>>> +				   struct kvm_mmu_page *sp, bool shared);
>>> +
>>>    /*
>>>     * If can_yield is true, will release the MMU lock and reschedule if the
>>>     * scheduler needs the CPU or there is contention on the MMU lock. If this
>>> @@ -975,13 +983,15 @@ static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
>>>    			      gfn_t start, gfn_t end, bool can_yield, bool flush,
>>>    			      bool zap_private)
>>>    {
>>> +	bool is_private = is_private_sp(root);
>>> +	struct kvm_mmu_page *split_sp = NULL;
>>>    	struct tdp_iter iter;
>>>    	end = min(end, tdp_mmu_max_gfn_exclusive());
>>>    	lockdep_assert_held_write(&kvm->mmu_lock);
>>> -	WARN_ON_ONCE(zap_private && !is_private_sp(root));
>>> +	WARN_ON_ONCE(zap_private && !is_private);
>>>    	if (!zap_private && is_private_sp(root))
>> You can use is_private instead of is_private_sp(root) here as well.
> I'll update it.
>
>>>    		return false;
>>> @@ -1006,12 +1016,66 @@ static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
>>>    		    !is_last_spte(iter.old_spte, iter.level))
>>>    			continue;
>>> +		if (is_private && kvm_gfn_shared_mask(kvm) &&
>>> +		    is_large_pte(iter.old_spte)) {
>>> +			gfn_t gfn = iter.gfn & ~kvm_gfn_shared_mask(kvm);
>>> +			gfn_t mask = KVM_PAGES_PER_HPAGE(iter.level) - 1;
>>> +			struct kvm_memory_slot *slot;
>>> +			struct kvm_mmu_page *sp;
>>> +
>>> +			slot = gfn_to_memslot(kvm, gfn);
>>> +			if (kvm_hugepage_test_mixed(slot, gfn, iter.level) ||
>>> +			    (gfn & mask) < start ||
>>> +			    end < (gfn & mask) + KVM_PAGES_PER_HPAGE(iter.level)) {
>>> +				WARN_ON_ONCE(!can_yield);
>>> +				if (split_sp) {
>>> +					sp = split_sp;
>>> +					split_sp = NULL;
>>> +					sp->role = tdp_iter_child_role(&iter);
>>> +				} else {
>>> +					WARN_ON(iter.yielded);
>>> +					if (flush && can_yield) {
>>> +						kvm_flush_remote_tlbs(kvm);
>>> +						flush = false;
>>> +					}
>> Is it necessary to do the flush here?
> Because tdp_mmu_alloc_sp_for_split() may unlock mmu_lock and block.
> While it blocks, another thread can operate on the KVM MMU and get
> confused by the stale TLB entries.
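OK, so the rule is to never drop mmu_lock with a TLB flush still pending.
I.e., something like this (just a sketch of my understanding; the
"may_drop_mmu_lock" condition is made up, not code from this patch):

	if (flush && may_drop_mmu_lock) {
		/* Flush before another thread can run with stale TLB entries. */
		kvm_flush_remote_tlbs(kvm);
		flush = false;
	}
	/* May release mmu_lock and reschedule while allocating. */
	sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, false);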
>
>
>>> +					sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, false);
>>> +					if (iter.yielded) {
>>> +						split_sp = sp;
>>> +						continue;
>>> +					}
>>> +				}
>>> +				KVM_BUG_ON(!sp, kvm);
>>> +
>>> +				tdp_mmu_init_sp(sp, iter.sptep, iter.gfn);
>>> +				if (tdp_mmu_split_huge_page(kvm, &iter, sp, false)) {
>>> +					kvm_flush_remote_tlbs(kvm);
>>> +					flush = false;
>> Why does it need to flush the TLB immediately if tdp_mmu_split_huge_page() fails?
> Hmm, we don't need it.  When breaking up a page table, we need to flush the
> TLB before issuing TDH.MEM.PAGE.DEMOTE(), not after it.  Will remove those two lines.
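I see, so for a private huge page the required ordering is roughly
(tdh_mem_page_demote() below is just my placeholder for however the
TDH.MEM.PAGE.DEMOTE SEAMCALL is wrapped in this series):

	/* 1. Make sure no stale translations are left... */
	kvm_flush_remote_tlbs(kvm);
	/* 2. ...then demote the private huge page. */
	err = tdh_mem_page_demote(...);

and not a flush after the demote fails.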
>
>
>> Also, when the KVM MMU write lock is held, it seems tdp_mmu_split_huge_page()
>> will not fail.
> It can fail with TDX_OPERAND_BUSY due to contention on the secure-EPT tree
> lock with other vCPUs doing TDH.VP.ENTER().  TDH.VP.ENTER() can take an
> exclusive lock on the secure-EPT.
>
>
>> But let's assume this condition can be triggered.  Since sp is a local
>> variable, it loses its value after the continue, and split_sp is still NULL,
>> so the next iteration will try to allocate a new sp.  Memory leak here?
> Nice catch. I'll add split_sp = sp;
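Thanks. So with that fix, plus the two flush lines removed as you said
above, the failure path would look roughly like this (my reconstruction,
not the actual next-version code):

	tdp_mmu_init_sp(sp, iter.sptep, iter.gfn);
	if (tdp_mmu_split_huge_page(kvm, &iter, sp, false)) {
		/* Keep sp so the retry doesn't re-allocate (and leak) it. */
		split_sp = sp;
		/* force retry on this gfn. */
		iter.yielded = true;
	} else
		flush = true;
	continue;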
>
>
>>> +					/* force retry on this gfn. */
>>> +					iter.yielded = true;
>>> +				} else
>>> +					flush = true;
>>> +				continue;
>>> +			}
>>> +		}
>>> +
>>>    		tdp_mmu_iter_set_spte(kvm, &iter, SHADOW_NONPRESENT_VALUE);
>>>    		flush = true;
>>>    	}
>>>    	rcu_read_unlock();
>>> +	if (split_sp) {
>>> +		WARN_ON(!can_yield);
>>> +		if (flush) {
>>> +			kvm_flush_remote_tlbs(kvm);
>>> +			flush = false;
>>> +		}
>> Same here, why do we need to do the flush here?
>> Can we delay it until the caller does the flush?
> No, because we unlock mmu_lock and may block when freeing memory.
But I don't see how it may block while freeing memory.
Did I miss anything?
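At least in the current upstream TDP MMU, tdp_mmu_free_sp() is just

	static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
	{
		free_page((unsigned long)sp->spt);
		kmem_cache_free(mmu_page_header_cache, sp);
	}

and neither free_page() nor kmem_cache_free() can sleep.  Or does this
series change tdp_mmu_free_sp() into something that can block?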


>
>>> +
>>> +		write_unlock(&kvm->mmu_lock);
>>> +		tdp_mmu_free_sp(split_sp);
>>> +		write_lock(&kvm->mmu_lock);
>>> +	}
>>> +
>>>    	/*
>>>    	 * Because this flow zaps _only_ leaf SPTEs, the caller doesn't need
>>>    	 * to provide RCU protection as no 'struct kvm_mmu_page' will be freed.
>>> @@ -1606,8 +1670,6 @@ static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
>>>    	KVM_BUG_ON(kvm_mmu_page_role_is_private(role) !=
>>>    		   is_private_sptep(iter->sptep), kvm);
>>> -	/* TODO: Large page isn't supported for private SPTE yet. */
>>> -	KVM_BUG_ON(kvm_mmu_page_role_is_private(role), kvm);
>>>    	/*
>>>    	 * Since we are allocating while under the MMU lock we have to be
>>
