Message-ID: <20220929165206.GA1963093@ls.amr.corp.intel.com>
Date:   Thu, 29 Sep 2022 09:52:06 -0700
From:   Isaku Yamahata <isaku.yamahata@...il.com>
To:     Chao Peng <chao.p.peng@...ux.intel.com>
Cc:     kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
        linux-mm@...ck.org, linux-fsdevel@...r.kernel.org,
        linux-api@...r.kernel.org, linux-doc@...r.kernel.org,
        qemu-devel@...gnu.org, Paolo Bonzini <pbonzini@...hat.com>,
        Jonathan Corbet <corbet@....net>,
        Sean Christopherson <seanjc@...gle.com>,
        Vitaly Kuznetsov <vkuznets@...hat.com>,
        Wanpeng Li <wanpengli@...cent.com>,
        Jim Mattson <jmattson@...gle.com>,
        Joerg Roedel <joro@...tes.org>,
        Thomas Gleixner <tglx@...utronix.de>,
        Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
        x86@...nel.org, "H . Peter Anvin" <hpa@...or.com>,
        Hugh Dickins <hughd@...gle.com>,
        Jeff Layton <jlayton@...nel.org>,
        "J . Bruce Fields" <bfields@...ldses.org>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Shuah Khan <shuah@...nel.org>, Mike Rapoport <rppt@...nel.org>,
        Steven Price <steven.price@....com>,
        "Maciej S . Szmigiero" <mail@...iej.szmigiero.name>,
        Vlastimil Babka <vbabka@...e.cz>,
        Vishal Annapurve <vannapurve@...gle.com>,
        Yu Zhang <yu.c.zhang@...ux.intel.com>,
        "Kirill A . Shutemov" <kirill.shutemov@...ux.intel.com>,
        luto@...nel.org, jun.nakajima@...el.com, dave.hansen@...el.com,
        ak@...ux.intel.com, david@...hat.com, aarcange@...hat.com,
        ddutile@...hat.com, dhildenb@...hat.com,
        Quentin Perret <qperret@...gle.com>,
        Michael Roth <michael.roth@....com>, mhocko@...e.com,
        Muchun Song <songmuchun@...edance.com>, wei.w.wang@...el.com,
        isaku.yamahata@...il.com
Subject: Re: [PATCH v8 6/8] KVM: Update lpage info when private/shared memory
 are mixed

On Thu, Sep 15, 2022 at 10:29:11PM +0800,
Chao Peng <chao.p.peng@...ux.intel.com> wrote:

> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 08abad4f3e6f..a0f198cede3d 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
...
> @@ -6894,3 +6899,115 @@ void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
>  	if (kvm->arch.nx_lpage_recovery_thread)
>  		kthread_stop(kvm->arch.nx_lpage_recovery_thread);
>  }
> +
> +static bool mem_attr_is_mixed(struct kvm *kvm, unsigned int attr,
> +			      gfn_t start, gfn_t end)
> +{
> +	XA_STATE(xas, &kvm->mem_attr_array, start);
> +	gfn_t gfn = start;
> +	void *entry;
> +	bool shared, private;
> +	bool mixed = false;
> +
> +	if (attr == KVM_MEM_ATTR_SHARED) {
> +		shared = true;
> +		private = false;
> +	} else {
> +		shared = false;
> +		private = true;
> +	}

We don't need to care whether the target is shared or private.  We only
need to check whether all entries in the range are the same.
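
E.g., as a minimal userspace sketch of what I mean (the xarray modeled
as a plain array of attribute values; names made up for illustration):

#include <stdbool.h>
#include <stddef.h>

/* [start, end) is mixed iff any entry differs from the first one. */
static bool range_is_mixed(const unsigned int *attrs,
			   size_t start, size_t end)
{
	size_t i;

	for (i = start + 1; i < end; i++) {
		if (attrs[i] != attrs[start])
			return true;
	}
	return false;
}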

> +
> +	rcu_read_lock();
> +	entry = xas_load(&xas);
> +	while (gfn < end) {
> +		if (xas_retry(&xas, entry))
> +			continue;
> +
> +		KVM_BUG_ON(gfn != xas.xa_index, kvm);
> +
> +		if (entry)
> +			private = true;
> +		else
> +			shared = true;
> +
> +		if (private && shared) {
> +			mixed = true;
> +			goto out;
> +		}
> +
> +		entry = xas_next(&xas);
> +		gfn++;
> +	}
> +out:
> +	rcu_read_unlock();
> +	return mixed;
> +}
> +
> +static inline void update_mixed(struct kvm_lpage_info *linfo, bool mixed)
> +{
> +	if (mixed)
> +		linfo->disallow_lpage |= KVM_LPAGE_PRIVATE_SHARED_MIXED;
> +	else
> +		linfo->disallow_lpage &= ~KVM_LPAGE_PRIVATE_SHARED_MIXED;
> +}
> +
> +static void update_mem_lpage_info(struct kvm *kvm,
> +				  struct kvm_memory_slot *slot,
> +				  unsigned int attr,
> +				  gfn_t start, gfn_t end)
> +{
> +	unsigned long lpage_start, lpage_end;
> +	unsigned long gfn, pages, mask;
> +	int level;
> +
> +	for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
> +		pages = KVM_PAGES_PER_HPAGE(level);
> +		mask = ~(pages - 1);
> +		lpage_start = start & mask;
> +		lpage_end = (end - 1) & mask;
> +
> +		/*
> +		 * We only need to scan the head and tail page, for middle pages
> +		 * we know they are not mixed.
> +		 */
> +		update_mixed(lpage_info_slot(lpage_start, slot, level),
> +			     mem_attr_is_mixed(kvm, attr, lpage_start,
> +							  lpage_start + pages));
> +
> +		if (lpage_start == lpage_end)
> +			return;
> +
> +		for (gfn = lpage_start + pages; gfn < lpage_end; gfn += pages)
> +			update_mixed(lpage_info_slot(gfn, slot, level), false);


For the >2M case we don't have to check every xarray entry; it's enough to
check the lower-level lpage_info, which has already been updated: the range
is mixed iff some child range is mixed or two child ranges carry different
attributes.
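
In sketch form (again a userspace model, not kernel code: child_mixed[]
and child_attr[] stand in for the lower-level lpage_info bits and one
representative mem_attr_array entry per child range):

#include <stdbool.h>
#include <stddef.h>

/*
 * A parent range is mixed iff any child range is mixed, or two
 * (uniform) child ranges carry different attributes.
 */
static bool parent_is_mixed(const bool *child_mixed,
			    const unsigned int *child_attr,
			    size_t nr_children)
{
	size_t i;

	for (i = 0; i < nr_children; i++) {
		if (child_mixed[i])
			return true;
		if (child_attr[i] != child_attr[0])
			return true;
	}
	return false;
}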

> +
> +		update_mixed(lpage_info_slot(lpage_end, slot, level),
> +			     mem_attr_is_mixed(kvm, attr, lpage_end,
> +							  lpage_end + pages));
> +	}
> +}
> +
> +void kvm_arch_update_mem_attr(struct kvm *kvm, unsigned int attr,
> +			      gfn_t start, gfn_t end)
> +{
> +	struct kvm_memory_slot *slot;
> +	struct kvm_memslots *slots;
> +	struct kvm_memslot_iter iter;
> +	int i;
> +
> +	WARN_ONCE(!(attr & (KVM_MEM_ATTR_PRIVATE | KVM_MEM_ATTR_SHARED)),
> +			"Unsupported mem attribute.\n");
> +
> +	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
> +		slots = __kvm_memslots(kvm, i);
> +
> +		kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
> +			slot = iter.slot;
> +			start = max(start, slot->base_gfn);
> +			end = min(end, slot->base_gfn + slot->npages);
> +			if (WARN_ON_ONCE(start >= end))
> +				continue;
> +
> +			update_mem_lpage_info(kvm, slot, attr, start, end);
> +		}
> +	}
> +}


Here is my updated version.

bool kvm_mem_attr_is_mixed(struct kvm_memory_slot *slot, gfn_t gfn, int level)
{
	gfn_t pages = KVM_PAGES_PER_HPAGE(level);
	gfn_t mask = ~(pages - 1);
	struct kvm_lpage_info *linfo = lpage_info_slot(gfn & mask, slot, level);

	WARN_ON_ONCE(level == PG_LEVEL_4K);
	return linfo->disallow_lpage & KVM_LPAGE_PRIVATE_SHARED_MIXED;
}
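
(Presumably a consumer of this helper would be the code that picks the
mapping level for a fault; a purely illustrative sketch, the function
below is not part of this series:)

/* Illustrative only: cap the mapping at the largest non-mixed level. */
static int max_non_mixed_level(struct kvm_memory_slot *slot, gfn_t gfn)
{
	int level;

	for (level = KVM_MAX_HUGEPAGE_LEVEL; level > PG_LEVEL_4K; level--) {
		if (!kvm_mem_attr_is_mixed(slot, gfn, level))
			return level;
	}
	return PG_LEVEL_4K;
}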

#ifdef CONFIG_HAVE_KVM_PRIVATE_MEM_ATTR
static void update_mixed(struct kvm_lpage_info *linfo, bool mixed)
{
	if (mixed)
		linfo->disallow_lpage |= KVM_LPAGE_PRIVATE_SHARED_MIXED;
	else
		linfo->disallow_lpage &= ~KVM_LPAGE_PRIVATE_SHARED_MIXED;
}

static bool __mem_attr_is_mixed(struct kvm *kvm, gfn_t start, gfn_t end)
{
	XA_STATE(xas, &kvm->mem_attr_array, start);
	bool mixed = false;
	gfn_t gfn = start;
	void *s_entry;
	void *entry;

	rcu_read_lock();
	/* Entry at @start; everything in [start, end) must match it. */
	do {
		s_entry = xas_load(&xas);
	} while (xas_retry(&xas, s_entry));
	entry = s_entry;
	while (gfn < end) {
		if (xas_retry(&xas, entry)) {
			/* The retry entry reset the walk; reload. */
			entry = xas_load(&xas);
			continue;
		}

		KVM_BUG_ON(gfn != xas.xa_index, kvm);

		/* Compare the entry at @gfn before advancing past it. */
		if (entry != s_entry) {
			mixed = true;
			break;
		}

		entry = xas_next(&xas);
		gfn++;
	}
	rcu_read_unlock();
	return mixed;
}

static bool mem_attr_is_mixed(struct kvm *kvm,
			      struct kvm_memory_slot *slot, int level,
			      gfn_t start, gfn_t end)
{
	struct kvm_lpage_info *child_linfo;
	unsigned long child_pages;
	bool mixed = false;
	unsigned long gfn;
	void *entry;

	if (WARN_ON_ONCE(level == PG_LEVEL_4K))
		return false;

	if (level == PG_LEVEL_2M)
		return __mem_attr_is_mixed(kvm, start, end);

	/* This assumes that level - 1 is already updated. */
	rcu_read_lock();
	child_pages = KVM_PAGES_PER_HPAGE(level - 1);
	entry = xa_load(&kvm->mem_attr_array, start);
	for (gfn = start; gfn < end; gfn += child_pages) {
		child_linfo = lpage_info_slot(gfn, slot, level - 1);
		if (child_linfo->disallow_lpage & KVM_LPAGE_PRIVATE_SHARED_MIXED) {
			mixed = true;
			break;
		}
		if (xa_load(&kvm->mem_attr_array, gfn) != entry) {
			mixed = true;
			break;
		}
	}
	rcu_read_unlock();
	return mixed;
}

static void update_mem_lpage_info(struct kvm *kvm,
				  struct kvm_memory_slot *slot,
				  unsigned int attr,
				  gfn_t start, gfn_t end)
{
	unsigned long lpage_start, lpage_end;
	unsigned long gfn, pages, mask;
	int level;

	for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
		pages = KVM_PAGES_PER_HPAGE(level);
		mask = ~(pages - 1);
		lpage_start = start & mask;
		lpage_end = (end - 1) & mask;

		/*
		 * We only need to scan the head and tail page, for middle pages
		 * we know they are not mixed.
		 */
		update_mixed(lpage_info_slot(lpage_start, slot, level),
			     mem_attr_is_mixed(kvm, slot, level,
					       lpage_start, lpage_start + pages));

		/*
		 * Head == tail: nothing in the middle, but the containing
		 * ranges at the higher levels still need updating.
		 */
		if (lpage_start == lpage_end)
			continue;

		for (gfn = lpage_start + pages; gfn < lpage_end; gfn += pages)
			update_mixed(lpage_info_slot(gfn, slot, level), false);

		update_mixed(lpage_info_slot(lpage_end, slot, level),
			     mem_attr_is_mixed(kvm, slot, level,
					       lpage_end, lpage_end + pages));
	}
}

void kvm_arch_update_mem_attr(struct kvm *kvm, unsigned int attr,
			      gfn_t start, gfn_t end)
{
	struct kvm_memory_slot *slot;
	struct kvm_memslots *slots;
	struct kvm_memslot_iter iter;
	int idx;
	int i;

	WARN_ONCE(!(attr & (KVM_MEM_ATTR_PRIVATE | KVM_MEM_ATTR_SHARED)),
		  "Unsupported mem attribute.\n");

	idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		slots = __kvm_memslots(kvm, i);

		kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
			/*
			 * Clip to the slot without clobbering start/end;
			 * the iterator and any later slot still need the
			 * original range.
			 */
			gfn_t gfn_start, gfn_end;

			slot = iter.slot;
			gfn_start = max(start, slot->base_gfn);
			gfn_end = min(end, slot->base_gfn + slot->npages);
			if (WARN_ON_ONCE(gfn_start >= gfn_end))
				continue;

			update_mem_lpage_info(kvm, slot, attr, gfn_start, gfn_end);
		}
	}
	srcu_read_unlock(&kvm->srcu, idx);
}
#endif
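
The head/tail logic can be sanity-checked with a small userspace model
(not kernel code: attrs[] stands in for mem_attr_array, mixed[] for the
KVM_LPAGE_PRIVATE_SHARED_MIXED bit at one level; all names made up):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_GFNS		64
#define LPAGE_PAGES	8	/* pages per "large page" at the one level modeled */

static unsigned int attrs[NR_GFNS];
static bool mixed[NR_GFNS / LPAGE_PAGES];

static bool lpage_is_mixed(unsigned long start, unsigned long end)
{
	for (unsigned long gfn = start + 1; gfn < end; gfn++)
		if (attrs[gfn] != attrs[start])
			return true;
	return false;
}

/*
 * Head/tail update for [start, end): only the two boundary lpages can
 * straddle differing attributes; interior lpages were just set to a
 * single attribute by the caller, so they are uniform by construction.
 */
static void update_range(unsigned long start, unsigned long end)
{
	unsigned long mask = ~(unsigned long)(LPAGE_PAGES - 1);
	unsigned long lpage_start = start & mask;
	unsigned long lpage_end = (end - 1) & mask;

	mixed[lpage_start / LPAGE_PAGES] =
		lpage_is_mixed(lpage_start, lpage_start + LPAGE_PAGES);
	if (lpage_start == lpage_end)
		return;
	for (unsigned long g = lpage_start + LPAGE_PAGES; g < lpage_end; g += LPAGE_PAGES)
		mixed[g / LPAGE_PAGES] = false;
	mixed[lpage_end / LPAGE_PAGES] =
		lpage_is_mixed(lpage_end, lpage_end + LPAGE_PAGES);
}

int main(void)
{
	/* Flip gfns [5, 21) to "private" (1), then update. */
	for (unsigned long gfn = 5; gfn < 21; gfn++)
		attrs[gfn] = 1;
	update_range(5, 21);

	/* Cross-check against a brute-force scan of every lpage. */
	for (unsigned long g = 0; g < NR_GFNS; g += LPAGE_PAGES)
		assert(mixed[g / LPAGE_PAGES] == lpage_is_mixed(g, g + LPAGE_PAGES));
	puts("head/tail update matches brute force");
	return 0;
}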


-- 
Isaku Yamahata <isaku.yamahata@...il.com>
