Message-ID: <aN_liua2oaAyyuL_@google.com>
Date: Fri, 3 Oct 2025 08:02:34 -0700
From: Sean Christopherson <seanjc@...gle.com>
To: Ackerley Tng <ackerleytng@...gle.com>
Cc: kvm@...r.kernel.org, linux-kernel@...r.kernel.org, 
	Yan Zhao <yan.y.zhao@...el.com>, Fuad Tabba <tabba@...gle.com>, 
	Binbin Wu <binbin.wu@...ux.intel.com>, Michael Roth <michael.roth@....com>, 
	Ira Weiny <ira.weiny@...el.com>, Rick P Edgecombe <rick.p.edgecombe@...el.com>, 
	Vishal Annapurve <vannapurve@...gle.com>, David Hildenbrand <david@...hat.com>, 
	Paolo Bonzini <pbonzini@...hat.com>
Subject: Re: [RFC PATCH v2 33/51] KVM: guest_memfd: Allocate and truncate from
 custom allocator

Trimmed Cc to KVM/guest_memfd again.

On Wed, May 14, 2025, Ackerley Tng wrote:
> If a custom allocator is requested at guest_memfd creation time, pages
> from the custom allocator will be used to back guest_memfd.
> 
> Change-Id: I59df960b3273790f42fe5bea54a234f40962eb75
> Signed-off-by: Ackerley Tng <ackerleytng@...gle.com>
> ---
>  mm/memory.c            |   1 +
>  virt/kvm/guest_memfd.c | 142 +++++++++++++++++++++++++++++++++++++----
>  2 files changed, 132 insertions(+), 11 deletions(-)
> 
> diff --git a/mm/memory.c b/mm/memory.c
> index ba3ea0a82f7f..3af45e96913c 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -7249,6 +7249,7 @@ void folio_zero_user(struct folio *folio, unsigned long addr_hint)
>  	else
>  		process_huge_page(addr_hint, nr_pages, clear_subpage, folio);
>  }
> +EXPORT_SYMBOL_GPL(folio_zero_user);

This needs to be in a separate patch.

>  static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
>  				   unsigned long addr_hint,
> diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
> index c65d93c5a443..24d270b9b725 100644
> --- a/virt/kvm/guest_memfd.c
> +++ b/virt/kvm/guest_memfd.c
> @@ -478,15 +478,13 @@ static inline void kvm_gmem_mark_prepared(struct folio *folio)
>   * leaking host data and the up-to-date flag is set.
>   */
>  static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
> -				  gfn_t gfn, struct folio *folio)
> +				  gfn_t gfn, struct folio *folio,
> +				  unsigned long addr_hint)
>  {
> -	unsigned long nr_pages, i;
>  	pgoff_t index;
>  	int r;
>  
> -	nr_pages = folio_nr_pages(folio);
> -	for (i = 0; i < nr_pages; i++)
> -		clear_highpage(folio_page(folio, i));
> +	folio_zero_user(folio, addr_hint);

As does this.

>  	/*
>  	 * Preparing huge folios should always be safe, since it should
> @@ -554,7 +552,9 @@ static int kvm_gmem_filemap_add_folio(struct address_space *mapping,
>   */
>  static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
>  {
> +	size_t allocated_size;
>  	struct folio *folio;
> +	pgoff_t index_floor;
>  	int ret;
>  
>  repeat:
> @@ -581,8 +581,10 @@ static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
>  			return ERR_PTR(ret);
>  		}
>  	}
> +	allocated_size = folio_size(folio);
>  
> -	ret = kvm_gmem_filemap_add_folio(inode->i_mapping, folio, index);
> +	index_floor = round_down(index, folio_nr_pages(folio));
> +	ret = kvm_gmem_filemap_add_folio(inode->i_mapping, folio, index_floor);
>  	if (ret) {
>  		folio_put(folio);
>  
> @@ -598,7 +600,17 @@ static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
>  		return ERR_PTR(ret);
>  	}
>  
> -	__folio_set_locked(folio);
> +	spin_lock(&inode->i_lock);
> +	inode->i_blocks += allocated_size / 512;

???  If anything needs a comment, it's this.
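
E.g. something like this, assuming the intent is for stat() to report the full
backing allocation (i_blocks is counted in 512-byte units); if that's not the
intent, then document whatever the intent actually is:

	/*
	 * Account the entire allocated folio, which may be much larger
	 * than PAGE_SIZE when a custom allocator is in use, so that
	 * stat() reports the true backing usage.  i_blocks is counted
	 * in 512-byte units.
	 */
	spin_lock(&inode->i_lock);
	inode->i_blocks += allocated_size / 512;
	spin_unlock(&inode->i_lock);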

> +	spin_unlock(&inode->i_lock);
> +
> +	/*
> +	 * folio is the one that is allocated, this gets the folio at the
> +	 * requested index.
> +	 */
> +	folio = page_folio(folio_file_page(folio, index));
> +	folio_lock(folio);
> +
>  	return folio;
>  }

...

> +static void kvm_gmem_truncate_inode_range(struct inode *inode, loff_t lstart,
> +					  loff_t lend)
> +{
> +	pgoff_t full_hpage_start;
> +	size_t nr_per_huge_page;
> +	pgoff_t full_hpage_end;
> +	size_t nr_pages;
> +	pgoff_t start;
> +	pgoff_t end;
> +	void *priv;
> +
> +	priv = kvm_gmem_allocator_private(inode);
> +	nr_per_huge_page = kvm_gmem_allocator_ops(inode)->nr_pages_in_folio(priv);
> +
> +	start = lstart >> PAGE_SHIFT;
> +	end = min(lend, i_size_read(inode)) >> PAGE_SHIFT;
> +
> +	full_hpage_start = round_up(start, nr_per_huge_page);
> +	full_hpage_end = round_down(end, nr_per_huge_page);

This is where the layer of indirection completely breaks down.  This is blatantly
specific to hugepages, but presented as if it's some generic logic.
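
FWIW, if the hugepage nature were explicit, e.g. with the page geometry cached
on the gmem inode (more on that below; GMEM_I() and the field names are
strawmen), the setup here would shrink to something like:

	struct gmem_inode *gi = GMEM_I(inode);	/* strawman helper */
	size_t nr_per_huge_page = gi->page_size >> PAGE_SHIFT;

	full_hpage_start = round_up(start, nr_per_huge_page);
	full_hpage_end = round_down(end, nr_per_huge_page);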

> +
> +	if (start < full_hpage_start) {
> +		pgoff_t zero_end = min(full_hpage_start, end);
> +
> +		kvm_gmem_zero_range(inode->i_mapping, start, zero_end);
> +	}
> +
> +	if (full_hpage_end > full_hpage_start) {
> +		nr_pages = full_hpage_end - full_hpage_start;
> +		kvm_gmem_truncate_inode_aligned_pages(inode, full_hpage_start,
> +						      nr_pages);
> +	}
> +
> +	if (end > full_hpage_end && end > full_hpage_start) {
> +		pgoff_t zero_start = max(full_hpage_end, start);
> +
> +		kvm_gmem_zero_range(inode->i_mapping, zero_start, end);
> +	}
> +}
> +
>  static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
>  {
>  	struct list_head *gmem_list = &inode->i_mapping->i_private_list;
> @@ -752,7 +850,12 @@ static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
>  	list_for_each_entry(gmem, gmem_list, entry)
>  		kvm_gmem_invalidate_begin(gmem, start, end);
>  
> -	truncate_inode_pages_range(inode->i_mapping, offset, offset + len - 1);
> +	if (kvm_gmem_has_custom_allocator(inode)) {
> +		kvm_gmem_truncate_inode_range(inode, offset, offset + len);
> +	} else {
> +		/* Page size is PAGE_SIZE, so use optimized truncation function. */

For cases like this, put the comment outside of the if-statement, because what
you're documenting isn't just the call to truncate_inode_pages_range(), it's the
pivot on a "custom" allocator.

E.g. this should look more like:

	/* Comment about why KVM needs custom code for hugetlb. */
	if (kvm_gmem_is_hugetlb(inode))
		kvm_gmem_truncate_inode_range(...);
	else
		truncate_inode_pages_range(...);

> +		truncate_inode_pages_range(inode->i_mapping, offset, offset + len - 1);
> +	}
>  
>  	list_for_each_entry(gmem, gmem_list, entry)
>  		kvm_gmem_invalidate_end(gmem, start, end);
> @@ -776,6 +879,16 @@ static long kvm_gmem_allocate(struct inode *inode, loff_t offset, loff_t len)
>  
>  	start = offset >> PAGE_SHIFT;
>  	end = (offset + len) >> PAGE_SHIFT;
> +	if (kvm_gmem_has_custom_allocator(inode)) {
> +		size_t nr_pages;
> +		void *p;
> +
> +		p = kvm_gmem_allocator_private(inode);
> +		nr_pages = kvm_gmem_allocator_ops(inode)->nr_pages_in_folio(p);
> +
> +		start = round_down(start, nr_pages);
> +		end = round_down(end, nr_pages);
> +	}

This adds sooo much noise to a stupidly simple thing.  The per-gmem page size is
constant; just add gmem_inode.page_size and gmem_inode.page_shift, and then this
becomes:

	start = offset >> gi->page_shift;
	end = (offset + len) >> gi->page_shift;

We can do that even if we end up with a layer of indirection (which, the more I
look at this, the more I'm convinced is completely unnecessary).
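
To illustrate (struct layout and init site are strawmen, adjust to whatever the
inode-private structure ends up looking like):

	struct gmem_inode {
		/* ... */
		unsigned long page_size;	/* size of backing folios, in bytes */
		unsigned int page_shift;	/* ilog2(page_size) */
	};

	/* Set once at guest_memfd creation, based on the chosen allocator: */
	gi->page_shift = ...;	/* PAGE_SHIFT, or the custom allocator's folio shift */
	gi->page_size  = 1UL << gi->page_shift;

Then kvm_gmem_allocate(), kvm_gmem_punch_hole(), etc. can all key off
gi->page_shift without poking through allocator_ops for a value that never
changes.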
