Message-ID: <02aad35b728f4918e62dc6eb1d1d5546487b099e.1760731772.git.ackerleytng@google.com>
Date: Fri, 17 Oct 2025 13:11:50 -0700
From: Ackerley Tng <ackerleytng@...gle.com>
To: cgroups@...r.kernel.org, kvm@...r.kernel.org, linux-doc@...r.kernel.org,
linux-fsdevel@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-kselftest@...r.kernel.org, linux-mm@...ck.org,
linux-trace-kernel@...r.kernel.org, x86@...nel.org
Cc: ackerleytng@...gle.com, akpm@...ux-foundation.org,
binbin.wu@...ux.intel.com, bp@...en8.de, brauner@...nel.org,
chao.p.peng@...el.com, chenhuacai@...nel.org, corbet@....net,
dave.hansen@...el.com, dave.hansen@...ux.intel.com, david@...hat.com,
dmatlack@...gle.com, erdemaktas@...gle.com, fan.du@...el.com, fvdl@...gle.com,
haibo1.xu@...el.com, hannes@...xchg.org, hch@...radead.org, hpa@...or.com,
hughd@...gle.com, ira.weiny@...el.com, isaku.yamahata@...el.com, jack@...e.cz,
james.morse@....com, jarkko@...nel.org, jgg@...pe.ca, jgowans@...zon.com,
jhubbard@...dia.com, jroedel@...e.de, jthoughton@...gle.com,
jun.miao@...el.com, kai.huang@...el.com, keirf@...gle.com,
kent.overstreet@...ux.dev, liam.merwick@...cle.com,
maciej.wieczor-retman@...el.com, mail@...iej.szmigiero.name,
maobibo@...ngson.cn, mathieu.desnoyers@...icios.com, maz@...nel.org,
mhiramat@...nel.org, mhocko@...nel.org, mic@...ikod.net, michael.roth@....com,
mingo@...hat.com, mlevitsk@...hat.com, mpe@...erman.id.au,
muchun.song@...ux.dev, nikunj@....com, nsaenz@...zon.es,
oliver.upton@...ux.dev, palmer@...belt.com, pankaj.gupta@....com,
paul.walmsley@...ive.com, pbonzini@...hat.com, peterx@...hat.com,
pgonda@...gle.com, prsampat@....com, pvorel@...e.cz, qperret@...gle.com,
richard.weiyang@...il.com, rick.p.edgecombe@...el.com, rientjes@...gle.com,
rostedt@...dmis.org, roypat@...zon.co.uk, rppt@...nel.org, seanjc@...gle.com,
shakeel.butt@...ux.dev, shuah@...nel.org, steven.price@....com,
steven.sistare@...cle.com, suzuki.poulose@....com, tabba@...gle.com,
tglx@...utronix.de, thomas.lendacky@....com, vannapurve@...gle.com,
vbabka@...e.cz, viro@...iv.linux.org.uk, vkuznets@...hat.com,
wei.w.wang@...el.com, will@...nel.org, willy@...radead.org, wyihan@...gle.com,
xiaoyao.li@...el.com, yan.y.zhao@...el.com, yilun.xu@...el.com,
yuzenghui@...wei.com, zhiquan1.li@...el.com
Subject: [RFC PATCH v1 09/37] KVM: guest_memfd: Skip LRU for guest_memfd folios
filemap_add_folio(), called from filemap_grab_folio(), adds folios to
an LRU list. This is unnecessary for guest_memfd, which does not
participate in swapping.
In addition, placing a folio on the LRU takes a reference on it. Because
shared-to-private memory conversions for KVM guests are gated on folio
refcounts, this extra reference can cause conversions to fail with an
unexpected refcount.
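As a hypothetical illustration (not code from this series), a conversion
path that requires an exact refcount would look roughly like:

  /*
   * Hypothetical sketch: the only references expected on the folio are
   * the one held by the page cache and the one held by this caller.  A
   * transient reference taken by an LRU batch makes the check fail.
   */
  int expected = 1 /* page cache */ + 1 /* this caller */;

  if (folio_ref_count(folio) != expected)
          return -EAGAIN;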
Rework kvm_gmem_get_folio() to manually allocate the folio, charge the
memcg, and insert the folio into the page cache without placing it on the
LRU, by calling __filemap_add_folio() directly.
The folio is then marked unevictable to keep it out of swapping. The
->free_folio() handler, now registered unconditionally rather than only
when CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE is set, clears the unevictable
flag when the folio is released from guest_memfd.
This change ensures that the LRU no longer takes references on
guest_memfd folios, significantly reducing the chance of elevated
refcounts during conversion.
To facilitate this, __filemap_add_folio(), __mem_cgroup_charge() and
__mem_cgroup_uncharge() are exported for KVM's use.
Signed-off-by: Ackerley Tng <ackerleytng@...gle.com>
Signed-off-by: Sean Christopherson <seanjc@...gle.com>
---
mm/filemap.c | 1 +
mm/memcontrol.c | 2 ++
virt/kvm/guest_memfd.c | 60 +++++++++++++++++++++++++++++++++---------
3 files changed, 50 insertions(+), 13 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 03f223be575ca..60c7c95bbd7e6 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -954,6 +954,7 @@ noinline int __filemap_add_folio(struct address_space *mapping,
return xas_error(&xas);
}
ALLOW_ERROR_INJECTION(__filemap_add_folio, ERRNO);
+EXPORT_SYMBOL_FOR_MODULES(__filemap_add_folio, "kvm");
int filemap_add_folio(struct address_space *mapping, struct folio *folio,
pgoff_t index, gfp_t gfp)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 8dd7fbed5a942..fe8629414d0a9 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4721,6 +4721,7 @@ int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
return ret;
}
+EXPORT_SYMBOL_FOR_MODULES(__mem_cgroup_charge, "kvm");
/**
* mem_cgroup_charge_hugetlb - charge the memcg for a hugetlb folio
@@ -4893,6 +4894,7 @@ void __mem_cgroup_uncharge(struct folio *folio)
uncharge_folio(folio, &ug);
uncharge_batch(&ug);
}
+EXPORT_SYMBOL_FOR_MODULES(__mem_cgroup_uncharge, "kvm");
void __mem_cgroup_uncharge_folios(struct folio_batch *folios)
{
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 2a9e9220a48aa..dab2b3ce78bc8 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -148,6 +148,41 @@ static struct mempolicy *kvm_gmem_get_folio_policy(struct gmem_inode *gi,
#endif
}
+static struct folio *__kvm_gmem_get_folio(struct address_space *mapping,
+ pgoff_t index,
+ struct mempolicy *policy)
+{
+ const gfp_t gfp = mapping_gfp_mask(mapping);
+ struct folio *folio;
+ int err;
+
+ folio = filemap_lock_folio(mapping, index);
+ if (!IS_ERR(folio))
+ return folio;
+
+ folio = filemap_alloc_folio(gfp, 0, policy);
+ if (!folio)
+ return ERR_PTR(-ENOMEM);
+
+ err = mem_cgroup_charge(folio, NULL, gfp);
+ if (err)
+ goto err_put;
+
+ __folio_set_locked(folio);
+
+ err = __filemap_add_folio(mapping, folio, index, gfp, NULL);
+ if (err) {
+ __folio_clear_locked(folio);
+ goto err_put;
+ }
+
+ return folio;
+
+err_put:
+ folio_put(folio);
+ return ERR_PTR(err);
+}
+
/*
* Returns a locked folio on success. The caller is responsible for
* setting the up-to-date flag before the memory is mapped into the guest.
@@ -160,6 +195,7 @@ static struct mempolicy *kvm_gmem_get_folio_policy(struct gmem_inode *gi,
static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
{
/* TODO: Support huge pages. */
+ struct address_space *mapping = inode->i_mapping;
struct mempolicy *policy;
struct folio *folio;
@@ -167,16 +203,17 @@ static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
* Fast-path: See if folio is already present in mapping to avoid
* policy_lookup.
*/
- folio = filemap_lock_folio(inode->i_mapping, index);
+ folio = filemap_lock_folio(mapping, index);
if (!IS_ERR(folio))
return folio;
policy = kvm_gmem_get_folio_policy(GMEM_I(inode), index);
- folio = __filemap_get_folio_mpol(inode->i_mapping, index,
- FGP_LOCK | FGP_CREAT,
- mapping_gfp_mask(inode->i_mapping), policy);
- mpol_cond_put(policy);
+ do {
+ folio = __kvm_gmem_get_folio(mapping, index, policy);
+ } while (IS_ERR(folio) && PTR_ERR(folio) == -EEXIST);
+
+ mpol_cond_put(policy);
return folio;
}
@@ -588,24 +625,21 @@ static int kvm_gmem_error_folio(struct address_space *mapping, struct folio *fol
return MF_DELAYED;
}
-#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
static void kvm_gmem_free_folio(struct folio *folio)
{
- struct page *page = folio_page(folio, 0);
- kvm_pfn_t pfn = page_to_pfn(page);
- int order = folio_order(folio);
+ folio_clear_unevictable(folio);
- kvm_arch_gmem_invalidate(pfn, pfn + (1ul << order));
-}
+#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
+ kvm_arch_gmem_invalidate(folio_pfn(folio),
+ folio_pfn(folio) + folio_nr_pages(folio));
#endif
+}
static const struct address_space_operations kvm_gmem_aops = {
.dirty_folio = noop_dirty_folio,
.migrate_folio = kvm_gmem_migrate_folio,
.error_remove_folio = kvm_gmem_error_folio,
-#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
.free_folio = kvm_gmem_free_folio,
-#endif
};
static int kvm_gmem_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
--
2.51.0.858.gf9c4a03a3a-goog