Message-ID: <20250618112935.7629-4-shivankg@amd.com>
Date: Wed, 18 Jun 2025 11:29:31 +0000
From: Shivank Garg <shivankg@....com>
To: <seanjc@...gle.com>, <david@...hat.com>, <vbabka@...e.cz>,
<willy@...radead.org>, <akpm@...ux-foundation.org>, <shuah@...nel.org>,
<pbonzini@...hat.com>, <brauner@...nel.org>, <viro@...iv.linux.org.uk>
CC: <ackerleytng@...gle.com>, <paul@...l-moore.com>, <jmorris@...ei.org>,
<serge@...lyn.com>, <pvorel@...e.cz>, <bfoster@...hat.com>,
<tabba@...gle.com>, <vannapurve@...gle.com>, <chao.gao@...el.com>,
<bharata@....com>, <nikunj@....com>, <michael.day@....com>,
<yan.y.zhao@...el.com>, <Neeraj.Upadhyay@....com>, <thomas.lendacky@....com>,
<michael.roth@....com>, <aik@....com>, <jgg@...dia.com>,
<kalyazin@...zon.com>, <peterx@...hat.com>, <shivankg@....com>,
<jack@...e.cz>, <rppt@...nel.org>, <hch@...radead.org>,
<cgzones@...glemail.com>, <ira.weiny@...el.com>, <rientjes@...gle.com>,
<roypat@...zon.co.uk>, <ziy@...dia.com>, <matthew.brost@...el.com>,
<joshua.hahnjy@...il.com>, <rakie.kim@...com>, <byungchul@...com>,
<gourry@...rry.net>, <kent.overstreet@...ux.dev>,
<ying.huang@...ux.alibaba.com>, <apopple@...dia.com>,
<chao.p.peng@...el.com>, <amit@...radead.org>, <ddutile@...hat.com>,
<dan.j.williams@...el.com>, <ashish.kalra@....com>, <gshan@...hat.com>,
<jgowans@...zon.com>, <pankaj.gupta@....com>, <papaluri@....com>,
<yuzhao@...gle.com>, <suzuki.poulose@....com>, <quic_eberman@...cinc.com>,
<aneeshkumar.kizhakeveetil@....com>, <linux-fsdevel@...r.kernel.org>,
<linux-mm@...ck.org>, <linux-kernel@...r.kernel.org>,
<linux-security-module@...r.kernel.org>, <kvm@...r.kernel.org>,
<linux-kselftest@...r.kernel.org>, <linux-coco@...ts.linux.dev>
Subject: [RFC PATCH v8 3/7] mm/filemap: Add mempolicy support to the filemap layer
From: Shivansh Dhiman <shivansh.dhiman@....com>
Add NUMA mempolicy support to the filemap allocation path by introducing
new APIs that take a mempolicy argument:
- filemap_grab_folio_mpol()
- filemap_alloc_folio_mpol()
- __filemap_get_folio_mpol()

These APIs allow callers to specify a NUMA policy during page cache
allocations, enabling fine-grained control over memory placement. This is
particularly needed by KVM when using guest-memfd memory backends, where
the guest memory needs to be allocated according to the NUMA policy
specified by the VMM.

The existing non-mempolicy APIs remain unchanged and continue to use the
default allocation behavior.
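
As a rough sketch of the intended call pattern (illustrative only, not
part of this patch; the helper name and the mapping/mpol/ilx values are
hypothetical), a guest-memfd-like backend that has already resolved a
struct mempolicy could allocate according to it as follows:

	/* Hypothetical caller sketch; not part of this patch. */
	static struct folio *example_grab_mpol(struct address_space *mapping,
					       pgoff_t index,
					       struct mempolicy *mpol)
	{
		/* ilx matters only for (weighted) interleave policies. */
		pgoff_t ilx = index;
		struct folio *folio;

		folio = filemap_grab_folio_mpol(mapping, index, mpol, ilx);
		if (IS_ERR(folio))
			return folio;	/* e.g. ERR_PTR(-ENOMEM) */

		/* Success: the folio is locked and a reference is held. */
		return folio;
	}

Callers that have no policy to apply can keep using filemap_grab_folio(),
which now reaches the same allocation path with a NULL mpol and behaves
exactly as before.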
Signed-off-by: Shivansh Dhiman <shivansh.dhiman@....com>
Co-developed-by: Shivank Garg <shivankg@....com>
Signed-off-by: Shivank Garg <shivankg@....com>
---
include/linux/pagemap.h | 41 +++++++++++++++++++++++++++++++++++++++++
mm/filemap.c | 27 +++++++++++++++++++++++----
2 files changed, 64 insertions(+), 4 deletions(-)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index e63fbfbd5b0f..6558c672740d 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -647,15 +647,24 @@ static inline void *detach_page_private(struct page *page)
#ifdef CONFIG_NUMA
struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order);
+struct folio *filemap_alloc_folio_mpol_noprof(gfp_t gfp, unsigned int order,
+ struct mempolicy *mpol, pgoff_t ilx);
#else
static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
{
return folio_alloc_noprof(gfp, order);
}
+static inline struct folio *filemap_alloc_folio_mpol_noprof(gfp_t gfp,
+ unsigned int order, struct mempolicy *mpol, pgoff_t ilx)
+{
+ return filemap_alloc_folio_noprof(gfp, order);
+}
#endif
#define filemap_alloc_folio(...) \
alloc_hooks(filemap_alloc_folio_noprof(__VA_ARGS__))
+#define filemap_alloc_folio_mpol(...) \
+ alloc_hooks(filemap_alloc_folio_mpol_noprof(__VA_ARGS__))
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
@@ -747,6 +756,8 @@ static inline fgf_t fgf_set_order(size_t size)
void *filemap_get_entry(struct address_space *mapping, pgoff_t index);
struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
fgf_t fgp_flags, gfp_t gfp);
+struct folio *__filemap_get_folio_mpol(struct address_space *mapping,
+ pgoff_t index, fgf_t fgp_flags, gfp_t gfp, struct mempolicy *mpol, pgoff_t ilx);
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
fgf_t fgp_flags, gfp_t gfp);
@@ -805,6 +816,36 @@ static inline struct folio *filemap_grab_folio(struct address_space *mapping,
mapping_gfp_mask(mapping));
}
+/**
+ * filemap_grab_folio_mpol - grab a folio from the page cache.
+ * @mapping: The address space to search.
+ * @index: The page index.
+ * @mpol: The mempolicy to apply when allocating a new folio.
+ * @ilx: The interleave index, for use only with MPOL_INTERLEAVE or
+ * MPOL_WEIGHTED_INTERLEAVE.
+ *
+ * Same as filemap_grab_folio(), except that it allocates the folio using
+ * the given memory policy.
+ *
+ * Return: A found or created folio. ERR_PTR(-ENOMEM) if no folio could be
+ * found or created.
+ */
+#ifdef CONFIG_NUMA
+static inline struct folio *filemap_grab_folio_mpol(struct address_space *mapping,
+ pgoff_t index, struct mempolicy *mpol, pgoff_t ilx)
+{
+ return __filemap_get_folio_mpol(mapping, index,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
+ mapping_gfp_mask(mapping), mpol, ilx);
+}
+#else
+static inline struct folio *filemap_grab_folio_mpol(struct address_space *mapping,
+ pgoff_t index, struct mempolicy *mpol, pgoff_t ilx)
+{
+ return filemap_grab_folio(mapping, index);
+}
+#endif /* CONFIG_NUMA */
+
/**
* find_get_page - find and get a page reference
* @mapping: the address_space to search
diff --git a/mm/filemap.c b/mm/filemap.c
index bada249b9fb7..c7e913b91636 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1007,6 +1007,15 @@ struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
return folio_alloc_noprof(gfp, order);
}
EXPORT_SYMBOL(filemap_alloc_folio_noprof);
+
+struct folio *filemap_alloc_folio_mpol_noprof(gfp_t gfp, unsigned int order,
+ struct mempolicy *mpol, pgoff_t ilx)
+{
+ if (mpol)
+ return folio_alloc_mpol_noprof(gfp, order, mpol,
+ ilx, numa_node_id());
+ return filemap_alloc_folio_noprof(gfp, order);
+}
#endif
/*
@@ -1891,11 +1900,14 @@ void *filemap_get_entry(struct address_space *mapping, pgoff_t index)
}
/**
- * __filemap_get_folio - Find and get a reference to a folio.
+ * __filemap_get_folio_mpol - Find and get a reference to a folio.
* @mapping: The address_space to search.
* @index: The page index.
* @fgp_flags: %FGP flags modify how the folio is returned.
* @gfp: Memory allocation flags to use if %FGP_CREAT is specified.
+ * @mpol: The mempolicy to apply when allocating a new folio.
+ * @ilx: The interleave index, for use only with MPOL_INTERLEAVE or
+ * MPOL_WEIGHTED_INTERLEAVE.
*
* Looks up the page cache entry at @mapping & @index.
*
@@ -1906,8 +1918,8 @@ void *filemap_get_entry(struct address_space *mapping, pgoff_t index)
*
* Return: The found folio or an ERR_PTR() otherwise.
*/
-struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
- fgf_t fgp_flags, gfp_t gfp)
+struct folio *__filemap_get_folio_mpol(struct address_space *mapping, pgoff_t index,
+ fgf_t fgp_flags, gfp_t gfp, struct mempolicy *mpol, pgoff_t ilx)
{
struct folio *folio;
@@ -1977,7 +1989,7 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
err = -ENOMEM;
if (order > min_order)
alloc_gfp |= __GFP_NORETRY | __GFP_NOWARN;
- folio = filemap_alloc_folio(alloc_gfp, order);
+ folio = filemap_alloc_folio_mpol(alloc_gfp, order, mpol, ilx);
if (!folio)
continue;
@@ -2024,6 +2036,13 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
folio_clear_dropbehind(folio);
return folio;
}
+EXPORT_SYMBOL_GPL(__filemap_get_folio_mpol);
+
+struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
+ fgf_t fgp_flags, gfp_t gfp)
+{
+ return __filemap_get_folio_mpol(mapping, index, fgp_flags, gfp, NULL, 0);
+}
EXPORT_SYMBOL(__filemap_get_folio);
static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max,
--
2.43.0