Message-ID: <20250620143502.3055777-1-willy@infradead.org>
Date: Fri, 20 Jun 2025 15:34:46 +0100
From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
To: Shivank Garg <shivankg@....com>,
seanjc@...gle.com,
david@...hat.com,
vbabka@...e.cz,
willy@...radead.org,
akpm@...ux-foundation.org,
shuah@...nel.org,
pbonzini@...hat.com,
brauner@...nel.org,
viro@...iv.linux.org.uk
Cc: ackerleytng@...gle.com,
paul@...l-moore.com,
jmorris@...ei.org,
serge@...lyn.com,
pvorel@...e.cz,
bfoster@...hat.com,
tabba@...gle.com,
vannapurve@...gle.com,
chao.gao@...el.com,
bharata@....com,
nikunj@....com,
michael.day@....com,
yan.y.zhao@...el.com,
Neeraj.Upadhyay@....com,
thomas.lendacky@....com,
michael.roth@....com,
aik@....com,
jgg@...dia.com,
kalyazin@...zon.com,
peterx@...hat.com,
jack@...e.cz,
rppt@...nel.org,
hch@...radead.org,
cgzones@...glemail.com,
ira.weiny@...el.com,
rientjes@...gle.com,
roypat@...zon.co.uk,
ziy@...dia.com,
matthew.brost@...el.com,
joshua.hahnjy@...il.com,
rakie.kim@...com,
byungchul@...com,
gourry@...rry.net,
kent.overstreet@...ux.dev,
ying.huang@...ux.alibaba.com,
apopple@...dia.com,
chao.p.peng@...el.com,
amit@...radead.org,
ddutile@...hat.com,
dan.j.williams@...el.com,
ashish.kalra@....com,
gshan@...hat.com,
jgowans@...zon.com,
pankaj.gupta@....com,
papaluri@....com,
yuzhao@...gle.com,
suzuki.poulose@....com,
quic_eberman@...cinc.com,
aneeshkumar.kizhakeveetil@....com,
linux-fsdevel@...r.kernel.org,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
linux-security-module@...r.kernel.org,
kvm@...r.kernel.org,
linux-kselftest@...r.kernel.org,
linux-coco@...ts.linux.dev
Subject: [PATCH 1/2] filemap: Add a mempolicy argument to filemap_alloc_folio()
guest_memfd needs to support memory policies, so add a mempolicy argument
to filemap_alloc_folio(). All existing users pass NULL; the first user of
the new argument will show up later in this series.
Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
---
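Notes: as a rough sketch of how the new argument is intended to be used, a
later patch in this series adds a guest_memfd caller. The helper below is
illustrative only (its name and the way the caller obtains the policy are
made up for this example, not part of this patch); every caller converted
here keeps passing NULL and therefore keeps the existing behaviour.

static struct folio *gmem_alloc_folio(struct address_space *mapping,
                                      struct mempolicy *policy,
                                      unsigned int order)
{
        /*
         * Illustrative only: allocate a page-cache folio honouring a
         * caller-supplied mempolicy.  A NULL policy falls back to the
         * existing cpuset page-spread / local-node allocation path.
         */
        return filemap_alloc_folio(mapping_gfp_mask(mapping), order, policy);
}

On CONFIG_NUMA=n kernels the pagemap.h stub simply ignores the policy and
calls folio_alloc_noprof(), which matches mempolicies being a no-op there.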
fs/bcachefs/fs-io-buffered.c | 2 +-
fs/btrfs/compression.c | 3 ++-
fs/btrfs/verity.c | 2 +-
fs/erofs/zdata.c | 2 +-
fs/f2fs/compress.c | 2 +-
include/linux/pagemap.h | 6 +++---
mm/filemap.c | 13 +++++++++----
mm/readahead.c | 2 +-
8 files changed, 19 insertions(+), 13 deletions(-)
diff --git a/fs/bcachefs/fs-io-buffered.c b/fs/bcachefs/fs-io-buffered.c
index 66bacdd49f78..392344232b16 100644
--- a/fs/bcachefs/fs-io-buffered.c
+++ b/fs/bcachefs/fs-io-buffered.c
@@ -124,7 +124,7 @@ static int readpage_bio_extend(struct btree_trans *trans,
if (folio && !xa_is_value(folio))
break;
- folio = filemap_alloc_folio(readahead_gfp_mask(iter->mapping), order);
+ folio = filemap_alloc_folio(readahead_gfp_mask(iter->mapping), order, NULL);
if (!folio)
break;
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 48d07939fee4..8430ccf70887 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -475,7 +475,8 @@ static noinline int add_ra_bio_pages(struct inode *inode,
}
folio = filemap_alloc_folio(mapping_gfp_constraint(mapping,
- ~__GFP_FS), 0);
+ ~__GFP_FS),
+ 0, NULL);
if (!folio)
break;
diff --git a/fs/btrfs/verity.c b/fs/btrfs/verity.c
index b7a96a005487..c43a789ba6d2 100644
--- a/fs/btrfs/verity.c
+++ b/fs/btrfs/verity.c
@@ -742,7 +742,7 @@ static struct page *btrfs_read_merkle_tree_page(struct inode *inode,
}
folio = filemap_alloc_folio(mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS),
- 0);
+ 0, NULL);
if (!folio)
return ERR_PTR(-ENOMEM);
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index fe8071844724..00e9160a0d24 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -562,7 +562,7 @@ static void z_erofs_bind_cache(struct z_erofs_frontend *fe)
* Allocate a managed folio for cached I/O, or it may be
* then filled with a file-backed folio for in-place I/O
*/
- newfolio = filemap_alloc_folio(gfp, 0);
+ newfolio = filemap_alloc_folio(gfp, 0, NULL);
if (!newfolio)
continue;
newfolio->private = Z_EROFS_PREALLOCATED_FOLIO;
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index b3c1df93a163..7ef937dd7624 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -1942,7 +1942,7 @@ void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
return;
}
- cfolio = filemap_alloc_folio(__GFP_NOWARN | __GFP_IO, 0);
+ cfolio = filemap_alloc_folio(__GFP_NOWARN | __GFP_IO, 0, NULL);
if (!cfolio)
return;
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index e63fbfbd5b0f..c176aeeb38db 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -646,9 +646,9 @@ static inline void *detach_page_private(struct page *page)
}
#ifdef CONFIG_NUMA
-struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order);
+struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order, struct mempolicy *policy);
#else
-static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
+static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order, struct mempolicy *policy)
{
return folio_alloc_noprof(gfp, order);
}
@@ -659,7 +659,7 @@ static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int o
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
- return &filemap_alloc_folio(gfp, 0)->page;
+ return &filemap_alloc_folio(gfp, 0, NULL)->page;
}
static inline gfp_t readahead_gfp_mask(struct address_space *x)
diff --git a/mm/filemap.c b/mm/filemap.c
index bada249b9fb7..a26df313207d 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -989,11 +989,16 @@ int filemap_add_folio(struct address_space *mapping, struct folio *folio,
EXPORT_SYMBOL_GPL(filemap_add_folio);
#ifdef CONFIG_NUMA
-struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
+struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order,
+ struct mempolicy *policy)
{
int n;
struct folio *folio;
+ if (policy)
+ return folio_alloc_mpol_noprof(gfp, order, policy,
+ NO_INTERLEAVE_INDEX, numa_node_id());
+
if (cpuset_do_page_mem_spread()) {
unsigned int cpuset_mems_cookie;
do {
@@ -1977,7 +1982,7 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
err = -ENOMEM;
if (order > min_order)
alloc_gfp |= __GFP_NORETRY | __GFP_NOWARN;
- folio = filemap_alloc_folio(alloc_gfp, order);
+ folio = filemap_alloc_folio(alloc_gfp, order, NULL);
if (!folio)
continue;
@@ -2516,7 +2521,7 @@ static int filemap_create_folio(struct kiocb *iocb, struct folio_batch *fbatch)
if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ))
return -EAGAIN;
- folio = filemap_alloc_folio(mapping_gfp_mask(mapping), min_order);
+ folio = filemap_alloc_folio(mapping_gfp_mask(mapping), min_order, NULL);
if (!folio)
return -ENOMEM;
if (iocb->ki_flags & IOCB_DONTCACHE)
@@ -3854,7 +3859,7 @@ static struct folio *do_read_cache_folio(struct address_space *mapping,
folio = filemap_get_folio(mapping, index);
if (IS_ERR(folio)) {
folio = filemap_alloc_folio(gfp,
- mapping_min_folio_order(mapping));
+ mapping_min_folio_order(mapping), NULL);
if (!folio)
return ERR_PTR(-ENOMEM);
index = mapping_align_index(mapping, index);
diff --git a/mm/readahead.c b/mm/readahead.c
index 20d36d6b055e..0b2aec0231e6 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -183,7 +183,7 @@ static struct folio *ractl_alloc_folio(struct readahead_control *ractl,
{
struct folio *folio;
- folio = filemap_alloc_folio(gfp_mask, order);
+ folio = filemap_alloc_folio(gfp_mask, order, NULL);
if (folio && ractl->dropbehind)
__folio_set_dropbehind(folio);
--
2.47.2