Message-ID: <20251201191940.883657-4-jaegeuk@kernel.org>
Date: Mon, 1 Dec 2025 19:16:15 +0000
From: Jaegeuk Kim <jaegeuk@...nel.org>
To: linux-kernel@...r.kernel.org,
linux-f2fs-devel@...ts.sourceforge.net
Cc: Jaegeuk Kim <jaegeuk@...nel.org>
Subject: [PATCH 3/4] mm/readahead: add a_ops->ra_folio_order to get a desired folio order
This patch introduces a new address_space operation, a_ops->ra_folio_order(),
which lets the filesystem propose the folio order that page_cache_sync_ra()
should use in place of the order it would otherwise pick. This way, each
filesystem can set the desired minimum folio allocation order for readahead
requested via fadvise(POSIX_FADV_WILLNEED).
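
As an illustration, a filesystem could hook this up as below (a sketch
only, not part of this patch; "myfs" and myfs_want_large_folios() are
hypothetical):

	/* Ask readahead for at least order-4 (64KiB with 4KiB pages)
	 * folios on mappings that benefit from large folios; otherwise
	 * keep the order that the readahead code proposed.
	 */
	static unsigned int myfs_ra_folio_order(struct address_space *mapping,
						unsigned int order)
	{
		if (myfs_want_large_folios(mapping->host))
			return max(order, 4U);
		return order;
	}

	static const struct address_space_operations myfs_aops = {
		/* ... other operations ... */
		.ra_folio_order	= myfs_ra_folio_order,
	};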
Signed-off-by: Jaegeuk Kim <jaegeuk@...nel.org>
---
include/linux/fs.h | 4 ++++
include/linux/pagemap.h | 12 ++++++++++++
mm/readahead.c | 6 ++++--
3 files changed, 20 insertions(+), 2 deletions(-)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index c895146c1444..ddab68b7e03b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -472,6 +472,10 @@ struct address_space_operations {
 	void (*is_dirty_writeback) (struct folio *, bool *dirty, bool *wb);
 	int (*error_remove_folio)(struct address_space *, struct folio *);
 
+	/* Propose a folio order for readahead allocation. */
+	unsigned int (*ra_folio_order)(struct address_space *mapping,
+				       unsigned int order);
+
 	/* swapfile support */
 	int (*swap_activate)(struct swap_info_struct *sis, struct file *file,
 			sector_t *span);
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 09b581c1d878..e1fe07477220 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -476,6 +476,18 @@ mapping_min_folio_order(const struct address_space *mapping)
 	return (mapping->flags & AS_FOLIO_ORDER_MIN_MASK) >> AS_FOLIO_ORDER_MIN;
 }
 
+static inline unsigned int
+mapping_ra_folio_order(struct address_space *mapping, unsigned int order)
+{
+	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+		return 0;
+
+	if (!mapping->a_ops->ra_folio_order)
+		return order;
+
+	return mapping->a_ops->ra_folio_order(mapping, order);
+}
+
 static inline unsigned long
 mapping_min_folio_nrpages(const struct address_space *mapping)
 {
diff --git a/mm/readahead.c b/mm/readahead.c
index 5beaf7803554..8c7d08af6e00 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -592,8 +592,10 @@ void page_cache_sync_ra(struct readahead_control *ractl,
 	 * A start of file, oversized read, or sequential cache miss:
 	 * trivial case: (index - prev_index) == 1
 	 * unaligned reads: (index - prev_index) == 0
+	 * if the filesystem requests a high-order allocation
 	 */
-	if (!index || req_count > max_pages || index - prev_index <= 1UL) {
+	if (!index || req_count > max_pages || index - prev_index <= 1UL ||
+	    mapping_ra_folio_order(ractl->mapping, 0)) {
 		ra->start = index;
 		ra->size = get_init_ra_size(req_count, max_pages);
 		ra->async_size = ra->size > req_count ? ra->size - req_count :
@@ -627,7 +629,7 @@ void page_cache_sync_ra(struct readahead_control *ractl,
 	ra->size = min(contig_count + req_count, max_pages);
 	ra->async_size = 1;
 readit:
-	ra->order = 0;
+	ra->order = mapping_ra_folio_order(ractl->mapping, 0);
 	ractl->_index = ra->start;
 	page_cache_ra_order(ractl, ra);
 }
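
For reference, the userspace trigger is a plain fadvise() call (a sketch
assuming the POSIX_FADV_WILLNEED path reaches page_cache_sync_ra() as
described above; error handling omitted):

	#include <fcntl.h>

	/* Hint that [off, off + len) will be needed soon.  The kernel
	 * starts readahead for the range, and the filesystem's
	 * ra_folio_order() hook, if set, decides the minimum folio
	 * order used for it.
	 */
	static int hint_willneed(int fd, off_t off, off_t len)
	{
		return posix_fadvise(fd, off, len, POSIX_FADV_WILLNEED);
	}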
--
2.52.0.107.ga0afd4fd5b-goog