Message-Id: <20230915183848.1018717-19-kernel@pankajraghav.com>
Date: Fri, 15 Sep 2023 20:38:43 +0200
From: Pankaj Raghav <kernel@...kajraghav.com>
To: linux-xfs@...r.kernel.org, linux-fsdevel@...r.kernel.org
Cc: p.raghav@...sung.com, david@...morbit.com, da.gomez@...sung.com,
akpm@...ux-foundation.org, linux-kernel@...r.kernel.org,
willy@...radead.org, djwong@...nel.org, linux-mm@...ck.org,
chandan.babu@...cle.com, mcgrof@...nel.org, gost.dev@...sung.com
Subject: [RFC 18/23] readahead: align ra start and size to mapping_min_order in ondemand_ra()
From: Luis Chamberlain <mcgrof@...nel.org>
Align ra->start and ra->size to mapping_min_order in
ondemand_readahead(). This ensures that the folios added to the
page cache are aligned to mapping_min_order number of pages.
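
For illustration only: the rounding used here is ordinary power-of-two
alignment arithmetic. Below is a minimal userspace sketch of the same
invariant the new VM_BUG_ON()s assert, with local stand-ins for the
kernel's round_down()/round_up() macros and an invented min_order value:

	#include <assert.h>
	#include <stdio.h>

	/* Local stand-ins for the kernel macros; align must be a power of two. */
	#define round_down(x, align)	((x) & ~((align) - 1))
	#define round_up(x, align)	(((x) + (align) - 1) & ~((align) - 1))

	int main(void)
	{
		unsigned int min_order = 2;			/* invented: order-2 min folios */
		unsigned long min_nrpages = 1UL << min_order;	/* 4 pages */
		unsigned long start = 13, size = 22;		/* arbitrary unaligned values */

		start = round_down(start, min_nrpages);		/* 13 -> 12 */
		size = round_up(size, min_nrpages);		/* 22 -> 24 */

		/* The invariant the VM_BUG_ON()s assert: the low bits are clear. */
		assert((start & (min_nrpages - 1)) == 0);
		assert((size & (min_nrpages - 1)) == 0);

		printf("start=%lu size=%lu, aligned to %lu pages\n",
		       start, size, min_nrpages);
		return 0;
	}

Note that for min_order == 0 (min_nrpages == 1) the rounding is a no-op,
so the existing behaviour is unchanged.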
Signed-off-by: Luis Chamberlain <mcgrof@...nel.org>
---
mm/readahead.c | 29 ++++++++++++++++++++++-------
1 file changed, 22 insertions(+), 7 deletions(-)
diff --git a/mm/readahead.c b/mm/readahead.c
index 7c2660815a01..03fa6f6c8145 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -605,7 +605,11 @@ static void ondemand_readahead(struct readahead_control *ractl,
 	unsigned long add_pages;
 	pgoff_t index = readahead_index(ractl);
 	pgoff_t expected, prev_index;
-	unsigned int order = folio ? folio_order(folio) : 0;
+	unsigned int min_order = mapping_min_folio_order(ractl->mapping);
+	unsigned int min_nrpages = 1UL << min_order;
+	unsigned int order = folio ? folio_order(folio) : min_order;
+
+	VM_BUG_ON(ractl->_index & (min_nrpages - 1));
 
 	/*
 	 * If the request exceeds the readahead window, allow the read to
@@ -627,9 +631,13 @@ static void ondemand_readahead(struct readahead_control *ractl,
 	expected = round_up(ra->start + ra->size - ra->async_size,
 			1UL << order);
 	if (index == expected || index == (ra->start + ra->size)) {
-		ra->start += ra->size;
-		ra->size = get_next_ra_size(ra, max_pages);
+		ra->start += round_down(ra->size, min_nrpages);
+		ra->size = get_next_ra_size(ra, min_order, max_pages);
 		ra->async_size = ra->size;
+
+		VM_BUG_ON(ra->size & ((1UL << min_order) - 1));
+		VM_BUG_ON(ra->start & ((1UL << min_order) - 1));
+
 		goto readit;
 	}
 
@@ -647,13 +655,19 @@ static void ondemand_readahead(struct readahead_control *ractl,
 				max_pages);
 		rcu_read_unlock();
 
+		start = round_down(start, min_nrpages);
+
+		VM_BUG_ON(start & (min_nrpages - 1));
+		VM_BUG_ON(folio->index & (folio_nr_pages(folio) - 1));
+
 		if (!start || start - index > max_pages)
 			return;
 
 		ra->start = start;
 		ra->size = start - index;	/* old async_size */
-		ra->size += req_size;
-		ra->size = get_next_ra_size(ra, max_pages);
+		VM_BUG_ON(ra->size & (min_nrpages - 1));
+		ra->size += round_up(req_size, min_nrpages);
+		ra->size = get_next_ra_size(ra, min_order, max_pages);
 		ra->async_size = ra->size;
 		goto readit;
 	}
@@ -690,7 +704,7 @@ static void ondemand_readahead(struct readahead_control *ractl,
 
 initial_readahead:
 	ra->start = index;
-	ra->size = get_init_ra_size(req_size, max_pages);
+	ra->size = get_init_ra_size(req_size, min_order, max_pages);
 	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;
 
 readit:
@@ -701,7 +715,7 @@ static void ondemand_readahead(struct readahead_control *ractl,
 	 * Take care of maximum IO pages as above.
 	 */
 	if (index == ra->start && ra->size == ra->async_size) {
-		add_pages = get_next_ra_size(ra, max_pages);
+		add_pages = get_next_ra_size(ra, min_order, max_pages);
 		if (ra->size + add_pages <= max_pages) {
 			ra->async_size = add_pages;
 			ra->size += add_pages;
@@ -712,6 +726,7 @@ static void ondemand_readahead(struct readahead_control *ractl,
 	}
 
 	ractl->_index = ra->start;
+	VM_BUG_ON(ractl->_index & (min_nrpages - 1));
 	page_cache_ra_order(ractl, ra, order);
 }
--
2.40.1