Message-ID: <20240213164652.GW6184@frogsfrogsfrogs>
Date: Tue, 13 Feb 2024 08:46:52 -0800
From: "Darrick J. Wong" <djwong@...nel.org>
To: "Pankaj Raghav (Samsung)" <kernel@...kajraghav.com>
Cc: linux-xfs@...r.kernel.org, linux-fsdevel@...r.kernel.org,
mcgrof@...nel.org, gost.dev@...sung.com, akpm@...ux-foundation.org,
kbusch@...nel.org, chandan.babu@...cle.com, p.raghav@...sung.com,
linux-kernel@...r.kernel.org, hare@...e.de, willy@...radead.org,
linux-mm@...ck.org, david@...morbit.com
Subject: Re: [RFC v2 05/14] readahead: align index to mapping_min_order in
ondemand_ra and force_ra
On Tue, Feb 13, 2024 at 10:37:04AM +0100, Pankaj Raghav (Samsung) wrote:
> From: Luis Chamberlain <mcgrof@...nel.org>
>
> Align ra->start and ra->size to mapping_min_order in
> ondemand_readahead(), and align the index to mapping_min_order in
> force_page_cache_ra(). This ensures that the folios allocated for
> readahead and added to the page cache are aligned to
> mapping_min_order.
>
> Signed-off-by: Luis Chamberlain <mcgrof@...nel.org>
> Signed-off-by: Pankaj Raghav <p.raghav@...sung.com>
Acked-by: Darrick J. Wong <djwong@...nel.org>
--D
> ---
> mm/readahead.c | 48 ++++++++++++++++++++++++++++++++++++++++--------
> 1 file changed, 40 insertions(+), 8 deletions(-)
>
> diff --git a/mm/readahead.c b/mm/readahead.c
> index 4fa7d0e65706..5e1ec7705c78 100644
> --- a/mm/readahead.c
> +++ b/mm/readahead.c
> @@ -315,6 +315,7 @@ void force_page_cache_ra(struct readahead_control *ractl,
> struct file_ra_state *ra = ractl->ra;
> struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
> unsigned long max_pages, index;
> + unsigned int min_nrpages = mapping_min_folio_nrpages(mapping);
>
> if (unlikely(!mapping->a_ops->read_folio && !mapping->a_ops->readahead))
> return;
> @@ -324,6 +325,13 @@ void force_page_cache_ra(struct readahead_control *ractl,
> * be up to the optimal hardware IO size
> */
> index = readahead_index(ractl);
> + if (!IS_ALIGNED(index, min_nrpages)) {
> + unsigned long old_index = index;
> +
> + index = round_down(index, min_nrpages);
> + nr_to_read += (old_index - index);
> + }
> +
> max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
> nr_to_read = min_t(unsigned long, nr_to_read, max_pages);
> while (nr_to_read) {
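
The compensation step above is easy to sanity-check in isolation. A
throwaway userspace sketch (the kernel's IS_ALIGNED()/round_down()
helpers are reimplemented here and all values are made up; both macros
assume a power-of-two alignment, which mapping_min_folio_nrpages(),
i.e. 1 << min_order, guarantees):

#include <assert.h>
#include <stdio.h>

#define IS_ALIGNED(x, a)  (((x) & ((a) - 1)) == 0)
#define round_down(x, a)  ((x) & ~((unsigned long)(a) - 1))

int main(void)
{
        unsigned long min_nrpages = 4;  /* e.g. 16k min folio, 4k pages */
        unsigned long index = 6, nr_to_read = 10;

        /* Pull the start index back to an aligned boundary and grow
         * the request so the caller's original range stays covered. */
        if (!IS_ALIGNED(index, min_nrpages)) {
                unsigned long old_index = index;

                index = round_down(index, min_nrpages);
                nr_to_read += old_index - index;
        }

        assert(IS_ALIGNED(index, min_nrpages));
        printf("index=%lu nr_to_read=%lu\n", index, nr_to_read); /* 4, 12 */
        return 0;
}
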
> @@ -332,6 +340,7 @@ void force_page_cache_ra(struct readahead_control *ractl,
> if (this_chunk > nr_to_read)
> this_chunk = nr_to_read;
> ractl->_index = index;
> + VM_BUG_ON(!IS_ALIGNED(index, min_nrpages));
> do_page_cache_ra(ractl, this_chunk, 0);
>
> index += this_chunk;
> @@ -344,11 +353,20 @@ void force_page_cache_ra(struct readahead_control *ractl,
> * for small size, x 4 for medium, and x 2 for large
> * for 128k (32 page) max ra
> * 1-2 page = 16k, 3-4 page 32k, 5-8 page = 64k, > 8 page = 128k initial
> + *
> + * For address spaces with a higher-order minimum folio requirement,
> + * ensure the initial read is never smaller than the minimum number of
> + * pages required.
> + *
> + * The result is *always* capped at the max IO size allowed by the device.
> */
> -static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
> +static unsigned long get_init_ra_size(unsigned long size,
> + unsigned int min_nrpages,
> + unsigned long max)
> {
> unsigned long newsize = roundup_pow_of_two(size);
>
> + newsize = max_t(unsigned long, newsize, min_nrpages);
> +
> if (newsize <= max / 32)
> newsize = newsize * 4;
> else if (newsize <= max / 4)
> @@ -356,6 +374,8 @@ static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
> else
> newsize = max;
>
> + VM_BUG_ON(newsize & (min_nrpages - 1));
> +
> return newsize;
> }
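
To get a concrete feel for the new clamp, here is the same heuristic
transplanted to userspace (roundup_pow_of_two() is simplified and the
numbers are hypothetical, not from the patch):

#include <stdio.h>

static unsigned long roundup_pow_of_two(unsigned long n)
{
        unsigned long p = 1;

        while (p < n)
                p <<= 1;
        return p;
}

static unsigned long init_ra_size(unsigned long size,
                                  unsigned long min_nrpages,
                                  unsigned long max)
{
        unsigned long newsize = roundup_pow_of_two(size);

        if (newsize < min_nrpages)      /* the clamp this hunk adds */
                newsize = min_nrpages;

        if (newsize <= max / 32)
                newsize = newsize * 4;
        else if (newsize <= max / 4)
                newsize = newsize * 2;
        else
                newsize = max;
        return newsize;
}

int main(void)
{
        /* 2-page read, 4-page min folio, 32-page max window:
         * 2 -> 2 (pow2) -> 4 (clamp) -> 8 (4 <= 32/4, so doubled) */
        printf("%lu\n", init_ra_size(2, 4, 32));
        return 0;
}

Every branch keeps the result a power-of-two multiple of min_nrpages
(provided max itself is aligned), which appears to be what the
VM_BUG_ON is asserting.
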
>
> @@ -364,14 +384,16 @@ static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
> * return it as the new window size.
> */
> static unsigned long get_next_ra_size(struct file_ra_state *ra,
> + unsigned int min_nrpages,
> unsigned long max)
> {
> - unsigned long cur = ra->size;
> + unsigned long cur = max(ra->size, min_nrpages);
>
> if (cur < max / 16)
> return 4 * cur;
> if (cur <= max / 2)
> return 2 * cur;
> +
> return max;
> }
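
Same idea for the growth path; a quick check under the same simplified
assumptions shows that even a zero-sized window now ramps up from
min_nrpages rather than from zero:

#include <stdio.h>

static unsigned long next_ra_size(unsigned long ra_size,
                                  unsigned long min_nrpages,
                                  unsigned long max)
{
        unsigned long cur = ra_size > min_nrpages ? ra_size : min_nrpages;

        if (cur < max / 16)
                return 4 * cur;
        if (cur <= max / 2)
                return 2 * cur;
        return max;
}

int main(void)
{
        /* cur = max(0, 4) = 4; 4 is not < 32/16 but is <= 32/2,
         * so the window doubles to 8 pages instead of staying 0. */
        printf("%lu\n", next_ra_size(0, 4, 32));
        return 0;
}
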
>
> @@ -561,7 +583,11 @@ static void ondemand_readahead(struct readahead_control *ractl,
> unsigned long add_pages;
> pgoff_t index = readahead_index(ractl);
> pgoff_t expected, prev_index;
> - unsigned int order = folio ? folio_order(folio) : 0;
> + unsigned int min_order = mapping_min_folio_order(ractl->mapping);
> + unsigned int min_nrpages = mapping_min_folio_nrpages(ractl->mapping);
> + unsigned int order = folio ? folio_order(folio) : min_order;
> +
> + VM_BUG_ON(!IS_ALIGNED(ractl->_index, min_nrpages));
>
> /*
> * If the request exceeds the readahead window, allow the read to
> @@ -583,8 +609,8 @@ static void ondemand_readahead(struct readahead_control *ractl,
> expected = round_down(ra->start + ra->size - ra->async_size,
> 1UL << order);
> if (index == expected || index == (ra->start + ra->size)) {
> - ra->start += ra->size;
> - ra->size = get_next_ra_size(ra, max_pages);
> + ra->start += round_down(ra->size, min_nrpages);
> + ra->size = get_next_ra_size(ra, min_nrpages, max_pages);
> ra->async_size = ra->size;
> goto readit;
> }
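
One subtlety worth spelling out: if ra->size ever ends up unaligned,
the round_down here trades a little forward progress (the new window
slightly overlaps the old one) for keeping ra->start aligned. A toy
example with made-up values:

#include <assert.h>

#define round_down(x, a)  ((x) & ~((unsigned long)(a) - 1))

int main(void)
{
        unsigned long min_nrpages = 4;
        unsigned long ra_start = 16, ra_size = 10;  /* size not aligned */

        /* Advance by the aligned part of the old window only. */
        ra_start += round_down(ra_size, min_nrpages);  /* 16 + 8 = 24 */

        assert(ra_start % min_nrpages == 0);
        return 0;
}
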
> @@ -603,13 +629,18 @@ static void ondemand_readahead(struct readahead_control *ractl,
> max_pages);
> rcu_read_unlock();
>
> + start = round_down(start, min_nrpages);
> +
> + VM_BUG_ON(folio->index & (folio_nr_pages(folio) - 1));
> +
> if (!start || start - index > max_pages)
> return;
>
> ra->start = start;
> ra->size = start - index; /* old async_size */
> +
> ra->size += req_size;
> - ra->size = get_next_ra_size(ra, max_pages);
> + ra->size = get_next_ra_size(ra, min_nrpages, max_pages);
> ra->async_size = ra->size;
> goto readit;
> }
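
And the marker-hit path, again with hypothetical values, showing that
rounding the scanned-for hole down keeps the recovered window start
aligned:

#include <assert.h>

#define round_down(x, a)  ((x) & ~((unsigned long)(a) - 1))

int main(void)
{
        unsigned long min_nrpages = 4;
        unsigned long index = 8;   /* aligned, per the VM_BUG_ON on entry */
        unsigned long start = 13;  /* first hole found by the cache scan */
        unsigned long ra_start, ra_size;

        start = round_down(start, min_nrpages);  /* 13 -> 12 */
        ra_start = start;                        /* stays aligned */
        ra_size = start - index;                 /* old async_size: 4 */

        assert(ra_start % min_nrpages == 0);
        (void)ra_size;  /* the real code then adds req_size and calls
                         * get_next_ra_size() to re-derive the window */
        return 0;
}
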
> @@ -646,7 +677,7 @@ static void ondemand_readahead(struct readahead_control *ractl,
>
> initial_readahead:
> ra->start = index;
> - ra->size = get_init_ra_size(req_size, max_pages);
> + ra->size = get_init_ra_size(req_size, min_nrpages, max_pages);
> ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;
>
> readit:
> @@ -657,7 +688,7 @@ static void ondemand_readahead(struct readahead_control *ractl,
> * Take care of maximum IO pages as above.
> */
> if (index == ra->start && ra->size == ra->async_size) {
> - add_pages = get_next_ra_size(ra, max_pages);
> + add_pages = get_next_ra_size(ra, min_nrpages, max_pages);
> if (ra->size + add_pages <= max_pages) {
> ra->async_size = add_pages;
> ra->size += add_pages;
> @@ -668,6 +699,7 @@ static void ondemand_readahead(struct readahead_control *ractl,
> }
>
> ractl->_index = ra->start;
> + VM_BUG_ON(!IS_ALIGNED(ractl->_index, min_nrpages));
> page_cache_ra_order(ractl, ra, order);
> }
>
> --
> 2.43.0
>
>