diff --git a/mm/readahead.c b/mm/readahead.c
index b415c9969176..8462e744a890 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -467,7 +467,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
 	struct address_space *mapping = ractl->mapping;
 	pgoff_t start = readahead_index(ractl);
 	pgoff_t index = start;
-	unsigned int min_order = mapping_min_folio_order(mapping);
+	unsigned int min_order;
 	pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT;
 	pgoff_t mark = index + ra->size - ra->async_size;
 	unsigned int nofs;
@@ -475,6 +475,10 @@ void page_cache_ra_order(struct readahead_control *ractl,
 	gfp_t gfp = readahead_gfp_mask(mapping);
 	unsigned int new_order = ra->order;
 
+	/* See comment in page_cache_ra_unbounded() */
+	nofs = memalloc_nofs_save();
+	filemap_invalidate_lock_shared(mapping);
+
 	trace_page_cache_ra_order(mapping->host, start, ra);
 	if (!mapping_large_folio_support(mapping)) {
 		ra->order = 0;
@@ -483,15 +487,14 @@ void page_cache_ra_order(struct readahead_control *ractl,
 
 	limit = min(limit, index + ra->size - 1);
 
+	min_order = mapping_min_folio_order(mapping);
+
 	new_order = min(mapping_max_folio_order(mapping), new_order);
 	new_order = min_t(unsigned int, new_order, ilog2(ra->size));
 	new_order = max(new_order, min_order);
 
 	ra->order = new_order;
 
-	/* See comment in page_cache_ra_unbounded() */
-	nofs = memalloc_nofs_save();
-	filemap_invalidate_lock_shared(mapping);
 	/*
 	 * If the new_order is greater than min_order and index is
 	 * already aligned to new_order, then this will be noop as index
@@ -516,8 +519,6 @@ void page_cache_ra_order(struct readahead_control *ractl,
 	}
 
 	read_pages(ractl);
-	filemap_invalidate_unlock_shared(mapping);
-	memalloc_nofs_restore(nofs);
 
 	/*
 	 * If there were already pages in the page cache, then we may have
@@ -525,7 +526,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
 	 * situation below.
 	 */
 	if (!err)
-		return;
+		goto end;
 fallback:
 	/*
 	 * ->readahead() may have updated readahead window size so we have to
@@ -534,6 +535,9 @@ void page_cache_ra_order(struct readahead_control *ractl,
 	if (ra->size > index - start)
 		do_page_cache_ra(ractl, ra->size - (index - start),
 				 ra->async_size);
+end:
+	filemap_invalidate_unlock_shared(mapping);
+	memalloc_nofs_restore(nofs);
 }
 
 static unsigned long ractl_max_pages(struct readahead_control *ractl,
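
The reordering above follows a common single-exit pattern: acquire the locks
before sampling any mapping state (so mapping_min_folio_order() is read under
filemap_invalidate_lock_shared()), and funnel both the success path and the
fallback path through one label that releases everything in reverse order.
Below is a minimal standalone sketch of that pattern, not kernel code:
fake_mapping, min_folio_order, do_readahead(), and the pthread rwlock are
hypothetical stand-ins for mapping, mapping_min_folio_order(), the readahead
body, and the shared invalidate_lock. Build with: cc -pthread sketch.c

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for struct address_space. */
struct fake_mapping {
	pthread_rwlock_t invalidate_lock; /* stands in for mapping->invalidate_lock */
	unsigned int min_folio_order;     /* stands in for mapping_min_folio_order() */
};

/* Stand-in for the readahead body; 0 means success, nonzero would
 * route the caller through its fallback path. */
static int do_readahead(struct fake_mapping *m, unsigned int order)
{
	printf("reading ahead at order %u\n", order);
	return 0;
}

static void ra_order(struct fake_mapping *m)
{
	unsigned int min_order;
	int err;

	/* Take the shared lock *before* sampling any mapping state, and
	 * hold it across both the normal and the fallback path -- the
	 * same reordering the patch applies to page_cache_ra_order(). */
	pthread_rwlock_rdlock(&m->invalidate_lock);

	min_order = m->min_folio_order; /* now read under the lock */

	err = do_readahead(m, min_order);
	if (!err)
		goto end; /* was a bare "return" before the patch */

	/* Fallback runs with the lock still held. */
	do_readahead(m, 0);
end:
	/* Single exit: release in reverse order of acquisition. */
	pthread_rwlock_unlock(&m->invalidate_lock);
}

int main(void)
{
	struct fake_mapping m = { .min_folio_order = 2 };

	pthread_rwlock_init(&m.invalidate_lock, NULL);
	ra_order(&m);
	pthread_rwlock_destroy(&m.invalidate_lock);
	return 0;
}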