Message-Id: <20210712030701.4000097-137-willy@infradead.org>
Date: Mon, 12 Jul 2021 04:07:00 +0100
From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
To: linux-kernel@...r.kernel.org
Cc: "Matthew Wilcox (Oracle)" <willy@...radead.org>,
linux-mm@...ck.org, linux-fsdevel@...r.kernel.org
Subject: [PATCH v13 136/137] mm/readahead: Convert page_cache_async_ra() to take a folio

This lets us pass the folio in directly from filemap_readahead(), but
the primary reason is to enable us to pass a folio to
ondemand_readahead() in the next patch.

Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
---
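A note for reviewers, not intended for the commit message: with this
change, a caller that already has a folio can drive async readahead
without going through the struct page wrapper.  A rough sketch (the
names 'file', 'mapping', 'folio' and 'req_count' below are simply
assumed to be in scope; this is illustrative, not new API):

	/* Folio-aware caller: build a ractl and pass the folio directly. */
	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, folio->index);
	page_cache_async_ra(&ractl, folio, req_count);

Callers that still only have a struct page are unchanged; the
page_cache_async_readahead() wrapper now converts with page_folio()
before calling page_cache_async_ra().
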
 include/linux/pagemap.h | 4 ++--
 mm/filemap.c            | 5 +++--
 mm/readahead.c          | 6 +++---
 3 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 71844b55d0a8..51784f8b9b32 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -955,7 +955,7 @@ struct readahead_control {
 void page_cache_ra_unbounded(struct readahead_control *,
 		unsigned long nr_to_read, unsigned long lookahead_count);
 void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
-void page_cache_async_ra(struct readahead_control *, struct page *,
+void page_cache_async_ra(struct readahead_control *, struct folio *,
 		unsigned long req_count);
 void readahead_expand(struct readahead_control *ractl,
 		      loff_t new_start, size_t new_len);
@@ -1002,7 +1002,7 @@ void page_cache_async_readahead(struct address_space *mapping,
 		struct page *page, pgoff_t index, unsigned long req_count)
 {
 	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
-	page_cache_async_ra(&ractl, page, req_count);
+	page_cache_async_ra(&ractl, page_folio(page), req_count);
 }
 
 static inline struct folio *__readahead_folio(struct readahead_control *ractl)
diff --git a/mm/filemap.c b/mm/filemap.c
index 57dd01c5060c..2fda11f583a5 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2419,10 +2419,11 @@ static int filemap_readahead(struct kiocb *iocb, struct file *file,
 		struct address_space *mapping, struct folio *folio,
 		pgoff_t last_index)
 {
+	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, folio->index);
+
 	if (iocb->ki_flags & IOCB_NOIO)
 		return -EAGAIN;
-	page_cache_async_readahead(mapping, &file->f_ra, file, &folio->page,
-			folio->index, last_index - folio->index);
+	page_cache_async_ra(&ractl, folio, last_index - folio->index);
 	return 0;
 }
 
diff --git a/mm/readahead.c b/mm/readahead.c
index d589f147f4c2..30115a21e304 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -580,7 +580,7 @@ void page_cache_sync_ra(struct readahead_control *ractl,
 EXPORT_SYMBOL_GPL(page_cache_sync_ra);
 
 void page_cache_async_ra(struct readahead_control *ractl,
-		struct page *page, unsigned long req_count)
+		struct folio *folio, unsigned long req_count)
 {
 	/* no read-ahead */
 	if (!ractl->ra->ra_pages)
@@ -589,10 +589,10 @@ void page_cache_async_ra(struct readahead_control *ractl,
 	/*
 	 * Same bit is used for PG_readahead and PG_reclaim.
 	 */
-	if (PageWriteback(page))
+	if (folio_writeback(folio))
 		return;
 
-	ClearPageReadahead(page);
+	folio_clear_readahead_flag(folio);
 
 	/*
 	 * Defer asynchronous read-ahead on IO congestion.
--
2.30.2