Message-Id: <20200217184613.19668-16-willy@infradead.org>
Date: Mon, 17 Feb 2020 10:45:56 -0800
From: Matthew Wilcox <willy@...radead.org>
To: linux-fsdevel@...r.kernel.org
Cc: "Matthew Wilcox (Oracle)" <willy@...radead.org>,
linux-mm@...ck.org, linux-kernel@...r.kernel.org,
linux-btrfs@...r.kernel.org, linux-erofs@...ts.ozlabs.org,
linux-ext4@...r.kernel.org, linux-f2fs-devel@...ts.sourceforge.net,
cluster-devel@...hat.com, ocfs2-devel@....oracle.com,
linux-xfs@...r.kernel.org
Subject: [PATCH v6 09/19] mm: Add page_cache_readahead_limit
From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
ext4 and f2fs have duplicated the guts of the readahead code so
they can read past i_size. Instead, separate out the guts of the
readahead code so they can call it directly.
Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
---
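A minimal sketch of the intended call pattern (it mirrors the ext4/f2fs
hunks below; "myfs" and myfs_read_merkle_tree_page() are hypothetical
names, not part of this patch):

#include <linux/pagemap.h>

/*
 * Read one Merkle tree page, starting readahead past i_size if the
 * page is not yet cached.  A NULL file is acceptable here; LONG_MAX
 * as end_index disables the i_size clamp, and a lookahead_size of 0
 * means no page is marked to trigger further readahead.
 */
static struct page *myfs_read_merkle_tree_page(struct inode *inode,
		pgoff_t index, unsigned long num_ra_pages)
{
	struct page *page;

	page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED);
	if (!page || !PageUptodate(page)) {
		if (page)
			put_page(page);
		else if (num_ra_pages > 1)
			page_cache_readahead_limit(inode->i_mapping, NULL,
					index, LONG_MAX, num_ra_pages, 0);
		page = read_mapping_page(inode->i_mapping, index, NULL);
	}
	return page;
}
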
fs/ext4/verity.c | 35 ++---------------------
fs/f2fs/verity.c | 35 ++---------------------
include/linux/pagemap.h | 4 +++
mm/readahead.c | 61 +++++++++++++++++++++++++++++------------
4 files changed, 52 insertions(+), 83 deletions(-)

diff --git a/fs/ext4/verity.c b/fs/ext4/verity.c
index dc5ec724d889..f6e0bf05933e 100644
--- a/fs/ext4/verity.c
+++ b/fs/ext4/verity.c
@@ -342,37 +342,6 @@ static int ext4_get_verity_descriptor(struct inode *inode, void *buf,
return desc_size;
}
-/*
- * Prefetch some pages from the file's Merkle tree.
- *
- * This is basically a stripped-down version of __do_page_cache_readahead()
- * which works on pages past i_size.
- */
-static void ext4_merkle_tree_readahead(struct address_space *mapping,
- pgoff_t start_index, unsigned long count)
-{
- LIST_HEAD(pages);
- unsigned int nr_pages = 0;
- struct page *page;
- pgoff_t index;
- struct blk_plug plug;
-
- for (index = start_index; index < start_index + count; index++) {
- page = xa_load(&mapping->i_pages, index);
- if (!page || xa_is_value(page)) {
- page = __page_cache_alloc(readahead_gfp_mask(mapping));
- if (!page)
- break;
- page->index = index;
- list_add(&page->lru, &pages);
- nr_pages++;
- }
- }
- blk_start_plug(&plug);
- ext4_mpage_readpages(mapping, &pages, NULL, nr_pages, true);
- blk_finish_plug(&plug);
-}
-
static struct page *ext4_read_merkle_tree_page(struct inode *inode,
pgoff_t index,
unsigned long num_ra_pages)
@@ -386,8 +355,8 @@ static struct page *ext4_read_merkle_tree_page(struct inode *inode,
if (page)
put_page(page);
else if (num_ra_pages > 1)
- ext4_merkle_tree_readahead(inode->i_mapping, index,
- num_ra_pages);
+ page_cache_readahead_limit(inode->i_mapping, NULL,
+ index, LONG_MAX, num_ra_pages, 0);
page = read_mapping_page(inode->i_mapping, index, NULL);
}
return page;
diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c
index d7d430a6f130..71a3e36721fa 100644
--- a/fs/f2fs/verity.c
+++ b/fs/f2fs/verity.c
@@ -222,37 +222,6 @@ static int f2fs_get_verity_descriptor(struct inode *inode, void *buf,
return size;
}
-/*
- * Prefetch some pages from the file's Merkle tree.
- *
- * This is basically a stripped-down version of __do_page_cache_readahead()
- * which works on pages past i_size.
- */
-static void f2fs_merkle_tree_readahead(struct address_space *mapping,
- pgoff_t start_index, unsigned long count)
-{
- LIST_HEAD(pages);
- unsigned int nr_pages = 0;
- struct page *page;
- pgoff_t index;
- struct blk_plug plug;
-
- for (index = start_index; index < start_index + count; index++) {
- page = xa_load(&mapping->i_pages, index);
- if (!page || xa_is_value(page)) {
- page = __page_cache_alloc(readahead_gfp_mask(mapping));
- if (!page)
- break;
- page->index = index;
- list_add(&page->lru, &pages);
- nr_pages++;
- }
- }
- blk_start_plug(&plug);
- f2fs_mpage_readpages(mapping, &pages, NULL, nr_pages, true);
- blk_finish_plug(&plug);
-}
-
static struct page *f2fs_read_merkle_tree_page(struct inode *inode,
pgoff_t index,
unsigned long num_ra_pages)
@@ -266,8 +235,8 @@ static struct page *f2fs_read_merkle_tree_page(struct inode *inode,
if (page)
put_page(page);
else if (num_ra_pages > 1)
- f2fs_merkle_tree_readahead(inode->i_mapping, index,
- num_ra_pages);
+ page_cache_readahead_limit(inode->i_mapping, NULL,
+ index, LONG_MAX, num_ra_pages, 0);
page = read_mapping_page(inode->i_mapping, index, NULL);
}
return page;
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index bd4291f78f41..4f36c06d064d 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -389,6 +389,10 @@ extern struct page * read_cache_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
struct list_head *pages, filler_t *filler, void *data);
+void page_cache_readahead_limit(struct address_space *mapping,
+ struct file *file, pgoff_t offset, pgoff_t end_index,
+ unsigned long nr_to_read, unsigned long lookahead_size);
+
static inline struct page *read_mapping_page(struct address_space *mapping,
pgoff_t index, void *data)
diff --git a/mm/readahead.c b/mm/readahead.c
index 975ff5e387be..94d499cfb657 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -142,35 +142,38 @@ static void read_pages(struct readahead_control *rac, struct list_head *pages)
blk_finish_plug(&plug);
}
-/*
- * __do_page_cache_readahead() actually reads a chunk of disk. It allocates
- * the pages first, then submits them for I/O. This avoids the very bad
- * behaviour which would occur if page allocations are causing VM writeback.
- * We really don't want to intermingle reads and writes like that.
+/**
+ * page_cache_readahead_limit - Start readahead beyond a file's i_size.
+ * @mapping: File address space.
+ * @file: This instance of the open file; used for authentication.
+ * @offset: First page index to read.
+ * @end_index: The maximum page index to read.
+ * @nr_to_read: The number of pages to read.
+ * @lookahead_size: Where to start the next readahead.
+ *
+ * This function is for filesystems to call when they want to start
+ * readahead potentially beyond a file's stated i_size. If you want
+ * to start readahead on a normal file, you probably want to call
+ * page_cache_async_readahead() or page_cache_sync_readahead() instead.
+ *
+ * Context: File is referenced by caller. Mutexes may be held by caller.
+ * May sleep, but will not reenter filesystem to reclaim memory.
*/
-void __do_page_cache_readahead(struct address_space *mapping,
- struct file *filp, pgoff_t offset, unsigned long nr_to_read,
- unsigned long lookahead_size)
+void page_cache_readahead_limit(struct address_space *mapping,
+ struct file *file, pgoff_t offset, pgoff_t end_index,
+ unsigned long nr_to_read, unsigned long lookahead_size)
{
- struct inode *inode = mapping->host;
- unsigned long end_index; /* The last page we want to read */
LIST_HEAD(page_pool);
unsigned long i;
- loff_t isize = i_size_read(inode);
gfp_t gfp_mask = readahead_gfp_mask(mapping);
bool use_list = mapping->a_ops->readpages;
struct readahead_control rac = {
.mapping = mapping,
- .file = filp,
+ .file = file,
._start = offset,
._nr_pages = 0,
};
- if (isize == 0)
- return;
-
- end_index = ((isize - 1) >> PAGE_SHIFT);
-
/*
* Preallocate as many pages as we will need.
*/
@@ -225,6 +228,30 @@ void __do_page_cache_readahead(struct address_space *mapping,
read_pages(&rac, &page_pool);
BUG_ON(!list_empty(&page_pool));
}
+EXPORT_SYMBOL_GPL(page_cache_readahead_limit);
+
+/*
+ * __do_page_cache_readahead() actually reads a chunk of disk. It allocates
+ * the pages first, then submits them for I/O. This avoids the very bad
+ * behaviour which would occur if page allocations are causing VM writeback.
+ * We really don't want to intermingle reads and writes like that.
+ */
+void __do_page_cache_readahead(struct address_space *mapping,
+ struct file *file, pgoff_t offset, unsigned long nr_to_read,
+ unsigned long lookahead_size)
+{
+ struct inode *inode = mapping->host;
+ unsigned long end_index; /* The last page we want to read */
+ loff_t isize = i_size_read(inode);
+
+ if (isize == 0)
+ return;
+
+ end_index = ((isize - 1) >> PAGE_SHIFT);
+
+ page_cache_readahead_limit(mapping, file, offset, end_index,
+ nr_to_read, lookahead_size);
+}
/*
* Chunk the readahead into 2 megabyte units, so that we don't pin too much
--
2.25.0
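
As a worked example of the clamp that the refactored
__do_page_cache_readahead() applies before delegating (the values are
illustrative): with i_size = 5000 bytes and PAGE_SHIFT = 12,
end_index = (5000 - 1) >> 12 = 1, so ordinary readahead never touches
pages past index 1. The verity callers instead pass
end_index = LONG_MAX, which is what lets them read Merkle tree pages
stored beyond i_size.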