Message-Id: <20200323202259.13363-25-willy@infradead.org>
Date: Mon, 23 Mar 2020 13:22:58 -0700
From: Matthew Wilcox <willy@...radead.org>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: "Matthew Wilcox (Oracle)" <willy@...radead.org>,
linux-fsdevel@...r.kernel.org, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, linux-btrfs@...r.kernel.org,
linux-erofs@...ts.ozlabs.org, linux-ext4@...r.kernel.org,
linux-f2fs-devel@...ts.sourceforge.net, cluster-devel@...hat.com,
ocfs2-devel@....oracle.com, linux-xfs@...r.kernel.org,
Dave Chinner <dchinner@...hat.com>,
William Kucharski <william.kucharski@...cle.com>
Subject: [PATCH v10 24/25] fuse: Convert from readpages to readahead
From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
Use the new readahead operation in fuse. Switching away from the
read_cache_pages() helper gets rid of an implicit call to put_page(),
so we can get rid of the get_page() call in fuse_readpages_fill().
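As background (an illustrative aside, not part of the patch, and only my
reading of the new API): readahead_page() hands the filesystem each page
locked and with a reference already held, and that reference is dropped
when the read completes, which is why the extra get_page() is no longer
needed. A minimal sketch of that calling convention, where
example_readahead() is a hypothetical filesystem implementation:

	/*
	 * Sketch only: readahead_page() returns each page locked and
	 * with a reference held.  The filesystem queues the page for
	 * I/O; on completion (in fuse, the request's end callback) it
	 * marks the page uptodate or in error, unlocks it and drops
	 * the reference with put_page().
	 */
	static void example_readahead(struct readahead_control *rac)
	{
		struct page *page;

		while ((page = readahead_page(rac))) {
			/* start an asynchronous read of @page here */
		}
	}
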
Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
Reviewed-by: Dave Chinner <dchinner@...hat.com>
Reviewed-by: William Kucharski <william.kucharski@...cle.com>
---
fs/fuse/file.c | 46 +++++++++++++++++++---------------------------
1 file changed, 19 insertions(+), 27 deletions(-)
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 9d67b830fb7a..5749505bcff6 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -923,9 +923,8 @@ struct fuse_fill_data {
 	unsigned int max_pages;
 };
 
-static int fuse_readpages_fill(void *_data, struct page *page)
+static int fuse_readpages_fill(struct fuse_fill_data *data, struct page *page)
 {
-	struct fuse_fill_data *data = _data;
 	struct fuse_io_args *ia = data->ia;
 	struct fuse_args_pages *ap = &ia->ap;
 	struct inode *inode = data->inode;
@@ -941,10 +940,8 @@ static int fuse_readpages_fill(void *_data, struct page *page)
 					fc->max_pages);
 		fuse_send_readpages(ia, data->file);
 		data->ia = ia = fuse_io_alloc(NULL, data->max_pages);
-		if (!ia) {
-			unlock_page(page);
+		if (!ia)
 			return -ENOMEM;
-		}
 		ap = &ia->ap;
 	}
 
@@ -954,7 +951,6 @@ static int fuse_readpages_fill(void *_data, struct page *page)
 		return -EIO;
 	}
 
-	get_page(page);
 	ap->pages[ap->num_pages] = page;
 	ap->descs[ap->num_pages].length = PAGE_SIZE;
 	ap->num_pages++;
@@ -962,37 +958,33 @@ static int fuse_readpages_fill(void *_data, struct page *page)
 	return 0;
 }
 
-static int fuse_readpages(struct file *file, struct address_space *mapping,
-			  struct list_head *pages, unsigned nr_pages)
+static void fuse_readahead(struct readahead_control *rac)
 {
-	struct inode *inode = mapping->host;
+	struct inode *inode = rac->mapping->host;
 	struct fuse_conn *fc = get_fuse_conn(inode);
 	struct fuse_fill_data data;
-	int err;
+	struct page *page;
 
-	err = -EIO;
 	if (is_bad_inode(inode))
-		goto out;
+		return;
 
-	data.file = file;
+	data.file = rac->file;
 	data.inode = inode;
-	data.nr_pages = nr_pages;
-	data.max_pages = min_t(unsigned int, nr_pages, fc->max_pages);
-;
+	data.nr_pages = readahead_count(rac);
+	data.max_pages = min_t(unsigned int, data.nr_pages, fc->max_pages);
 	data.ia = fuse_io_alloc(NULL, data.max_pages);
-	err = -ENOMEM;
 	if (!data.ia)
-		goto out;
+		return;
 
-	err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
-	if (!err) {
-		if (data.ia->ap.num_pages)
-			fuse_send_readpages(data.ia, file);
-		else
-			fuse_io_free(data.ia);
+	while ((page = readahead_page(rac))) {
+		if (fuse_readpages_fill(&data, page) != 0)
+			return;
 	}
-out:
-	return err;
+
+	if (data.ia->ap.num_pages)
+		fuse_send_readpages(data.ia, rac->file);
+	else
+		fuse_io_free(data.ia);
 }
 
 static ssize_t fuse_cache_read_iter(struct kiocb *iocb, struct iov_iter *to)
@@ -3373,10 +3365,10 @@ static const struct file_operations fuse_file_operations = {
 
 static const struct address_space_operations fuse_file_aops = {
 	.readpage	= fuse_readpage,
+	.readahead	= fuse_readahead,
 	.writepage	= fuse_writepage,
 	.writepages	= fuse_writepages,
 	.launder_page	= fuse_launder_page,
-	.readpages	= fuse_readpages,
 	.set_page_dirty	= __set_page_dirty_nobuffers,
 	.bmap		= fuse_bmap,
 	.direct_IO	= fuse_direct_IO,
--
2.25.1