Message-Id: <20201022212228.15703-4-willy@infradead.org>
Date: Thu, 22 Oct 2020 22:22:25 +0100
From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
To: linux-fsdevel@...r.kernel.org, linux-kernel@...r.kernel.org,
	linux-block@...r.kernel.org, linux-fscrypt@...r.kernel.org
Cc: "Matthew Wilcox (Oracle)" <willy@...radead.org>
Subject: [PATCH 3/6] fs: Convert block_read_full_page to be synchronous

Use the new blk_completion infrastructure to wait for multiple I/Os.
Also coalesce adjacent buffer heads into a single BIO instead of
submitting one BIO per buffer head. This doesn't work for fscrypt yet,
so keep the old code around for now.

Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
---
 fs/buffer.c | 90 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 90 insertions(+)
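
A note for reviewers: the waiting scheme below leans on the blk_completion
API introduced earlier in this series.  Here is a minimal sketch of the
calling convention this patch assumes; only the three entry points shown
are taken from the patch itself, and the comments describe the behaviour
the new code relies on, not the actual blk_completion implementation:

	struct blk_completion *cmpl = kmalloc(sizeof(*cmpl), GFP_NOIO);

	blk_completion_init(cmpl, nr);	/* arm for nr outstanding units */

	/* dropped once per completed unit, e.g. from a bio end_io handler */
	blk_completion_sub(cmpl, BLK_STS_OK, 1);

	/* sleep killably until the count hits zero; 0 on success */
	err = blk_completion_wait_killable(cmpl);
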
diff --git a/fs/buffer.c b/fs/buffer.c
index 1b0ba1d59966..ccb90081117c 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2249,6 +2249,87 @@ int block_is_partially_uptodate(struct page *page, unsigned long from,
 }
 EXPORT_SYMBOL(block_is_partially_uptodate);
 
+static void readpage_end_bio(struct bio *bio)
+{
+	struct bio_vec *bvec;
+	struct page *page;
+	struct buffer_head *bh;
+	int i, nr = 0;
+
+	bio_for_each_bvec_all(bvec, bio, i) {
+		size_t offset = 0;
+		size_t max = bvec->bv_offset + bvec->bv_len;
+
+		page = bvec->bv_page;
+		bh = page_buffers(page);
+
+		for (offset = 0; offset < max; offset += bh->b_size,
+				bh = bh->b_this_page) {
+			if (offset < bvec->bv_offset)
+				continue;
+			BUG_ON(bh_offset(bh) != offset);
+			nr++;
+			if (unlikely(bio_flagged(bio, BIO_QUIET)))
+				set_bit(BH_Quiet, &bh->b_state);
+			if (bio->bi_status == BLK_STS_OK)
+				set_buffer_uptodate(bh);
+			else
+				buffer_io_error(bh, ", async page read");
+			unlock_buffer(bh);
+		}
+	}
+
+	if (blk_completion_sub(bio->bi_private, bio->bi_status, nr) < 0)
+		unlock_page(page);
+	bio_put(bio);
+}
+
+static int readpage_submit_bhs(struct page *page, struct blk_completion *cmpl,
+		unsigned int nr, struct buffer_head **bhs)
+{
+	struct bio *bio = NULL;
+	unsigned int i;
+	int err;
+
+	blk_completion_init(cmpl, nr);
+
+	for (i = 0; i < nr; i++) {
+		struct buffer_head *bh = bhs[i];
+		sector_t sector = bh->b_blocknr * (bh->b_size >> 9);
+		bool same_page;
+
+		if (buffer_uptodate(bh)) {
+			end_buffer_async_read(bh, 1);
+			blk_completion_sub(cmpl, BLK_STS_OK, 1);
+			continue;
+		}
+		if (bio) {
+			if (bio_end_sector(bio) == sector &&
+			    __bio_try_merge_page(bio, bh->b_page, bh->b_size,
+					bh_offset(bh), &same_page))
+				continue;
+			submit_bio(bio);
+		}
+		bio = bio_alloc(GFP_NOIO, 1);
+		bio_set_dev(bio, bh->b_bdev);
+		bio->bi_iter.bi_sector = sector;
+		bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
+		bio->bi_end_io = readpage_end_bio;
+		bio->bi_private = cmpl;
+		/* Take care of bh's that straddle the end of the device */
+		guard_bio_eod(bio);
+	}
+
+	if (bio)
+		submit_bio(bio);
+
+	err = blk_completion_wait_killable(cmpl);
+	if (!err)
+		return AOP_UPDATED_PAGE;
+	unlock_page(page);
+	return err;
+}
+
 /*
  * Generic "read page" function for block devices that have the normal
  * get_block functionality. This is most of the block device filesystems.
@@ -2258,6 +2339,7 @@ EXPORT_SYMBOL(block_is_partially_uptodate);
  */
 int block_read_full_page(struct page *page, get_block_t *get_block)
 {
+	struct blk_completion *cmpl = kmalloc(sizeof(*cmpl), GFP_NOIO);
 	struct inode *inode = page->mapping->host;
 	sector_t iblock, lblock;
 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
@@ -2265,6 +2347,9 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
 	int nr, i, err = 0;
 	int fully_mapped = 1;
 
+	if (!cmpl)
+		return -ENOMEM;
+
 	head = create_page_buffers(page, inode, 0);
 	blocksize = head->b_size;
 	bbits = block_size_bits(blocksize);
@@ -2303,6 +2388,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
 
 	if (err) {
+		kfree(cmpl);
 		unlock_page(page);
 		return err;
 	}
@@ -2322,6 +2408,10 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
 		mark_buffer_async_read(bh);
 	}
 
+	if (!fscrypt_inode_uses_fs_layer_crypto(inode))
+		return readpage_submit_bhs(page, cmpl, nr, arr);
+	kfree(cmpl);
+
 	/*
 	 * Stage 3: start the IO. Check for uptodateness
 	 * inside the buffer lock in case another process reading
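
For context, this is roughly how a filesystem drives this path: its
->readpage implementation calls block_read_full_page() with a get_block_t
callback that maps a logical block to a disk block.  A sketch modeled on
the block device code in fs/block_dev.c; the names match mainline, but it
is shown only as an illustration and is not part of this patch:

	static int blkdev_get_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create)
	{
		bh->b_bdev = I_BDEV(inode);
		bh->b_blocknr = iblock;
		set_buffer_mapped(bh);
		return 0;
	}

	static int blkdev_readpage(struct file *file, struct page *page)
	{
		return block_read_full_page(page, blkdev_get_block);
	}

With this patch, callers of ->readpage must also be prepared for the
AOP_UPDATED_PAGE return value this series introduces, since the read now
completes (or fails) before block_read_full_page() returns.
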
--
2.28.0