Message-Id: <20201022212228.15703-5-willy@infradead.org>
Date: Thu, 22 Oct 2020 22:22:26 +0100
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org,
	linux-block@vger.kernel.org, linux-fscrypt@vger.kernel.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Subject: [PATCH 4/6] fs: Hoist fscrypt decryption to bio completion handler

This is preparatory work for doing decryption at the bio level instead
of the buffer_head (BH) level. For now, decryption still happens one BH
at a time.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 fs/buffer.c | 45 +++++++++++++++++++++------------------------
 1 file changed, 21 insertions(+), 24 deletions(-)

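For context: end_bio_bh_io_sync() runs in bio completion context,
usually interrupt context where we cannot sleep, so decryption is
deferred to the fscrypt workqueue rather than done inline. The work
item being queued is the existing decrypt_bh() helper in fs/buffer.c,
reproduced here from memory as a rough sketch for reviewers'
convenience (it is not part of this diff):

struct decrypt_bh_ctx {
        struct work_struct work;
        struct buffer_head *bh;
};

static void decrypt_bh(struct work_struct *work)
{
        struct decrypt_bh_ctx *ctx =
                container_of(work, struct decrypt_bh_ctx, work);
        struct buffer_head *bh = ctx->bh;
        int err;

        /* Process context: the crypto operations below may sleep. */
        err = fscrypt_decrypt_pagecache_blocks(bh->b_page, bh->b_size,
                                               bh_offset(bh));
        end_buffer_async_read(bh, err == 0);
        kfree(ctx);
}

This is also why the context is allocated with GFP_ATOMIC and why a
failed allocation falls back to uptodate = 0: the completion path
cannot sleep, so the read is simply reported as failed instead.
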
diff --git a/fs/buffer.c b/fs/buffer.c
index ccb90081117c..627ae1d853c0 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -241,6 +241,10 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
         return ret;
 }
 
+/*
+ * I/O completion handler for block_read_full_page() - pages
+ * which come unlocked at the end of I/O.
+ */
 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
 {
         unsigned long flags;
@@ -313,28 +317,6 @@ static void decrypt_bh(struct work_struct *work)
         kfree(ctx);
 }
 
-/*
- * I/O completion handler for block_read_full_page() - pages
- * which come unlocked at the end of I/O.
- */
-static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
-{
-        /* Decrypt if needed */
-        if (uptodate &&
-            fscrypt_inode_uses_fs_layer_crypto(bh->b_page->mapping->host)) {
-                struct decrypt_bh_ctx *ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
-
-                if (ctx) {
-                        INIT_WORK(&ctx->work, decrypt_bh);
-                        ctx->bh = bh;
-                        fscrypt_enqueue_decrypt_work(&ctx->work);
-                        return;
-                }
-                uptodate = 0;
-        }
-        end_buffer_async_read(bh, uptodate);
-}
-
 /*
  * Completion handler for block_write_full_page() - pages which are unlocked
  * during I/O, and which have PageWriteback cleared upon I/O completion.
@@ -404,7 +386,7 @@ EXPORT_SYMBOL(end_buffer_async_write);
  */
 static void mark_buffer_async_read(struct buffer_head *bh)
 {
-        bh->b_end_io = end_buffer_async_read_io;
+        bh->b_end_io = end_buffer_async_read;
         set_buffer_async_read(bh);
 }
 
@@ -3103,11 +3085,26 @@ EXPORT_SYMBOL(generic_block_bmap);
 static void end_bio_bh_io_sync(struct bio *bio)
 {
         struct buffer_head *bh = bio->bi_private;
+        int uptodate = !bio->bi_status;
 
         if (unlikely(bio_flagged(bio, BIO_QUIET)))
                 set_bit(BH_Quiet, &bh->b_state);
 
-        bh->b_end_io(bh, !bio->bi_status);
+        /* Decrypt if needed */
+        if ((bio_data_dir(bio) == READ) && uptodate &&
+            fscrypt_inode_uses_fs_layer_crypto(bh->b_page->mapping->host)) {
+                struct decrypt_bh_ctx *ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
+
+                if (ctx) {
+                        INIT_WORK(&ctx->work, decrypt_bh);
+                        ctx->bh = bh;
+                        fscrypt_enqueue_decrypt_work(&ctx->work);
+                        bio_put(bio);
+                        return;
+                }
+                uptodate = 0;
+        }
+        bh->b_end_io(bh, uptodate);
         bio_put(bio);
 }
 
--
2.28.0