Message-ID: <20260126045212.1381843-13-hch@lst.de>
Date: Mon, 26 Jan 2026 05:50:58 +0100
From: Christoph Hellwig <hch@....de>
To: Eric Biggers <ebiggers@...nel.org>
Cc: Al Viro <viro@...iv.linux.org.uk>,
Christian Brauner <brauner@...nel.org>,
Jan Kara <jack@...e.cz>,
David Sterba <dsterba@...e.com>,
"Theodore Ts'o" <tytso@....edu>,
Jaegeuk Kim <jaegeuk@...nel.org>,
Chao Yu <chao@...nel.org>,
Andrey Albershteyn <aalbersh@...hat.com>,
Matthew Wilcox <willy@...radead.org>,
linux-fsdevel@...r.kernel.org,
linux-btrfs@...r.kernel.org,
linux-ext4@...r.kernel.org,
linux-f2fs-devel@...ts.sourceforge.net,
fsverity@...ts.linux.dev
Subject: [PATCH 12/16] fs: consolidate fsverity_info lookup in buffer.c

Look up the fsverity_info once in end_buffer_async_read_io and then
pass it along to the I/O completion workqueue in
struct postprocess_bh_ctx.

This amortizes the cost of the lookup, which matters once the lookup
becomes less efficient.

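To make the shape of the change easier to see outside of kernel context,
here is a minimal, self-contained userspace sketch of the same pattern;
every name in it (fake_inode, fake_work_ctx, lookup_verity_info, ...) is a
hypothetical stand-in, not the buffer.c or fsverity API:

/*
 * Illustrative sketch only -- not kernel code.  All names below are
 * hypothetical stand-ins that mirror the shape of the change: the
 * completion path resolves the verity info once and stashes it in the
 * deferred-work context, so the worker no longer re-derives it from
 * the folio's inode.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_verity_info { int dummy; };

struct fake_inode {
	long i_size;
	struct fake_verity_info *verity;	/* NULL if verity is off */
};

/* plays the role of struct postprocess_bh_ctx */
struct fake_work_ctx {
	struct fake_verity_info *vi;		/* looked up exactly once */
	size_t len, off;
};

/* plays the role of the (possibly costly) fsverity_info lookup */
static struct fake_verity_info *lookup_verity_info(struct fake_inode *inode)
{
	return inode->verity;
}

/* plays the role of verify_bh(): consumes only the cached pointer */
static void verify_worker(struct fake_work_ctx *ctx)
{
	printf("verify %zu@%zu: %s\n", ctx->len, ctx->off,
	       ctx->vi ? "verified" : "skipped");
	free(ctx);
}

/* plays the role of end_buffer_async_read_io(): one lookup, then defer */
static void complete_read(struct fake_inode *inode, size_t len, size_t off)
{
	struct fake_work_ctx *ctx = malloc(sizeof(*ctx));

	if (!ctx)
		return;
	/* mirrors the "needed by ext4" i_size check in the patch below */
	ctx->vi = off < (size_t)inode->i_size ?
			lookup_verity_info(inode) : NULL;
	ctx->len = len;
	ctx->off = off;
	verify_worker(ctx);	/* the kernel would queue_work() here instead */
}

int main(void)
{
	struct fake_verity_info vi = { 1 };
	struct fake_inode inode = { .i_size = 8192, .verity = &vi };

	complete_read(&inode, 4096, 0);
	complete_read(&inode, 4096, 4096);
	return 0;
}

Carrying the pointer in the work context also lets decrypt_bh() test
ctx->vi directly instead of calling need_fsverity() a second time after
decryption.
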
Signed-off-by: Christoph Hellwig <hch@....de>
---
 fs/buffer.c | 27 +++++++++++----------------
 1 file changed, 11 insertions(+), 16 deletions(-)

diff --git a/fs/buffer.c b/fs/buffer.c
index 3982253b6805..f4b3297ef1b1 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -302,6 +302,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
 struct postprocess_bh_ctx {
 	struct work_struct work;
 	struct buffer_head *bh;
+	struct fsverity_info *vi;
 };
 
 static void verify_bh(struct work_struct *work)
@@ -309,25 +310,14 @@ static void verify_bh(struct work_struct *work)
 	struct postprocess_bh_ctx *ctx =
 		container_of(work, struct postprocess_bh_ctx, work);
 	struct buffer_head *bh = ctx->bh;
-	struct inode *inode = bh->b_folio->mapping->host;
 	bool valid;
 
-	valid = fsverity_verify_blocks(*fsverity_info_addr(inode), bh->b_folio,
-			bh->b_size, bh_offset(bh));
+	valid = fsverity_verify_blocks(ctx->vi, bh->b_folio, bh->b_size,
+			bh_offset(bh));
 	end_buffer_async_read(bh, valid);
 	kfree(ctx);
 }
 
-static bool need_fsverity(struct buffer_head *bh)
-{
-	struct folio *folio = bh->b_folio;
-	struct inode *inode = folio->mapping->host;
-
-	return fsverity_active(inode) &&
-	       /* needed by ext4 */
-	       folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
-}
-
 static void decrypt_bh(struct work_struct *work)
 {
 	struct postprocess_bh_ctx *ctx =
@@ -337,7 +327,7 @@ static void decrypt_bh(struct work_struct *work)
 
 	err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
 					       bh_offset(bh));
-	if (err == 0 && need_fsverity(bh)) {
+	if (err == 0 && ctx->vi) {
 		/*
 		 * We use different work queues for decryption and for verity
 		 * because verity may require reading metadata pages that need
@@ -359,15 +349,20 @@ static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
 {
 	struct inode *inode = bh->b_folio->mapping->host;
 	bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
-	bool verify = need_fsverity(bh);
+	struct fsverity_info *vi = NULL;
+
+	/* needed by ext4 */
+	if (bh->b_folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE))
+		vi = fsverity_get_info(inode);
 
 	/* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
-	if (uptodate && (decrypt || verify)) {
+	if (uptodate && (decrypt || vi)) {
 		struct postprocess_bh_ctx *ctx =
 			kmalloc(sizeof(*ctx), GFP_ATOMIC);
 
 		if (ctx) {
 			ctx->bh = bh;
+			ctx->vi = vi;
 			if (decrypt) {
 				INIT_WORK(&ctx->work, decrypt_bh);
 				fscrypt_enqueue_decrypt_work(&ctx->work);
--
2.47.3