[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20260126045212.1381843-16-hch@lst.de>
Date: Mon, 26 Jan 2026 05:51:01 +0100
From: Christoph Hellwig <hch@....de>
To: Eric Biggers <ebiggers@...nel.org>
Cc: Al Viro <viro@...iv.linux.org.uk>,
Christian Brauner <brauner@...nel.org>,
Jan Kara <jack@...e.cz>,
David Sterba <dsterba@...e.com>,
"Theodore Ts'o" <tytso@....edu>,
Jaegeuk Kim <jaegeuk@...nel.org>,
Chao Yu <chao@...nel.org>,
Andrey Albershteyn <aalbersh@...hat.com>,
Matthew Wilcox <willy@...radead.org>,
linux-fsdevel@...r.kernel.org,
linux-btrfs@...r.kernel.org,
linux-ext4@...r.kernel.org,
linux-f2fs-devel@...ts.sourceforge.net,
fsverity@...ts.linux.dev
Subject: [PATCH 15/16] btrfs: consolidate fsverity_info lookup
Look up the fsverity_info once in btrfs_do_readpage, and then use it
for all operations performed there, and do the same in end_folio_read
for all folios processed there. The latter is also changed to derive
the inode from the btrfs_bio - while bbio->inode is optional, it is
always set for buffered reads.
This amortizes the cost of the lookup, which matters once a later change in this series makes the per-call lookup less efficient.
Signed-off-by: Christoph Hellwig <hch@....de>
Acked-by: David Sterba <dsterba@...e.com>
---
fs/btrfs/extent_io.c | 38 +++++++++++++++++++++++---------------
1 file changed, 23 insertions(+), 15 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 21430b7d8f27..fe96f060725a 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -476,26 +476,25 @@ void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
end, page_ops);
}
-static bool btrfs_verify_folio(struct folio *folio, u64 start, u32 len)
+static bool btrfs_verify_folio(struct fsverity_info *vi, struct folio *folio,
+ u64 start, u32 len)
{
struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
- if (!fsverity_active(folio->mapping->host) ||
- btrfs_folio_test_uptodate(fs_info, folio, start, len) ||
- start >= i_size_read(folio->mapping->host))
+ if (!vi || btrfs_folio_test_uptodate(fs_info, folio, start, len))
return true;
- return fsverity_verify_folio(*fsverity_info_addr(folio->mapping->host),
- folio);
+ return fsverity_verify_folio(vi, folio);
}
-static void end_folio_read(struct folio *folio, bool uptodate, u64 start, u32 len)
+static void end_folio_read(struct fsverity_info *vi, struct folio *folio,
+ bool uptodate, u64 start, u32 len)
{
struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
ASSERT(folio_pos(folio) <= start &&
start + len <= folio_next_pos(folio));
- if (uptodate && btrfs_verify_folio(folio, start, len))
+ if (uptodate && btrfs_verify_folio(vi, folio, start, len))
btrfs_folio_set_uptodate(fs_info, folio, start, len);
else
btrfs_folio_clear_uptodate(fs_info, folio, start, len);
@@ -575,14 +574,19 @@ static void begin_folio_read(struct btrfs_fs_info *fs_info, struct folio *folio)
static void end_bbio_data_read(struct btrfs_bio *bbio)
{
struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
+ struct inode *inode = &bbio->inode->vfs_inode;
struct bio *bio = &bbio->bio;
+ struct fsverity_info *vi = NULL;
struct folio_iter fi;
ASSERT(!bio_flagged(bio, BIO_CLONED));
+
+ if (bbio->file_offset < i_size_read(inode))
+ vi = fsverity_get_info(inode);
+
bio_for_each_folio_all(fi, &bbio->bio) {
bool uptodate = !bio->bi_status;
struct folio *folio = fi.folio;
- struct inode *inode = folio->mapping->host;
u64 start = folio_pos(folio) + fi.offset;
btrfs_debug(fs_info,
@@ -617,7 +621,7 @@ static void end_bbio_data_read(struct btrfs_bio *bbio)
}
/* Update page status and unlock. */
- end_folio_read(folio, uptodate, start, fi.length);
+ end_folio_read(vi, folio, uptodate, start, fi.length);
}
bio_put(bio);
}
@@ -1003,6 +1007,7 @@ static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
struct extent_map *em;
int ret = 0;
const size_t blocksize = fs_info->sectorsize;
+ struct fsverity_info *vi = NULL;
ret = set_folio_extent_mapped(folio);
if (ret < 0) {
@@ -1010,6 +1015,9 @@ static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
return ret;
}
+ if (start < last_byte)
+ vi = fsverity_get_info(inode);
+
if (folio_contains(folio, last_byte >> PAGE_SHIFT)) {
size_t zero_offset = offset_in_folio(folio, last_byte);
@@ -1030,16 +1038,16 @@ static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
if (cur >= last_byte) {
folio_zero_range(folio, pg_offset, end - cur + 1);
- end_folio_read(folio, true, cur, end - cur + 1);
+ end_folio_read(vi, folio, true, cur, end - cur + 1);
break;
}
if (btrfs_folio_test_uptodate(fs_info, folio, cur, blocksize)) {
- end_folio_read(folio, true, cur, blocksize);
+ end_folio_read(vi, folio, true, cur, blocksize);
continue;
}
em = get_extent_map(BTRFS_I(inode), folio, cur, end - cur + 1, em_cached);
if (IS_ERR(em)) {
- end_folio_read(folio, false, cur, end + 1 - cur);
+ end_folio_read(vi, folio, false, cur, end + 1 - cur);
return PTR_ERR(em);
}
extent_offset = cur - em->start;
@@ -1116,12 +1124,12 @@ static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
/* we've found a hole, just zero and go on */
if (block_start == EXTENT_MAP_HOLE) {
folio_zero_range(folio, pg_offset, blocksize);
- end_folio_read(folio, true, cur, blocksize);
+ end_folio_read(vi, folio, true, cur, blocksize);
continue;
}
/* the get_extent function already copied into the folio */
if (block_start == EXTENT_MAP_INLINE) {
- end_folio_read(folio, true, cur, blocksize);
+ end_folio_read(vi, folio, true, cur, blocksize);
continue;
}
--
2.47.3
Powered by blists - more mailing lists