Message-ID: <20251118062159.2358085-3-hch@lst.de>
Date: Tue, 18 Nov 2025 07:21:45 +0100
From: Christoph Hellwig <hch@....de>
To: Eric Biggers <ebiggers@...nel.org>
Cc: "Theodore Y. Ts'o" <tytso@....edu>,
Jaegeuk Kim <jaegeuk@...nel.org>,
Andreas Dilger <adilger.kernel@...ger.ca>,
Chao Yu <chao@...nel.org>,
Christian Brauner <brauner@...nel.org>,
"Darrick J. Wong" <djwong@...nel.org>,
linux-fscrypt@...r.kernel.org,
linux-ext4@...r.kernel.org,
linux-f2fs-devel@...ts.sourceforge.net,
linux-fsdevel@...r.kernel.org
Subject: [PATCH 02/11] fscrypt: keep multiple bios in flight in fscrypt_zeroout_range_inline_crypt

This should slightly improve performance for large zeroing operations,
but more importantly it prepares for blk-crypto refactoring that
requires all fscrypt users to call submit_bio directly.

Signed-off-by: Christoph Hellwig <hch@....de>
Reviewed-by: Eric Biggers <ebiggers@...nel.org>
---
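A side note on the completion scheme used below, as an illustration
only (not part of the patch): this is a plain refcount-plus-completion
pattern. The submitter seeds the pending count to 1 so the completion
cannot fire while bios are still being allocated, every submitted bio
takes another reference, and whoever drops the count to zero completes
the waiter. A minimal user-space sketch of the same pattern, using C11
atomics and a pthread condition variable standing in for the kernel's
atomic_t and struct completion (all names here are made up for the
example; builds with cc -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/*
 * Rough user-space analogue of struct fscrypt_zero_done.  The condvar
 * plus "completed" flag stands in for struct completion.
 */
struct zero_done {
        atomic_int pending;     /* seeded to 1 for the submitter */
        atomic_int status;      /* first nonzero error wins */
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int completed;
};

/* analogue of fscrypt_zeroout_range_done(): last ref wakes the waiter */
static void zero_done_put(struct zero_done *d)
{
        if (atomic_fetch_sub(&d->pending, 1) == 1) {
                pthread_mutex_lock(&d->lock);
                d->completed = 1;
                pthread_cond_signal(&d->cond);
                pthread_mutex_unlock(&d->lock);
        }
}

/* analogue of the bio end_io handler: record only the first error */
static void *fake_io(void *arg)
{
        struct zero_done *d = arg;
        int err = 0;            /* pretend this "bio" succeeded */
        int expected = 0;

        if (err)                /* kernel side: cmpxchg(&done->status, 0, err) */
                atomic_compare_exchange_strong(&d->status, &expected, err);
        zero_done_put(d);
        return NULL;
}

int main(void)
{
        struct zero_done d = {
                .pending = 1,   /* the submitter's own reference */
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .cond = PTHREAD_COND_INITIALIZER,
        };
        pthread_t io[4];

        for (int i = 0; i < 4; i++) {
                atomic_fetch_add(&d.pending, 1); /* one ref per "bio" */
                pthread_create(&io[i], NULL, fake_io, &d);
        }

        zero_done_put(&d);      /* analogue of the final _done() call */

        pthread_mutex_lock(&d.lock);
        while (!d.completed)
                pthread_cond_wait(&d.cond, &d.lock);
        pthread_mutex_unlock(&d.lock);

        for (int i = 0; i < 4; i++)
                pthread_join(io[i], NULL);
        printf("status = %d\n", atomic_load(&d.status));
        return 0;
}

The same reasoning explains the final fscrypt_zeroout_range_done()
before wait_for_completion() in the patch: it drops the submitter's
initial reference so the count can actually reach zero.
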
 fs/crypto/bio.c | 86 +++++++++++++++++++++++++++++++------------------
 1 file changed, 54 insertions(+), 32 deletions(-)

diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index 68b0424d879a..c2b3ca100f8d 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -47,49 +47,71 @@ bool fscrypt_decrypt_bio(struct bio *bio)
 }
 EXPORT_SYMBOL(fscrypt_decrypt_bio);
 
+struct fscrypt_zero_done {
+        atomic_t pending;
+        blk_status_t status;
+        struct completion done;
+};
+
+static void fscrypt_zeroout_range_done(struct fscrypt_zero_done *done)
+{
+        if (atomic_dec_and_test(&done->pending))
+                complete(&done->done);
+}
+
+static void fscrypt_zeroout_range_end_io(struct bio *bio)
+{
+        struct fscrypt_zero_done *done = bio->bi_private;
+
+        if (bio->bi_status)
+                cmpxchg(&done->status, 0, bio->bi_status);
+        fscrypt_zeroout_range_done(done);
+        bio_put(bio);
+}
+
 static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
                                               pgoff_t lblk, sector_t sector,
                                               unsigned int len)
 {
         const unsigned int blockbits = inode->i_blkbits;
         const unsigned int blocks_per_page = 1 << (PAGE_SHIFT - blockbits);
-        struct bio *bio;
-        int ret, err = 0;
-        int num_pages = 0;
-
-        /* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
-        bio = bio_alloc(inode->i_sb->s_bdev, BIO_MAX_VECS, REQ_OP_WRITE,
-                        GFP_NOFS);
+        struct fscrypt_zero_done done = {
+                .pending = ATOMIC_INIT(1),
+                .done = COMPLETION_INITIALIZER_ONSTACK(done.done),
+        };
 
         while (len) {
-                unsigned int blocks_this_page = min(len, blocks_per_page);
-                unsigned int bytes_this_page = blocks_this_page << blockbits;
+                struct bio *bio;
+                unsigned int n;
 
-                if (num_pages == 0) {
-                        fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS);
-                        bio->bi_iter.bi_sector = sector;
-                }
-                ret = bio_add_page(bio, ZERO_PAGE(0), bytes_this_page, 0);
-                if (WARN_ON_ONCE(ret != bytes_this_page)) {
-                        err = -EIO;
-                        goto out;
-                }
-                num_pages++;
-                len -= blocks_this_page;
-                lblk += blocks_this_page;
-                sector += (bytes_this_page >> SECTOR_SHIFT);
-                if (num_pages == BIO_MAX_VECS || !len ||
-                    !fscrypt_mergeable_bio(bio, inode, lblk)) {
-                        err = submit_bio_wait(bio);
-                        if (err)
-                                goto out;
-                        bio_reset(bio, inode->i_sb->s_bdev, REQ_OP_WRITE);
-                        num_pages = 0;
+                bio = bio_alloc(inode->i_sb->s_bdev, BIO_MAX_VECS, REQ_OP_WRITE,
+                                GFP_NOFS);
+                bio->bi_iter.bi_sector = sector;
+                bio->bi_private = &done;
+                bio->bi_end_io = fscrypt_zeroout_range_end_io;
+                fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS);
+
+                for (n = 0; n < BIO_MAX_VECS; n++) {
+                        unsigned int blocks_this_page =
+                                min(len, blocks_per_page);
+                        unsigned int bytes_this_page = blocks_this_page << blockbits;
+
+                        __bio_add_page(bio, ZERO_PAGE(0), bytes_this_page, 0);
+                        len -= blocks_this_page;
+                        lblk += blocks_this_page;
+                        sector += (bytes_this_page >> SECTOR_SHIFT);
+                        if (!len || !fscrypt_mergeable_bio(bio, inode, lblk))
+                                break;
                 }
+
+                atomic_inc(&done.pending);
+                submit_bio(bio);
         }
-out:
-        bio_put(bio);
-        return err;
+
+        fscrypt_zeroout_range_done(&done);
+
+        wait_for_completion(&done.done);
+        return blk_status_to_errno(done.status);
 }
 
 /**
--
2.47.3