Message-ID: <20250108114326.1729250-1-neelx@suse.com>
Date: Wed, 8 Jan 2025 12:43:25 +0100
From: Daniel Vacek <neelx@...e.com>
To: Chris Mason <clm@...com>,
Josef Bacik <josef@...icpanda.com>,
David Sterba <dsterba@...e.com>
Cc: Daniel Vacek <neelx@...e.com>,
linux-btrfs@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH] btrfs: keep `priv` struct on stack for sync reads in `btrfs_encoded_read_regular_fill_pages()`

Only allocate the `priv` struct from slab for asynchronous mode.

There is no need to allocate an object from slab in the synchronous
mode. In that case the stack can happily be used, as it was before
68d3b27e05c7 ("btrfs: move priv off stack in
btrfs_encoded_read_regular_fill_pages()") which was a preparation for
the async mode.
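
The resulting shape is roughly the following (a minimal sketch with
hypothetical names, not the actual btrfs code; the real change is in
the diff below):

  struct read_private {
          struct completion *sync_done;   /* set in sync mode only */
          void *async_ctx;                /* non-NULL in async mode */
          refcount_t pending;
  };

  int fill_pages(void *async_ctx)
  {
          struct read_private *priv, sync_priv;
          struct completion sync_done;

          if (async_ctx) {
                  /* Async: priv must outlive this function. */
                  priv = kmalloc(sizeof(*priv), GFP_NOFS);
                  if (!priv)
                          return -ENOMEM;
          } else {
                  /* Sync: we wait below, so the stack is safe. */
                  priv = &sync_priv;
                  init_completion(&sync_done);
                  priv->sync_done = &sync_done;
          }
          refcount_set(&priv->pending, 1);
          /* ... submit bios, each holding a reference ... */
          if (!async_ctx && !refcount_dec_and_test(&priv->pending))
                  wait_for_completion_io(&sync_done);
          return 0;
  }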

While at it, fix the comment to reflect the atomic_t => refcount_t
change from d29662695ed7 ("btrfs: fix use-after-free waiting for
encoded read endios").
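
Schematically, the pairing that the updated comment describes is
(sketch only, simplified from the code below):

  /* endio side: publish the error, then drop this bio's reference */
  WRITE_ONCE(priv->status, bbio->bio.bi_status);
  if (refcount_dec_and_test(&priv->pending_reads))  /* implies barrier */
          complete(priv->sync_reads);

  /* submitter side: refcount_dec_and_test() (or the completion wait)
   * provides the matching barrier, so this load cannot observe a
   * stale status. */
  return blk_status_to_errno(READ_ONCE(priv->status));
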
Signed-off-by: Daniel Vacek <neelx@...e.com>
---
fs/btrfs/inode.c | 62 +++++++++++++++++++++++++-----------------------
 1 file changed, 32 insertions(+), 30 deletions(-)

diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 27b2fe7f735d5..4d30857df4bcb 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -9078,9 +9078,9 @@ static ssize_t btrfs_encoded_read_inline(
 }
 
 struct btrfs_encoded_read_private {
-	struct completion done;
+	struct completion *sync_reads;
 	void *uring_ctx;
-	refcount_t pending_refs;
+	refcount_t pending_reads;
 	blk_status_t status;
 };
 
@@ -9090,23 +9090,22 @@ static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)
 
 	if (bbio->bio.bi_status) {
 		/*
-		 * The memory barrier implied by the atomic_dec_return() here
-		 * pairs with the memory barrier implied by the
-		 * atomic_dec_return() or io_wait_event() in
-		 * btrfs_encoded_read_regular_fill_pages() to ensure that this
-		 * write is observed before the load of status in
-		 * btrfs_encoded_read_regular_fill_pages().
+		 * The memory barrier implied by the
+		 * refcount_dec_and_test() here pairs with the memory
+		 * barrier implied by the refcount_dec_and_test() in
+		 * btrfs_encoded_read_regular_fill_pages() to ensure
+		 * that this write is observed before the load of
+		 * status in btrfs_encoded_read_regular_fill_pages().
 		 */
 		WRITE_ONCE(priv->status, bbio->bio.bi_status);
 	}
-	if (refcount_dec_and_test(&priv->pending_refs)) {
-		int err = blk_status_to_errno(READ_ONCE(priv->status));
-
+	if (refcount_dec_and_test(&priv->pending_reads)) {
 		if (priv->uring_ctx) {
+			int err = blk_status_to_errno(READ_ONCE(priv->status));
 			btrfs_uring_read_extent_endio(priv->uring_ctx, err);
 			kfree(priv);
 		} else {
-			complete(&priv->done);
+			complete(priv->sync_reads);
 		}
 	}
 	bio_put(&bbio->bio);
@@ -9117,17 +9116,22 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
 					  struct page **pages, void *uring_ctx)
 {
 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
-	struct btrfs_encoded_read_private *priv;
+	struct btrfs_encoded_read_private *priv, sync_priv;
+	struct completion sync_reads;
 	unsigned long i = 0;
 	struct btrfs_bio *bbio;
-	int ret;
 
-	priv = kmalloc(sizeof(struct btrfs_encoded_read_private), GFP_NOFS);
-	if (!priv)
-		return -ENOMEM;
+	if (uring_ctx) {
+		priv = kmalloc(sizeof(struct btrfs_encoded_read_private), GFP_NOFS);
+		if (!priv)
+			return -ENOMEM;
+	} else {
+		priv = &sync_priv;
+		init_completion(&sync_reads);
+		priv->sync_reads = &sync_reads;
+	}
 
-	init_completion(&priv->done);
-	refcount_set(&priv->pending_refs, 1);
+	refcount_set(&priv->pending_reads, 1);
 	priv->status = 0;
 	priv->uring_ctx = uring_ctx;
 
@@ -9140,7 +9144,7 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
 		size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE);
 
 		if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) {
-			refcount_inc(&priv->pending_refs);
+			refcount_inc(&priv->pending_reads);
 			btrfs_submit_bbio(bbio, 0);
 
 			bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
@@ -9155,25 +9159,23 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
 		disk_io_size -= bytes;
 	} while (disk_io_size);
 
-	refcount_inc(&priv->pending_refs);
+	refcount_inc(&priv->pending_reads);
 	btrfs_submit_bbio(bbio, 0);
 
 	if (uring_ctx) {
-		if (refcount_dec_and_test(&priv->pending_refs)) {
-			ret = blk_status_to_errno(READ_ONCE(priv->status));
-			btrfs_uring_read_extent_endio(uring_ctx, ret);
+		if (refcount_dec_and_test(&priv->pending_reads)) {
+			int err = blk_status_to_errno(READ_ONCE(priv->status));
+			btrfs_uring_read_extent_endio(uring_ctx, err);
 			kfree(priv);
-			return ret;
+			return err;
 		}
 
 		return -EIOCBQUEUED;
 	} else {
-		if (!refcount_dec_and_test(&priv->pending_refs))
-			wait_for_completion_io(&priv->done);
+		if (!refcount_dec_and_test(&priv->pending_reads))
+			wait_for_completion_io(&sync_reads);
 		/* See btrfs_encoded_read_endio() for ordering. */
-		ret = blk_status_to_errno(READ_ONCE(priv->status));
-		kfree(priv);
-		return ret;
+		return blk_status_to_errno(READ_ONCE(priv->status));
 	}
 }
--
2.45.2