Message-Id: <20230105212432.289569-2-tj@kernel.org>
Date: Thu, 5 Jan 2023 11:24:29 -1000
From: Tejun Heo <tj@kernel.org>
To: axboe@kernel.dk, josef@toxicpanda.com, hch@lst.de
Cc: linux-block@vger.kernel.org, linux-kernel@vger.kernel.org,
	Tejun Heo <tj@kernel.org>
Subject: [PATCH 1/4] blkcg: Drop unnecessary RCU read [un]locks from blkg_conf_prep/finish()

Holding the queue lock now implies the RCU read lock, so there is no need
to take rcu_read_[un]lock() explicitly. This shouldn't cause any behavior
changes.
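
To illustrate the reasoning (a sketch only, not part of this patch; q,
blkcg and blkg are placeholder variables): since the RCU flavors were
consolidated, any preempt-disabled region, such as one guarded by
spin_lock_irq(), is itself an RCU read-side critical section, so an
RCU-protected lookup stays safe without a separate rcu_read_lock():

	spin_lock_irq(&q->queue_lock);	/* also disables preemption, which
					 * enters an RCU read-side section */
	blkg = blkg_lookup(blkcg, q);	/* RCU-protected lookup, safe here */
	/* use blkg while the queue lock is held */
	spin_unlock_irq(&q->queue_lock);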

While at it, drop the __acquires() annotation on the queue lock too. The
annotation was already out of sync with the code, and it doesn't catch
anything that lockdep can't.
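
For reference, these annotations only feed sparse's context tracking and
compile away in a normal build; under __CHECKER__,
include/linux/compiler_types.h defines them roughly as:

	# define __acquires(x)	__attribute__((context(x,0,1)))
	# define __releases(x)	__attribute__((context(x,1,0)))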
Signed-off-by: Tejun Heo <tj@...nel.org>
---
 block/blk-cgroup.c | 11 ++---------
 1 file changed, 2 insertions(+), 9 deletions(-)
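
For context, a typical caller pairs the two helpers like this (a sketch of
the usage pattern; blkcg, pol and input are placeholders), and with this
patch only the queue lock is held in between:

	struct blkg_conf_ctx ctx;
	int ret;

	ret = blkg_conf_prep(blkcg, pol, input, &ctx);
	if (ret)
		return ret;

	/* queue lock held: apply the config in ctx.body to ctx.blkg */

	blkg_conf_finish(&ctx);
	return 0;
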
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index ce6a2b7d3dfb..99674e23cf88 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -672,12 +672,11 @@ struct block_device *blkcg_conf_open_bdev(char **inputp)
  *
  * Parse per-blkg config update from @input and initialize @ctx with the
  * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
- * part of @input following MAJ:MIN.  This function returns with RCU read
- * lock and queue lock held and must be paired with blkg_conf_finish().
+ * part of @input following MAJ:MIN.  This function returns with queue lock
+ * held and must be paired with blkg_conf_finish().
  */
 int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 		   char *input, struct blkg_conf_ctx *ctx)
-	__acquires(rcu) __acquires(&bdev->bd_queue->queue_lock)
 {
 	struct block_device *bdev;
 	struct gendisk *disk;
@@ -699,7 +698,6 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 	if (ret)
 		goto fail;
 
-	rcu_read_lock();
 	spin_lock_irq(&q->queue_lock);
 
 	if (!blkcg_policy_enabled(q, pol)) {
@@ -728,7 +726,6 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 
 		/* Drop locks to do new blkg allocation with GFP_KERNEL. */
 		spin_unlock_irq(&q->queue_lock);
-		rcu_read_unlock();
 
 		new_blkg = blkg_alloc(pos, disk, GFP_KERNEL);
 		if (unlikely(!new_blkg)) {
@@ -742,7 +739,6 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 			goto fail_exit_queue;
 		}
 
-		rcu_read_lock();
 		spin_lock_irq(&q->queue_lock);
 
 		if (!blkcg_policy_enabled(q, pol)) {
@@ -778,7 +774,6 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 	radix_tree_preload_end();
 fail_unlock:
 	spin_unlock_irq(&q->queue_lock);
-	rcu_read_unlock();
 fail_exit_queue:
 	blk_queue_exit(q);
 fail:
@@ -805,10 +800,8 @@ EXPORT_SYMBOL_GPL(blkg_conf_prep);
  * with blkg_conf_prep().
  */
 void blkg_conf_finish(struct blkg_conf_ctx *ctx)
-	__releases(&ctx->bdev->bd_queue->queue_lock) __releases(rcu)
 {
 	spin_unlock_irq(&bdev_get_queue(ctx->bdev)->queue_lock);
-	rcu_read_unlock();
 	blkdev_put_no_open(ctx->bdev);
 }
 EXPORT_SYMBOL_GPL(blkg_conf_finish);
--
2.39.0