Message-Id: <20171216120726.517153-8-tj@kernel.org>
Date: Sat, 16 Dec 2017 04:07:26 -0800
From: Tejun Heo <tj@...nel.org>
To: jack@...e.cz, axboe@...nel.dk, clm@...com, jbacik@...com
Cc: kernel-team@...com, linux-kernel@...r.kernel.org,
linux-btrfs@...r.kernel.org, peterz@...radead.org,
jianchao.w.wang@...cle.com, Bart.VanAssche@....com,
Tejun Heo <tj@...nel.org>
Subject: [PATCH 7/7] blk-mq: rename blk_mq_hw_ctx->queue_rq_srcu to ->srcu
The RCU protection has been expanded to cover both queueing and
completion paths, making ->queue_rq_srcu a misnomer. Rename it to
->srcu as suggested by Bart.
Signed-off-by: Tejun Heo <tj@...nel.org>
Cc: Bart Van Assche <Bart.VanAssche@....com>
---
block/blk-mq.c | 22 +++++++++++-----------
include/linux/blk-mq.h | 2 +-
2 files changed, 12 insertions(+), 12 deletions(-)
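(Aside for reviewers, not part of the patch: a minimal sketch of the
read-side pattern this rename touches. The helper name
hctx_dispatch_protected() is made up for illustration; the two branches
mirror the __blk_mq_run_hw_queue() hunk below. Drivers whose
->queue_rq() may sleep set BLK_MQ_F_BLOCKING and get the per-hctx SRCU;
everyone else uses plain RCU.)

	static void hctx_dispatch_protected(struct blk_mq_hw_ctx *hctx)
	{
		if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
			/* non-blocking ->queue_rq(): plain RCU suffices */
			rcu_read_lock();
			blk_mq_sched_dispatch_requests(hctx);
			rcu_read_unlock();
		} else {
			int srcu_idx;

			/* ->queue_rq() may sleep, so use the hctx SRCU */
			might_sleep();
			srcu_idx = srcu_read_lock(hctx->srcu);
			blk_mq_sched_dispatch_requests(hctx);
			srcu_read_unlock(hctx->srcu, srcu_idx);
		}
	}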
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 724d340..872d4ee 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -219,7 +219,7 @@ void blk_mq_quiesce_queue(struct request_queue *q)
queue_for_each_hw_ctx(q, hctx, i) {
if (hctx->flags & BLK_MQ_F_BLOCKING)
- synchronize_srcu(hctx->queue_rq_srcu);
+ synchronize_srcu(hctx->srcu);
else
rcu = true;
}
@@ -611,10 +611,10 @@ void blk_mq_complete_request(struct request *rq)
__blk_mq_complete_request(rq);
rcu_read_unlock();
} else {
- srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
+ srcu_idx = srcu_read_lock(hctx->srcu);
if (blk_mq_rq_aborted_gstate(rq) != rq->gstate)
__blk_mq_complete_request(rq);
- srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
+ srcu_read_unlock(hctx->srcu, srcu_idx);
}
}
EXPORT_SYMBOL(blk_mq_complete_request);
@@ -916,7 +916,7 @@ static void blk_mq_timeout_work(struct work_struct *work)
if (!(hctx->flags & BLK_MQ_F_BLOCKING))
has_rcu = true;
else
- synchronize_srcu(hctx->queue_rq_srcu);
+ synchronize_srcu(hctx->srcu);
hctx->nr_expired = 0;
}
@@ -1279,9 +1279,9 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
} else {
might_sleep();
- srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
+ srcu_idx = srcu_read_lock(hctx->srcu);
blk_mq_sched_dispatch_requests(hctx);
- srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
+ srcu_read_unlock(hctx->srcu, srcu_idx);
}
}
@@ -1718,9 +1718,9 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
might_sleep();
- srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
+ srcu_idx = srcu_read_lock(hctx->srcu);
__blk_mq_try_issue_directly(hctx, rq, cookie, true);
- srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
+ srcu_read_unlock(hctx->srcu, srcu_idx);
}
}
@@ -2074,7 +2074,7 @@ static void blk_mq_exit_hctx(struct request_queue *q,
set->ops->exit_hctx(hctx, hctx_idx);
if (hctx->flags & BLK_MQ_F_BLOCKING)
- cleanup_srcu_struct(hctx->queue_rq_srcu);
+ cleanup_srcu_struct(hctx->srcu);
blk_mq_remove_cpuhp(hctx);
blk_free_flush_queue(hctx->fq);
@@ -2147,7 +2147,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
goto free_fq;
if (hctx->flags & BLK_MQ_F_BLOCKING)
- init_srcu_struct(hctx->queue_rq_srcu);
+ init_srcu_struct(hctx->srcu);
blk_mq_debugfs_register_hctx(q, hctx);
@@ -2436,7 +2436,7 @@ static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
{
int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
- BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, queue_rq_srcu),
+ BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
__alignof__(struct blk_mq_hw_ctx)) !=
sizeof(struct blk_mq_hw_ctx));
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 460798d..8efcf49 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -66,7 +66,7 @@ struct blk_mq_hw_ctx {
#endif
/* Must be the last member - see also blk_mq_hw_ctx_size(). */
- struct srcu_struct queue_rq_srcu[0];
+ struct srcu_struct srcu[0];
};
struct blk_mq_tag_set {
--
2.9.5
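(One more aside, not part of the patch: ->srcu stays a zero-length
array at the tail of struct blk_mq_hw_ctx so that non-blocking queues
do not pay for an srcu_struct at all. The BUILD_BUG_ON in
blk_mq_hw_ctx_size() checks that the tail member lands exactly at the
aligned end of the struct; the allocation side then looks roughly like
the sketch below, reconstructed from the surrounding code rather than
quoted from this diff.)

	static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
	{
		int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);

		BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
				   __alignof__(struct blk_mq_hw_ctx)) !=
			     sizeof(struct blk_mq_hw_ctx));

		/* only blocking queues allocate space for the SRCU struct */
		if (tag_set->flags & BLK_MQ_F_BLOCKING)
			hw_ctx_size += sizeof(struct srcu_struct);

		return hw_ctx_size;
	}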