Message-Id: <20220921095849.84988-5-ZiyangZhang@linux.alibaba.com>
Date: Wed, 21 Sep 2022 17:58:45 +0800
From: ZiyangZhang <ZiyangZhang@...ux.alibaba.com>
To: ming.lei@...hat.com
Cc: axboe@...nel.dk, xiaoguang.wang@...ux.alibaba.com,
linux-block@...r.kernel.org, linux-kernel@...r.kernel.org,
joseph.qi@...ux.alibaba.com,
ZiyangZhang <ZiyangZhang@...ux.alibaba.com>
Subject: [PATCH V4 4/8] ublk_drv: requeue rqs with recovery feature enabled
With the recovery feature enabled, in ublk_queue_rq or in task work
(in exit_task_work or the fallback wq), we requeue rqs instead of
ending (aborting) them. Besides, no matter whether the recovery feature
is enabled or disabled, we schedule monitor_work immediately.
Signed-off-by: ZiyangZhang <ZiyangZhang@...ux.alibaba.com>
---
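Note for reviewers: the requeue-vs-abort decision in both new helpers is
gated on ublk_queue_can_use_recovery(). As a rough sketch (the helper is
introduced earlier in this series and is not shown in this patch; the
exact flag test below is assumed), it reduces to a per-queue feature-flag
check:

	/*
	 * Sketch only, assumed from earlier in this series: recovery is
	 * usable iff the queue was set up with UBLK_F_USER_RECOVERY.
	 */
	static inline bool ublk_queue_can_use_recovery(struct ublk_queue *ubq)
	{
		return ubq->flags & UBLK_F_USER_RECOVERY;
	}

With the flag set, a dying ubq_daemon no longer fails inflight rqs; they
are parked so a new daemon can pick them up after recovery.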
drivers/block/ublk_drv.c | 31 +++++++++++++++++++++++++++++--
1 file changed, 29 insertions(+), 2 deletions(-)
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 3bdac4bdf46f..b940e490ebab 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -655,6 +655,19 @@ static void ubq_complete_io_cmd(struct ublk_io *io, int res)
#define UBLK_REQUEUE_DELAY_MS 3
+static inline void __ublk_abort_rq_in_task_work(struct ublk_queue *ubq,
+ struct request *rq)
+{
+ pr_devel("%s: %s q_id %d tag %d io_flags %x.\n", __func__,
+ (ublk_queue_can_use_recovery(ubq)) ? "requeue" : "abort",
+ ubq->q_id, rq->tag, ubq->ios[rq->tag].flags);
+ /* We cannot process this rq so just requeue it. */
+ if (ublk_queue_can_use_recovery(ubq))
+ blk_mq_requeue_request(rq, false);
+ else
+ blk_mq_end_request(rq, BLK_STS_IOERR);
+}
+
static inline void __ublk_rq_task_work(struct request *req)
{
struct ublk_queue *ubq = req->mq_hctx->driver_data;
@@ -677,7 +690,7 @@ static inline void __ublk_rq_task_work(struct request *req)
* (2) current->flags & PF_EXITING.
*/
if (unlikely(current != ubq->ubq_daemon || current->flags & PF_EXITING)) {
- blk_mq_end_request(req, BLK_STS_IOERR);
+ __ublk_abort_rq_in_task_work(ubq, req);
mod_delayed_work(system_wq, &ub->monitor_work, 0);
return;
}
@@ -752,6 +765,20 @@ static void ublk_rq_task_work_fn(struct callback_head *work)
__ublk_rq_task_work(req);
}
+static inline blk_status_t __ublk_abort_rq(struct ublk_queue *ubq,
+ struct request *rq)
+{
+ pr_devel("%s: %s q_id %d tag %d io_flags %x.\n", __func__,
+ (ublk_queue_can_use_recovery(ubq)) ? "requeue" : "abort",
+ ubq->q_id, rq->tag, ubq->ios[rq->tag].flags);
+ /* We cannot process this rq so just requeue it. */
+ if (ublk_queue_can_use_recovery(ubq)) {
+ blk_mq_requeue_request(rq, false);
+ return BLK_STS_OK;
+ }
+ return BLK_STS_IOERR;
+}
+
static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -769,7 +796,7 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
if (unlikely(ubq_daemon_is_dying(ubq))) {
fail:
mod_delayed_work(system_wq, &ubq->dev->monitor_work, 0);
- return BLK_STS_IOERR;
+ return __ublk_abort_rq(ubq, rq);
}
if (ublk_can_use_task_work(ubq)) {
--
2.27.0
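A note on the requeue semantics used above: blk_mq_requeue_request(rq,
false) passes kick_requeue_list == false, so a requeued rq sits on the
request queue's requeue list and is not re-dispatched immediately. A
later step in the recovery flow is expected to kick that list once a new
ubq_daemon has attached, roughly (sketch only; the actual recovery hook
lands later in this series):

	/* Sketch: re-dispatch parked rqs after a new daemon attaches. */
	blk_mq_kick_requeue_list(ub->ub_disk->queue);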