diff '--color=auto' -urpN a/block/blk-mq.c b/block/blk-mq.c
--- a/block/blk-mq.c	2025-02-21 14:01:47.000000000 +0100
+++ b/block/blk-mq.c	2025-02-21 15:52:51.848041852 +0100
@@ -1418,7 +1418,7 @@ void blk_execute_rq_nowait(struct reques
 	}
 
 	blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
-	blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING);
+	blk_mq_run_hw_queue(hctx, false);
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 
@@ -2322,8 +2322,6 @@ void blk_mq_run_hw_queue(struct blk_mq_h
 	 */
 	WARN_ON_ONCE(!async && in_interrupt());
 
-	might_sleep_if(!async && hctx->flags & BLK_MQ_F_BLOCKING);
-
 	need_run = blk_mq_hw_queue_need_run(hctx);
 	if (!need_run) {
 		unsigned long flags;
@@ -2342,7 +2340,8 @@ void blk_mq_run_hw_queue(struct blk_mq_h
 		return;
 	}
 
-	if (async || !cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
+	if (async || (hctx->flags & BLK_MQ_F_BLOCKING) ||
+	    !cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
 		blk_mq_delay_run_hw_queue(hctx, 0);
 		return;
 	}
@@ -2477,7 +2476,7 @@ void blk_mq_start_hw_queue(struct blk_mq
 {
 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
 
-	blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING);
+	blk_mq_run_hw_queue(hctx, false);
 }
 EXPORT_SYMBOL(blk_mq_start_hw_queue);
 
@@ -2513,8 +2512,7 @@ void blk_mq_start_stopped_hw_queues(stru
 	unsigned long i;
 
 	queue_for_each_hw_ctx(q, hctx, i)
-		blk_mq_start_stopped_hw_queue(hctx, async ||
-					(hctx->flags & BLK_MQ_F_BLOCKING));
+		blk_mq_start_stopped_hw_queue(hctx, async);
 }
 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
 
@@ -2572,8 +2570,6 @@ static void blk_mq_insert_requests(struc
 	list_for_each_entry(rq, list, queuelist) {
 		BUG_ON(rq->mq_ctx != ctx);
 		trace_block_rq_insert(rq);
-		if (rq->cmd_flags & REQ_NOWAIT)
-			run_queue_async = true;
 	}
 
 	spin_lock(&ctx->lock);
@@ -2739,7 +2735,7 @@ static void blk_mq_try_issue_directly(st
 	if ((rq->rq_flags & RQF_USE_SCHED) ||
 	    !blk_mq_get_budget_and_tag(rq)) {
 		blk_mq_insert_request(rq, 0);
-		blk_mq_run_hw_queue(hctx, rq->cmd_flags & REQ_NOWAIT);
+		blk_mq_run_hw_queue(hctx, false);
 		return;
 	}
 
diff '--color=auto' -urpN a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
--- a/drivers/scsi/scsi_lib.c	2025-02-21 14:01:47.000000000 +0100
+++ b/drivers/scsi/scsi_lib.c	2025-02-21 15:53:54.654044691 +0100
@@ -429,8 +429,7 @@ static void scsi_single_lun_run(struct s
 	 * but in most cases, we will be first. Ideally, each LU on the
 	 * target would get some limited time or requests on the target.
	 */
-	blk_mq_run_hw_queues(current_sdev->request_queue,
-			     shost->queuecommand_may_block);
+	blk_mq_run_hw_queues(current_sdev->request_queue, false);
 	spin_lock_irqsave(shost->host_lock, flags);
 	if (!starget->starget_sdev_user)