[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-Id: <20220729105036.2202791-1-zhangwensheng@huaweicloud.com>
Date: Fri, 29 Jul 2022 18:50:36 +0800
From: Zhang Wensheng <zhangwensheng@...weicloud.com>
To: axboe@...nel.dk
Cc: linux-block@...r.kernel.org, linux-kernel@...r.kernel.org,
bpf@...r.kernel.org, yukuai3@...wei.com,
zhangwensheng@...weicloud.com
Subject: [PATCH -next] [RFC] block: fix null-deref in percpu_ref_put
From: Zhang Wensheng <zhangwensheng5@...wei.com>
A problem was found in stable 5.10, and its root cause is as below.
In the use of q_usage_counter of request_queue, blk_cleanup_queue using
"wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter))"
to wait for q_usage_counter to become zero. However, if q_usage_counter
becomes zero quickly, percpu_ref_exit will execute and ref->data will be
freed; another process may then hit a null-deref problem
like below:
CPU0 CPU1
blk_cleanup_queue
blk_freeze_queue
blk_mq_freeze_queue_wait
scsi_end_request
percpu_ref_get
...
percpu_ref_put
atomic_long_sub_and_test
percpu_ref_exit
ref->data -> NULL
ref->data->release(ref) -> null-deref
Fix it by adding a synchronization flag (QUEUE_FLAG_USAGE_COUNT_SYNC):
when ref->data->release is called, the flag is set, and the "wait_event"
in blk_mq_freeze_queue_wait must also wait for the flag to become true,
which prevents percpu_ref_exit from executing ahead of time.
Although the problem was not reproduced in mainline, the same problem may
also occur with passthrough IO, which goes directly to
blk_cleanup_queue and can trigger it as well.
Signed-off-by: Zhang Wensheng <zhangwensheng5@...wei.com>
---
block/blk-core.c | 4 +++-
block/blk-mq.c | 7 +++++++
include/linux/blk-mq.h | 1 +
include/linux/blkdev.h | 2 ++
4 files changed, 13 insertions(+), 1 deletion(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 27fb1357ad4b..4b73f46e62ec 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -312,7 +312,8 @@ void blk_cleanup_queue(struct request_queue *q)
* prevent that blk_mq_run_hw_queues() accesses the hardware queues
* after draining finished.
*/
- blk_freeze_queue(q);
+ blk_freeze_queue_start(q);
+ blk_mq_freeze_queue_wait_sync(q);
blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
@@ -403,6 +404,7 @@ static void blk_queue_usage_counter_release(struct percpu_ref *ref)
struct request_queue *q =
container_of(ref, struct request_queue, q_usage_counter);
+ blk_queue_flag_set(QUEUE_FLAG_USAGE_COUNT_SYNC, q);
wake_up_all(&q->mq_freeze_wq);
}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 93d9d60980fb..44e764257511 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -165,6 +165,7 @@ void blk_freeze_queue_start(struct request_queue *q)
{
mutex_lock(&q->mq_freeze_lock);
if (++q->mq_freeze_depth == 1) {
+ blk_queue_flag_clear(QUEUE_FLAG_USAGE_COUNT_SYNC, q);
percpu_ref_kill(&q->q_usage_counter);
mutex_unlock(&q->mq_freeze_lock);
if (queue_is_mq(q))
@@ -175,6 +176,12 @@ void blk_freeze_queue_start(struct request_queue *q)
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
+void blk_mq_freeze_queue_wait_sync(struct request_queue *q)
+{
+ wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter) &&
+ test_bit(QUEUE_FLAG_USAGE_COUNT_SYNC, &q->queue_flags));
+}
+
void blk_mq_freeze_queue_wait(struct request_queue *q)
{
wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index e2d9daf7e8dd..50fd56f85b31 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -868,6 +868,7 @@ void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
+void blk_mq_freeze_queue_wait_sync(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
unsigned long timeout);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2f7b43444c5f..93ed8b166d66 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -575,6 +575,8 @@ struct request_queue {
#define QUEUE_FLAG_HCTX_ACTIVE 28 /* at least one blk-mq hctx is active */
#define QUEUE_FLAG_NOWAIT 29 /* device supports NOWAIT */
#define QUEUE_FLAG_SQ_SCHED 30 /* single queue style io dispatch */
+/* sync for q_usage_counter */
+#define QUEUE_FLAG_USAGE_COUNT_SYNC 31
#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_SAME_COMP) | \
--
2.31.1
Powered by blists - more mailing lists