Message-ID: <6b6c9d8d-9a55-4769-ba3a-00d5ee7f32b3@huaweicloud.com>
Date: Fri, 9 Jan 2026 16:37:19 +0800
From: Zheng Qixing <zhengqixing@...weicloud.com>
To: yukuai@...as.com
Cc: hch@...radead.org, cgroups@...r.kernel.org, linux-block@...r.kernel.org,
linux-kernel@...r.kernel.org, yi.zhang@...wei.com, yangerkun@...wei.com,
houtao1@...wei.com, tj@...nel.org, josef@...icpanda.com, axboe@...nel.dk,
zhengqixing@...wei.com
Subject: Re: [PATCH 2/3] blk-cgroup: fix uaf in blkcg_activate_policy() racing
with blkg_free_workfn()

Sorry, I replied with the wrong patch. Even after adding blkcg_mutex in
blkg_destroy_all() and blkcg_activate_policy(), the UAF described in the
3rd patch (multiple calls to call_rcu() releasing the same blkg) still
occurs.
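
To make that race easier to see, here is a minimal userspace model of the
double deferred-free (this is not the kernel code; the struct, flag and
helper names are made up for illustration): without the free_queued check
under the mutex, both racing paths queue the same blkg for freeing, which
is exactly the double call_rcu() case.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Models q->blkcg_mutex (per-queue, not per-blkg). */
static pthread_mutex_t blkcg_mutex = PTHREAD_MUTEX_INITIALIZER;

struct fake_blkg {
	bool free_queued;	/* set once the deferred free is scheduled */
	int payload;
};

/* Stand-in for the call_rcu() queue: frees run after the "grace period". */
static struct fake_blkg *deferred[2];
static int ndeferred;

static void schedule_free(struct fake_blkg *blkg)
{
	pthread_mutex_lock(&blkcg_mutex);
	if (!blkg->free_queued) {		/* without this check the   */
		blkg->free_queued = true;	/* same blkg is queued, and */
		deferred[ndeferred++] = blkg;	/* later freed, twice       */
	}
	pthread_mutex_unlock(&blkcg_mutex);
}

static void *racer(void *arg)
{
	schedule_free(arg);	/* two paths race to release the same blkg */
	return NULL;
}

int main(void)
{
	struct fake_blkg *blkg = calloc(1, sizeof(*blkg));
	pthread_t t1, t2;
	int i;

	pthread_create(&t1, NULL, racer, blkg);
	pthread_create(&t2, NULL, racer, blkg);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	/* "grace period" over: run the deferred frees */
	printf("queued %d deferred free(s)\n", ndeferred);
	for (i = 0; i < ndeferred; i++)
		free(deferred[i]);	/* a 2nd entry would be a double free */
	return 0;
}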

For the 2nd patch, in addition to the blkcg_mutex I initially added in
the -ENOMEM branch, it is indeed possible to take blkcg_mutex at the
other places where blkg_list is accessed. This would prevent the case
where pd_alloc_fn(..., GFP_NOWAIT) succeeds while the corresponding blkg
is being destroyed, which could otherwise lead to a memory leak.
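
The pattern the extra blkcg_mutex enables there is re-checking the blkg
under the lock after the allocation and dropping the pd if the blkg has
already left blkg_list. A rough userspace sketch of that check-then-attach
idea (again, not the kernel code; names are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t blkcg_mutex = PTHREAD_MUTEX_INITIALIZER;

struct fake_pd { int data; };

struct fake_blkg {
	bool on_list;		/* models being linked on q->blkg_list */
	struct fake_pd *pd;
};

/*
 * The allocation (pd_alloc_fn() in the real code) can complete while the
 * blkg is being torn down.  Re-checking on_list under blkcg_mutex decides
 * whether to attach the pd or free it again, so nothing is leaked.
 */
static int activate_pd(struct fake_blkg *blkg)
{
	struct fake_pd *pd = malloc(sizeof(*pd));

	if (!pd)
		return -1;

	pthread_mutex_lock(&blkcg_mutex);
	if (blkg->on_list)
		blkg->pd = pd;		/* attach to the still-live blkg  */
	else
		free(pd);		/* blkg already gone: drop the pd */
	pthread_mutex_unlock(&blkcg_mutex);

	return 0;
}

int main(void)
{
	struct fake_blkg live = { .on_list = true };
	struct fake_blkg dying = { .on_list = false };

	activate_pd(&live);
	activate_pd(&dying);
	printf("live.pd=%p dying.pd=%p\n", (void *)live.pd, (void *)dying.pd);
	free(live.pd);
	return 0;
}

In the kernel the same re-check would of course be made against the real
blkg_list state under q->blkcg_mutex, as in the hunks below.
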
> diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
> index 5e1a724a799a..439cafa98c92 100644
> --- a/block/blk-cgroup.c
> +++ b/block/blk-cgroup.c
> @@ -573,6 +573,7 @@ static void blkg_destroy_all(struct gendisk *disk)
> int count = BLKG_DESTROY_BATCH_SIZE;
> int i;
>
> + mutex_lock(&q->blkcg_mutex);
> restart:
> spin_lock_irq(&q->queue_lock);
> list_for_each_entry(blkg, &q->blkg_list, q_node) {
> @@ -592,7 +593,9 @@ static void blkg_destroy_all(struct gendisk *disk)
> if (!(--count)) {
> count = BLKG_DESTROY_BATCH_SIZE;
> spin_unlock_irq(&q->queue_lock);
> + mutex_unlock(&q->blkcg_mutex);
> cond_resched();
> + mutex_lock(&q->blkcg_mutex);
> goto restart;
> }
> }
> @@ -611,6 +614,7 @@ static void blkg_destroy_all(struct gendisk *disk)
>
> q->root_blkg = NULL;
> spin_unlock_irq(&q->queue_lock);
> + mutex_unlock(&q->blkcg_mutex);
> }
>
> static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src)
> @@ -1621,6 +1625,8 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
>
> if (queue_is_mq(q))
> memflags = blk_mq_freeze_queue(q);
> +
> + mutex_lock(&q->blkcg_mutex);
> retry:
> spin_lock_irq(&q->queue_lock);
>
> @@ -1682,6 +1688,7 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
> ret = 0;
>
> spin_unlock_irq(&q->queue_lock);
> + mutex_unlock(&q->blkcg_mutex);
> out:
> if (queue_is_mq(q))
> blk_mq_unfreeze_queue(q, memflags);
> @@ -1696,6 +1703,7 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
> spin_lock_irq(&q->queue_lock);
> blkcg_policy_teardown_pds(q, pol);
> spin_unlock_irq(&q->queue_lock);
> + mutex_unlock(&q->blkcg_mutex);
> ret = -ENOMEM;
> goto out;
> }