Message-ID: <CAPpSM+RKqRQoc7+6zgC-7O_5O73X7CK=oOWdHafB-h9OHAuEfw@mail.gmail.com>
Date: Sat, 5 Jul 2025 15:49:41 -0700
From: Xiang Mei <xmei5@....edu>
To: xiyou.wangcong@...il.com
Cc: netdev@...r.kernel.org, gregkh@...uxfoundation.org, jhs@...atatu.com,
jiri@...nulli.us, security@...nel.org
Subject: Re: [PATCH v1] net/sched: sch_qfq: Fix race condition on qfq_aggregate

Hi Cong,

This is the sch_tree_lock version of the patch. I agree with your point
that using sch_tree_lock for the aggregate fields is unusual. What do you
think about protecting the agg pointers with RCU instead and using
rcu_dereference_bh() on the reader side?
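
Roughly, it could look like the untested sketch below. This is only to
illustrate the idea, not a real patch: cl->agg would become an __rcu
pointer, struct qfq_aggregate would need a struct rcu_head (called "rcu"
here) for kfree_rcu(), and the writer is assumed to still run under RTNL.

	/* Reader side, e.g. in qfq_dump_class(): take a snapshot of the
	 * aggregate parameters without sch_tree_lock.
	 */
	struct qfq_aggregate *agg;
	u32 weight = 0, lmax = 0;

	rcu_read_lock_bh();
	agg = rcu_dereference_bh(cl->agg);
	if (agg) {
		weight = agg->class_weight;
		lmax = agg->lmax;
	}
	rcu_read_unlock_bh();

	/* Writer side, when the class switches to a newly created
	 * aggregate (new_agg): publish the new pointer and defer freeing
	 * the old one until readers are done.
	 */
	struct qfq_aggregate *old_agg;

	old_agg = rcu_replace_pointer(cl->agg, new_agg,
				      lockdep_rtnl_is_held());
	if (old_agg)
		kfree_rcu(old_agg, rcu);

This is only meant to show the shape of the RCU variant; I haven't thought
through the enqueue/dequeue paths yet.
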
Thanks,
Xiang

On Sat, Jul 5, 2025 at 3:40 PM Xiang Mei <xmei5@....edu> wrote:
>
> A race condition can occur when 'agg' is modified in qfq_change_agg
> (called during qfq_enqueue) while other threads access it
> concurrently. For example, qfq_dump_class may trigger a NULL
> dereference, and qfq_delete_class may cause a use-after-free.
>
> This patch addresses the issue by:
>
> 1. Moving qfq_destroy_class() into the critical section.
>
> 2. Adding sch_tree_lock protection to qfq_dump_class() and
> qfq_dump_class_stats().
>
> Signed-off-by: Xiang Mei <xmei5@....edu>
> ---
> v1: Apply sch_tree_lock to avoid race conditions on qfq_aggregate.
>
> net/sched/sch_qfq.c | 30 +++++++++++++++++++++---------
> 1 file changed, 21 insertions(+), 9 deletions(-)
>
> diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
> index 5e557b960..a2b321fec 100644
> --- a/net/sched/sch_qfq.c
> +++ b/net/sched/sch_qfq.c
> @@ -412,7 +412,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
> bool existing = false;
> struct nlattr *tb[TCA_QFQ_MAX + 1];
> struct qfq_aggregate *new_agg = NULL;
> - u32 weight, lmax, inv_w;
> + u32 weight, lmax, inv_w, old_weight, old_lmax;
> int err;
> int delta_w;
>
> @@ -446,12 +446,16 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
> inv_w = ONE_FP / weight;
> weight = ONE_FP / inv_w;
>
> - if (cl != NULL &&
> - lmax == cl->agg->lmax &&
> - weight == cl->agg->class_weight)
> - return 0; /* nothing to change */
> + if (cl != NULL) {
> + sch_tree_lock(sch);
> + old_weight = cl->agg->class_weight;
> + old_lmax = cl->agg->lmax;
> + sch_tree_unlock(sch);
> + if (lmax == old_lmax && weight == old_weight)
> + return 0; /* nothing to change */
> + }
>
> - delta_w = weight - (cl ? cl->agg->class_weight : 0);
> + delta_w = weight - (cl ? old_weight : 0);
>
> if (q->wsum + delta_w > QFQ_MAX_WSUM) {
> NL_SET_ERR_MSG_FMT_MOD(extack,
> @@ -558,10 +562,10 @@ static int qfq_delete_class(struct Qdisc *sch, unsigned long arg,
>
> qdisc_purge_queue(cl->qdisc);
> qdisc_class_hash_remove(&q->clhash, &cl->common);
> + qfq_destroy_class(sch, cl);
>
> sch_tree_unlock(sch);
>
> - qfq_destroy_class(sch, cl);
> return 0;
> }
>
> @@ -628,6 +632,7 @@ static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
> {
> struct qfq_class *cl = (struct qfq_class *)arg;
> struct nlattr *nest;
> + u32 class_weight, lmax;
>
> tcm->tcm_parent = TC_H_ROOT;
> tcm->tcm_handle = cl->common.classid;
> @@ -636,8 +641,13 @@ static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
> nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
> if (nest == NULL)
> goto nla_put_failure;
> - if (nla_put_u32(skb, TCA_QFQ_WEIGHT, cl->agg->class_weight) ||
> - nla_put_u32(skb, TCA_QFQ_LMAX, cl->agg->lmax))
> +
> + sch_tree_lock(sch);
> + class_weight = cl->agg->class_weight;
> + lmax = cl->agg->lmax;
> + sch_tree_unlock(sch);
> + if (nla_put_u32(skb, TCA_QFQ_WEIGHT, class_weight) ||
> + nla_put_u32(skb, TCA_QFQ_LMAX, lmax))
> goto nla_put_failure;
> return nla_nest_end(skb, nest);
>
> @@ -654,8 +664,10 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
>
> memset(&xstats, 0, sizeof(xstats));
>
> + sch_tree_lock(sch);
> xstats.weight = cl->agg->class_weight;
> xstats.lmax = cl->agg->lmax;
> + sch_tree_unlock(sch);
>
> if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
> gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
> --
> 2.43.0
>