Message-Id: <20260106-mq-cake-sub-qdisc-v6-4-ee2e06b1eb1a@redhat.com>
Date: Tue, 06 Jan 2026 12:40:55 +0100
From: Toke Høiland-Jørgensen <toke@...hat.com>
To: Toke Høiland-Jørgensen <toke@...e.dk>,
Jamal Hadi Salim <jhs@...atatu.com>, Cong Wang <xiyou.wangcong@...il.com>,
Jiri Pirko <jiri@...nulli.us>, "David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>, Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>, Simon Horman <horms@...nel.org>
Cc: Jonas Köppeler <j.koeppeler@...berlin.de>,
cake@...ts.bufferbloat.net, netdev@...r.kernel.org,
Toke Høiland-Jørgensen <toke@...hat.com>
Subject: [PATCH net-next v6 4/6] net/sched: sch_cake: Share config across
cake_mq sub-qdiscs
This adds support for configuring the cake_mq instance directly, sharing
the configuration across all of its cake sub-qdiscs. Since the shared
configuration is owned by the cake_mq parent, attempts to reconfigure an
individual sub-qdisc are rejected with -EOPNOTSUPP.
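A rough sketch of the intended usage from userspace (this assumes a
matching iproute2 change teaching tc about the cake_mq qdisc type; the
device name and parameter values below are illustrative only):

  # install cake_mq as the root qdisc; each TX queue gets a cake sub-qdisc
  tc qdisc replace dev eth0 root handle 1: cake_mq bandwidth 100Mbit diffserv4

  # reconfigure the shared config; all sub-qdiscs pick up the change
  tc qdisc change dev eth0 root handle 1: cake_mq bandwidth 50Mbit

  # reconfiguring an individual sub-qdisc directly is rejected, since its
  # config is owned by the cake_mq parent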
Signed-off-by: Toke Høiland-Jørgensen <toke@...hat.com>
---
net/sched/sch_cake.c | 173 +++++++++++++++++++++++++++++++++++++++------------
1 file changed, 133 insertions(+), 40 deletions(-)
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index fa01c352f5a5..f9dafa687950 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -212,6 +212,7 @@ struct cake_sched_config {
u8 flow_mode;
u8 atm_mode;
u8 ack_filter;
+ u8 is_shared;
};
struct cake_sched_data {
@@ -2587,14 +2588,12 @@ static void cake_reconfigure(struct Qdisc *sch)
q->buffer_config_limit));
}
-static int cake_change(struct Qdisc *sch, struct nlattr *opt,
- struct netlink_ext_ack *extack)
+static int cake_config_change(struct cake_sched_config *q, struct nlattr *opt,
+ struct netlink_ext_ack *extack, bool *overhead_changed)
{
- struct cake_sched_data *qd = qdisc_priv(sch);
- struct cake_sched_config *q = qd->config;
struct nlattr *tb[TCA_CAKE_MAX + 1];
- u16 rate_flags;
- u8 flow_mode;
+ u16 rate_flags = q->rate_flags;
+ u8 flow_mode = q->flow_mode;
int err;
err = nla_parse_nested_deprecated(tb, TCA_CAKE_MAX, opt, cake_policy,
@@ -2602,7 +2601,6 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
if (err < 0)
return err;
- flow_mode = q->flow_mode;
if (tb[TCA_CAKE_NAT]) {
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
flow_mode &= ~CAKE_FLOW_NAT_FLAG;
@@ -2615,6 +2613,19 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
#endif
}
+ if (tb[TCA_CAKE_AUTORATE]) {
+ if (!!nla_get_u32(tb[TCA_CAKE_AUTORATE])) {
+ if (q->is_shared) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[TCA_CAKE_AUTORATE],
+ "Can't use autorate-ingress with cake_mq");
+ return -EOPNOTSUPP;
+ }
+ rate_flags |= CAKE_FLAG_AUTORATE_INGRESS;
+ } else {
+ rate_flags &= ~CAKE_FLAG_AUTORATE_INGRESS;
+ }
+ }
+
if (tb[TCA_CAKE_BASE_RATE64])
WRITE_ONCE(q->rate_bps,
nla_get_u64(tb[TCA_CAKE_BASE_RATE64]));
@@ -2623,7 +2634,6 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
WRITE_ONCE(q->tin_mode,
nla_get_u32(tb[TCA_CAKE_DIFFSERV_MODE]));
- rate_flags = q->rate_flags;
if (tb[TCA_CAKE_WASH]) {
if (!!nla_get_u32(tb[TCA_CAKE_WASH]))
rate_flags |= CAKE_FLAG_WASH;
@@ -2644,20 +2654,12 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
WRITE_ONCE(q->rate_overhead,
nla_get_s32(tb[TCA_CAKE_OVERHEAD]));
rate_flags |= CAKE_FLAG_OVERHEAD;
-
- qd->max_netlen = 0;
- qd->max_adjlen = 0;
- qd->min_netlen = ~0;
- qd->min_adjlen = ~0;
+ *overhead_changed = true;
}
if (tb[TCA_CAKE_RAW]) {
rate_flags &= ~CAKE_FLAG_OVERHEAD;
-
- qd->max_netlen = 0;
- qd->max_adjlen = 0;
- qd->min_netlen = ~0;
- qd->min_adjlen = ~0;
+ *overhead_changed = true;
}
if (tb[TCA_CAKE_MPU])
@@ -2676,13 +2678,6 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
WRITE_ONCE(q->target, max(target, 1U));
}
- if (tb[TCA_CAKE_AUTORATE]) {
- if (!!nla_get_u32(tb[TCA_CAKE_AUTORATE]))
- rate_flags |= CAKE_FLAG_AUTORATE_INGRESS;
- else
- rate_flags &= ~CAKE_FLAG_AUTORATE_INGRESS;
- }
-
if (tb[TCA_CAKE_INGRESS]) {
if (!!nla_get_u32(tb[TCA_CAKE_INGRESS]))
rate_flags |= CAKE_FLAG_INGRESS;
@@ -2713,6 +2708,34 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
WRITE_ONCE(q->rate_flags, rate_flags);
WRITE_ONCE(q->flow_mode, flow_mode);
+
+ return 0;
+}
+
+static int cake_change(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+{
+ struct cake_sched_data *qd = qdisc_priv(sch);
+ struct cake_sched_config *q = qd->config;
+ bool overhead_changed = false;
+ int ret;
+
+ if (q->is_shared) {
+ NL_SET_ERR_MSG(extack, "can't reconfigure cake_mq sub-qdiscs");
+ return -EOPNOTSUPP;
+ }
+
+ ret = cake_config_change(q, opt, extack, &overhead_changed);
+ if (ret)
+ return ret;
+
+ if (overhead_changed) {
+ qd->max_netlen = 0;
+ qd->max_adjlen = 0;
+ qd->min_netlen = ~0;
+ qd->min_adjlen = ~0;
+ }
+
if (qd->tins) {
sch_tree_lock(sch);
cake_reconfigure(sch);
@@ -2729,7 +2752,23 @@ static void cake_destroy(struct Qdisc *sch)
qdisc_watchdog_cancel(&q->watchdog);
tcf_block_put(q->block);
kvfree(q->tins);
- kvfree(q->config);
+ if (q->config && !q->config->is_shared)
+ kvfree(q->config);
+}
+
+static void cake_config_init(struct cake_sched_config *q, bool is_shared)
+{
+ q->tin_mode = CAKE_DIFFSERV_DIFFSERV3;
+ q->flow_mode = CAKE_FLOW_TRIPLE;
+
+ q->rate_bps = 0; /* unlimited by default */
+
+ q->interval = 100000; /* 100ms default */
+ q->target = 5000; /* 5ms: codel RFC argues
+ * for 5 to 10% of interval
+ */
+ q->rate_flags |= CAKE_FLAG_SPLIT_GSO;
+ q->is_shared = is_shared;
}
static int cake_init(struct Qdisc *sch, struct nlattr *opt,
@@ -2743,19 +2782,11 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt,
if (!q)
return -ENOMEM;
+ cake_config_init(q, false);
+
sch->limit = 10240;
sch->flags |= TCQ_F_DEQUEUE_DROPS;
- q->tin_mode = CAKE_DIFFSERV_DIFFSERV3;
- q->flow_mode = CAKE_FLOW_TRIPLE;
-
- q->rate_bps = 0; /* unlimited by default */
-
- q->interval = 100000; /* 100ms default */
- q->target = 5000; /* 5ms: codel RFC argues
- * for 5 to 10% of interval
- */
- q->rate_flags |= CAKE_FLAG_SPLIT_GSO;
qd->cur_tin = 0;
qd->cur_flow = 0;
qd->config = q;
@@ -2818,10 +2849,21 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt,
return err;
}
-static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
+static void cake_config_replace(struct Qdisc *sch, struct cake_sched_config *cfg)
{
struct cake_sched_data *qd = qdisc_priv(sch);
struct cake_sched_config *q = qd->config;
+
+ qd->config = cfg;
+
+ if (!q->is_shared)
+ kvfree(q);
+
+ cake_reconfigure(sch);
+}
+
+static int cake_config_dump(struct cake_sched_config *q, struct sk_buff *skb)
+{
struct nlattr *opts;
u16 rate_flags;
u8 flow_mode;
@@ -2897,6 +2939,13 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
return -1;
}
+static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct cake_sched_data *qd = qdisc_priv(sch);
+
+ return cake_config_dump(qd->config, skb);
+}
+
static int cake_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
struct nlattr *stats = nla_nest_start_noflag(d->skb, TCA_STATS_APP);
@@ -3160,6 +3209,7 @@ MODULE_ALIAS_NET_SCH("cake");
struct cake_mq_sched {
struct mq_sched mq_priv; /* must be first */
+ struct cake_sched_config cake_config;
};
static void cake_mq_destroy(struct Qdisc *sch)
@@ -3170,25 +3220,68 @@ static void cake_mq_destroy(struct Qdisc *sch)
static int cake_mq_init(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
- int ret;
+ struct cake_mq_sched *priv = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+ int ret, ntx;
+ bool _unused;
+
+ cake_config_init(&priv->cake_config, true);
+ if (opt) {
+ ret = cake_config_change(&priv->cake_config, opt, extack, &_unused);
+ if (ret)
+ return ret;
+ }
ret = mq_init_common(sch, opt, extack, &cake_qdisc_ops);
if (ret)
return ret;
+ for (ntx = 0; ntx < dev->num_tx_queues; ntx++)
+ cake_config_replace(priv->mq_priv.qdiscs[ntx], &priv->cake_config);
+
return 0;
}
static int cake_mq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
+ struct cake_mq_sched *priv = qdisc_priv(sch);
+
mq_dump_common(sch, skb);
- return 0;
+ return cake_config_dump(&priv->cake_config, skb);
}
static int cake_mq_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
- return -EOPNOTSUPP;
+ struct cake_mq_sched *priv = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+ bool overhead_changed = false;
+ unsigned int ntx;
+ int ret;
+
+ ret = cake_config_change(&priv->cake_config, opt, extack, &overhead_changed);
+ if (ret)
+ return ret;
+
+ for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
+ struct Qdisc *chld = rtnl_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping);
+ struct cake_sched_data *qd = qdisc_priv(chld);
+
+ if (overhead_changed) {
+ qd->max_netlen = 0;
+ qd->max_adjlen = 0;
+ qd->min_netlen = ~0;
+ qd->min_adjlen = ~0;
+ }
+
+ if (qd->tins) {
+ sch_tree_lock(chld);
+ cake_reconfigure(chld);
+ sch_tree_unlock(chld);
+ }
+ }
+
+ return 0;
}
static int cake_mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
--
2.52.0