Message-ID: <177022458258.1827734.18192185731352302528.stgit@firesoul>
Date: Wed, 04 Feb 2026 18:03:02 +0100
From: Jesper Dangaard Brouer <hawk@...nel.org>
To: netdev@...r.kernel.org, Eric Dumazet <eric.dumazet@...il.com>,
"David S. Miller" <davem@...emloft.net>, Paolo Abeni <pabeni@...hat.com>,
Toke Høiland-Jørgensen <toke@...e.dk>
Cc: Jesper Dangaard Brouer <hawk@...nel.org>, bpf@...r.kernel.org,
Jakub Kicinski <kuba@...nel.org>, horms@...nel.org, jiri@...nulli.us,
edumazet@...gle.com, xiyou.wangcong@...il.com, jhs@...atatu.com,
carges@...udflare.com, kernel-team@...udflare.com
Subject: [PATCH net-next RFC v1 2/3] net: sched: sfq: convert to qdisc drop
reasons

Convert SFQ to use the new qdisc-specific drop reason infrastructure.
This patch demonstrates how to convert a flow-based qdisc to use the
new enum qdisc_drop_reason. As part of this conversion:

- Add QDISC_DROP_MAXFLOWS for flow table exhaustion
- Rename FQ_FLOW_LIMIT to generic FLOW_LIMIT, now shared by FQ and SFQ
- Use QDISC_DROP_OVERLIMIT in sfq_drop() when the overall qdisc limit is exceeded
- Use QDISC_DROP_FLOW_LIMIT when a flow's per-flow depth limit is exceeded

The FLOW_LIMIT reason is now a common drop reason for per-flow limits,
applicable to both FQ and SFQ qdiscs. An illustrative usage sketch
follows the diffstat below.

Signed-off-by: Jesper Dangaard Brouer <hawk@...nel.org>
---
 include/net/dropreason-qdisc.h |   22 ++++++++++++++++------
 net/sched/sch_fq.c             |    2 +-
 net/sched/sch_sfq.c            |    8 ++++----
 3 files changed, 21 insertions(+), 11 deletions(-)
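
Note for reviewers (not part of the diff below): a minimal,
illustrative sketch of how a flow-based qdisc enqueue path would
report the two new reasons through the qdisc_drop_reason() helper
used in this series. The my_* types and the my_flow_lookup() helper
are made-up placeholders for this sketch, not code from any existing
qdisc.

/* Sketch only: placeholder per-flow and per-qdisc state. */
struct my_flow {
	unsigned int qlen;		/* packets currently queued on this flow */
};

struct my_sched_data {
	unsigned int flow_plimit;	/* per-flow packet limit */
};

/* Assumed to return NULL when the flow table has no free slot left. */
struct my_flow *my_flow_lookup(struct my_sched_data *q, struct sk_buff *skb);

static int my_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		      struct sk_buff **to_free)
{
	struct my_sched_data *q = qdisc_priv(sch);
	struct my_flow *f = my_flow_lookup(q, skb);

	if (!f)		/* flow table exhausted: no slot for a new flow */
		return qdisc_drop_reason(skb, sch, to_free,
					 QDISC_DROP_MAXFLOWS);

	if (f->qlen >= q->flow_plimit)	/* this flow is over its own limit */
		return qdisc_drop_reason(skb, sch, to_free,
					 QDISC_DROP_FLOW_LIMIT);

	/* ... normal enqueue of skb on the flow's queue ... */
	return NET_XMIT_SUCCESS;
}

The point of keeping FLOW_LIMIT and MAXFLOWS separate is that a
consumer of the drop reason can tell "one flow exceeded its per-flow
limit" apart from "the flow table itself is full" without any
qdisc-specific knowledge.
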
diff --git a/include/net/dropreason-qdisc.h b/include/net/dropreason-qdisc.h
index 2175ab34921e..2c0a5c358a5f 100644
--- a/include/net/dropreason-qdisc.h
+++ b/include/net/dropreason-qdisc.h
@@ -8,10 +8,11 @@
 	FN(GENERIC) \
 	FN(OVERLIMIT) \
 	FN(CONGESTED) \
+	FN(FLOW_LIMIT) \
+	FN(MAXFLOWS) \
 	FN(CAKE_FLOOD) \
 	FN(FQ_BAND_LIMIT) \
 	FN(FQ_HORIZON_LIMIT) \
-	FN(FQ_FLOW_LIMIT) \
 	FNe(MAX)
 
 #undef FN
@@ -51,6 +52,20 @@ enum qdisc_drop_reason {
 	 * congestion to the sender and prevent bufferbloat.
 	 */
 	QDISC_DROP_CONGESTED,
+	/**
+	 * @QDISC_DROP_FLOW_LIMIT: packet dropped because an individual flow
+	 * exceeded its per-flow packet/depth limit. Used by FQ and SFQ qdiscs
+	 * to enforce per-flow fairness and prevent a single flow from
+	 * monopolizing queue resources.
+	 */
+	QDISC_DROP_FLOW_LIMIT,
+	/**
+	 * @QDISC_DROP_MAXFLOWS: packet dropped because the qdisc's flow
+	 * tracking table is full and no free slots are available to allocate
+	 * for a new flow. This indicates flow table exhaustion in flow-based
+	 * qdiscs that maintain per-flow state (e.g., SFQ).
+	 */
+	QDISC_DROP_MAXFLOWS,
 	/**
 	 * @QDISC_DROP_CAKE_FLOOD: CAKE qdisc dropped packet due to flood
 	 * protection mechanism (BLUE algorithm). This indicates potential
@@ -69,11 +84,6 @@ enum qdisc_drop_reason {
 	 * packets with unreasonable future timestamps from blocking the queue.
 	 */
 	QDISC_DROP_FQ_HORIZON_LIMIT,
-	/**
-	 * @QDISC_DROP_FQ_FLOW_LIMIT: FQ dropped packet because an individual
-	 * flow exceeded its per-flow packet limit.
-	 */
-	QDISC_DROP_FQ_FLOW_LIMIT,
 	/**
 	 * @QDISC_DROP_MAX: the maximum of qdisc drop reasons, which
 	 * shouldn't be used as a real 'reason' - only for tracing code gen
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 058801fefca1..ca615d5433db 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -580,7 +580,7 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	if (unlikely(f->qlen >= q->flow_plimit)) {
 		q->stat_flows_plimit++;
 		return qdisc_drop_reason(skb, sch, to_free,
-					 QDISC_DROP_FQ_FLOW_LIMIT);
+					 QDISC_DROP_FLOW_LIMIT);
 	}
 
 	if (fq_flow_is_detached(f)) {
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 96eb2f122973..efb796976a5b 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -302,7 +302,7 @@ static unsigned int sfq_drop(struct Qdisc *sch, struct sk_buff **to_free)
 		sfq_dec(q, x);
 		sch->q.qlen--;
 		qdisc_qstats_backlog_dec(sch, skb);
-		qdisc_drop(skb, sch, to_free);
+		qdisc_drop_reason(skb, sch, to_free, QDISC_DROP_OVERLIMIT);
 		return len;
 	}
 
@@ -363,7 +363,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 	if (x == SFQ_EMPTY_SLOT) {
 		x = q->dep[0].next; /* get a free slot */
 		if (x >= SFQ_MAX_FLOWS)
-			return qdisc_drop(skb, sch, to_free);
+			return qdisc_drop_reason(skb, sch, to_free, QDISC_DROP_MAXFLOWS);
 		q->ht[hash] = x;
 		slot = &q->slots[x];
 		slot->hash = hash;
@@ -420,14 +420,14 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 	if (slot->qlen >= q->maxdepth) {
 congestion_drop:
 		if (!sfq_headdrop(q))
-			return qdisc_drop(skb, sch, to_free);
+			return qdisc_drop_reason(skb, sch, to_free, QDISC_DROP_FLOW_LIMIT);
 
 		/* We know we have at least one packet in queue */
 		head = slot_dequeue_head(slot);
 		delta = qdisc_pkt_len(head) - qdisc_pkt_len(skb);
 		sch->qstats.backlog -= delta;
 		slot->backlog -= delta;
-		qdisc_drop(head, sch, to_free);
+		qdisc_drop_reason(head, sch, to_free, QDISC_DROP_FLOW_LIMIT);
 
 		slot_queue_add(slot, skb);
 		qdisc_tree_reduce_backlog(sch, 0, delta);