Message-Id: <7c4c0ee1b5c4e0af61dc4ffb199df78b93499879.1590512901.git.petrm@mellanox.com>
Date: Tue, 26 May 2020 20:10:07 +0300
From: Petr Machata <petrm@...lanox.com>
To: netdev@...r.kernel.org
Cc: Jakub Kicinski <kuba@...nel.org>,
Eric Dumazet <eric.dumazet@...il.com>, jhs@...atatu.com,
jiri@...lanox.com, idosch@...lanox.com,
Petr Machata <petrm@...lanox.com>
Subject: [RFC PATCH net-next 3/3] net: sched: sch_red: Add qevents "early" and "mark"

In order to allow acting on dropped and/or ECN-marked packets, add two new
qevents to the RED qdisc: "early" and "mark". Filters attached at the
"early" block are executed as packets are early-dropped; those attached at
the "mark" block are executed as packets are ECN-marked.
Two new attributes are introduced: TCA_RED_EARLY_BLOCK with the block index
for the "early" qevent, and TCA_RED_MARK_BLOCK with the block index for the
"mark" qevent. Absence of either attribute signifies "don't care": no block
is allocated in that case, and the change callback leaves any existing
blocks intact.
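
For instance, a userspace sketch using libmnl (hypothetical; the
RTM_NEWQDISC boilerplate is omitted and the block indices are arbitrary)
would put the new attributes into the RED TCA_OPTIONS nest alongside
TCA_RED_PARMS and friends:

	/* Hypothetical sketch: bind block 10 to the "early" qevent and
	 * block 11 to the "mark" qevent of the RED qdisc being created.
	 */
	mnl_attr_put_u32(nlh, TCA_RED_EARLY_BLOCK, 10);
	mnl_attr_put_u32(nlh, TCA_RED_MARK_BLOCK, 11);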
For purposes of offloading, blocks attached to these qevents appear with
newly-introduced binder types, FLOW_BLOCK_BINDER_TYPE_RED_EARLY and
FLOW_BLOCK_BINDER_TYPE_RED_MARK.
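
A driver can then tell the qevent blocks apart when its block bind callback
is invoked. A minimal sketch; only binder_type and the binder-type values
come from this patch, everything named foo_* is hypothetical:

	static int foo_setup_block(struct foo_port *port,
				   struct flow_block_offload *f)
	{
		/* Dispatch on the binder type carried in the bind request. */
		switch (f->binder_type) {
		case FLOW_BLOCK_BINDER_TYPE_RED_EARLY:
			return foo_bind_red_early(port, f);
		case FLOW_BLOCK_BINDER_TYPE_RED_MARK:
			return foo_bind_red_mark(port, f);
		default:
			return -EOPNOTSUPP;
		}
	}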
Signed-off-by: Petr Machata <petrm@...lanox.com>
---
 include/net/flow_offload.h     |  2 ++
 include/uapi/linux/pkt_sched.h |  2 ++
 net/sched/sch_red.c            | 59 +++++++++++++++++++++++++++++++++-
 3 files changed, 62 insertions(+), 1 deletion(-)

diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index 4001ffb04f0d..635d2bb57550 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -416,6 +416,8 @@ enum flow_block_binder_type {
 	FLOW_BLOCK_BINDER_TYPE_UNSPEC,
 	FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
 	FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
+	FLOW_BLOCK_BINDER_TYPE_RED_EARLY,
+	FLOW_BLOCK_BINDER_TYPE_RED_MARK,
 };
 
 struct flow_block {
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index a95f3ae7ab37..ff3f4830e049 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -257,6 +257,8 @@ enum {
 	TCA_RED_STAB,
 	TCA_RED_MAX_P,
 	TCA_RED_FLAGS,		/* bitfield32 */
+	TCA_RED_EARLY_BLOCK,	/* u32 */
+	TCA_RED_MARK_BLOCK,	/* u32 */
 	__TCA_RED_MAX,
 };
 
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index c52a40ad5e59..0c6a6429ca02 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -46,6 +46,8 @@ struct red_sched_data {
 	struct red_vars		vars;
 	struct red_stats	stats;
 	struct Qdisc		*qdisc;
+	struct tcf_qevent	qe_early;
+	struct tcf_qevent	qe_mark;
 };
 
 #define TC_RED_SUPPORTED_FLAGS (TC_RED_HISTORIC_FLAGS | TC_RED_NODROP)
@@ -92,6 +94,10 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 
 		if (INET_ECN_set_ce(skb)) {
 			q->stats.prob_mark++;
+			skb = tcf_qevent_handle(&q->qe_mark, sch,
+						skb, to_free, &ret);
+			if (!skb)
+				return NET_XMIT_CN | ret;
 		} else if (!red_use_nodrop(q)) {
 			q->stats.prob_drop++;
 			goto congestion_drop;
@@ -109,6 +115,10 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 
 		if (INET_ECN_set_ce(skb)) {
 			q->stats.forced_mark++;
+			skb = tcf_qevent_handle(&q->qe_mark, sch,
+						skb, to_free, &ret);
+			if (!skb)
+				return NET_XMIT_CN | ret;
 		} else if (!red_use_nodrop(q)) {
 			q->stats.forced_drop++;
 			goto congestion_drop;
@@ -129,6 +139,11 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	return ret;
 
 congestion_drop:
+	skb = tcf_qevent_handle(&q->qe_early, sch,
+				skb, to_free, &ret);
+	if (!skb)
+		return NET_XMIT_CN | ret;
+
 	qdisc_drop(skb, sch, to_free);
 	return NET_XMIT_CN;
 }
@@ -202,6 +217,8 @@ static void red_destroy(struct Qdisc *sch)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
 
+	tcf_qevent_destroy(&q->qe_mark, sch);
+	tcf_qevent_destroy(&q->qe_early, sch);
 	del_timer_sync(&q->adapt_timer);
 	red_offload(sch, false);
 	qdisc_put(q->qdisc);
@@ -213,6 +230,8 @@ static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
 	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
 	[TCA_RED_MAX_P] = { .type = NLA_U32 },
 	[TCA_RED_FLAGS] = NLA_POLICY_BITFIELD32(TC_RED_SUPPORTED_FLAGS),
+	[TCA_RED_EARLY_BLOCK] = { .type = NLA_U32 },
+	[TCA_RED_MARK_BLOCK] = { .type = NLA_U32 },
 };
 
 static int __red_change(struct Qdisc *sch, struct nlattr **tb,
@@ -328,7 +347,35 @@ static int red_init(struct Qdisc *sch, struct nlattr *opt,
 	q->qdisc = &noop_qdisc;
 	q->sch = sch;
 	timer_setup(&q->adapt_timer, red_adaptative_timer, 0);
-	return __red_change(sch, tb, extack);
+
+	q->qe_early.attr_name = TCA_RED_EARLY_BLOCK;
+	q->qe_mark.attr_name = TCA_RED_MARK_BLOCK;
+
+	err = __red_change(sch, tb, extack);
+	if (err)
+		return err;
+
+	err = tcf_qevent_init(&q->qe_early, sch,
+			      FLOW_BLOCK_BINDER_TYPE_RED_EARLY,
+			      tb[TCA_RED_EARLY_BLOCK], extack);
+	if (err)
+		goto err_early_init;
+
+	err = tcf_qevent_init(&q->qe_mark, sch,
+			      FLOW_BLOCK_BINDER_TYPE_RED_MARK,
+			      tb[TCA_RED_MARK_BLOCK], extack);
+	if (err)
+		goto err_mark_init;
+
+	return 0;
+
+err_mark_init:
+	tcf_qevent_destroy(&q->qe_early, sch);
+err_early_init:
+	del_timer_sync(&q->adapt_timer);
+	red_offload(sch, false);
+	qdisc_put(q->qdisc);
+	return err;
 }
 
 static int red_change(struct Qdisc *sch, struct nlattr *opt,
@@ -346,6 +393,16 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
 	if (err < 0)
 		return err;
 
+	err = tcf_qevent_validate_change(&q->qe_early, tb[TCA_RED_EARLY_BLOCK],
+					 extack);
+	if (err)
+		return err;
+
+	err = tcf_qevent_validate_change(&q->qe_mark, tb[TCA_RED_MARK_BLOCK],
+					 extack);
+	if (err)
+		return err;
+
 	return __red_change(sch, tb, extack);
 }
 
--
2.20.1