Message-Id: <3c20787be0fd5d64728ffed46ae0a7dff10d7e05.1532437050.git.pabeni@redhat.com>
Date: Tue, 24 Jul 2018 22:06:42 +0200
From: Paolo Abeni <pabeni@...hat.com>
To: netdev@...r.kernel.org
Cc: Jamal Hadi Salim <jhs@...atatu.com>,
Cong Wang <xiyou.wangcong@...il.com>,
Jiri Pirko <jiri@...nulli.us>,
Daniel Borkmann <daniel@...earbox.net>,
Marcelo Ricardo Leitner <marcelo.leitner@...il.com>,
Eyal Birger <eyal.birger@...il.com>,
"David S. Miller" <davem@...emloft.net>
Subject: [PATCH net-next v3 4/5] net/tc: introduce TC_ACT_REINJECT.
This is similar to TC_ACT_REDIRECT, but with slightly different
semantics:
- on ingress the mirred skbs are passed to the target device's
  network stack without any additional check nor scrubbing.
- the rcu-protected stats provided via the tcf_result struct
  are updated on error conditions.

This new tcfa_action value is not exposed to user-space and can be
used only internally by clsact.
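
As an illustrative sketch only (not part of this series), an in-kernel
action such as act_mirred could use the new value along these lines;
example_act() is a hypothetical action callback, while the act_api
fields and qstats helpers it touches are assumed from the existing code:

static int example_act(struct sk_buff *skb, const struct tc_action *a,
		       struct tcf_result *res)
{
	/* tell clsact where to reinject the skb and where to account errors */
	res->ingress = true;			/* reinject into the RX path */
	res->qstats = this_cpu_ptr(a->cpu_qstats);

	/* sch_handle_ingress() then calls skb_tc_reinject(), which hands the
	 * skb to netif_receive_skb()/dev_queue_xmit() without scrubbing and
	 * bumps the qstats overlimit counter on error.
	 */
	return TC_ACT_REINJECT;
}
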
v1 -> v2: do not touch the TC_ACT_REDIRECT code path, introduce
 a new action type instead

v2 -> v3:
 - rename the new action value to TC_ACT_REINJECT, and update the
   helper accordingly
 - take care of uncloned reinjected packets in the XDP generic
   hook

Signed-off-by: Paolo Abeni <pabeni@...hat.com>
---
include/net/pkt_cls.h | 3 +++
include/net/sch_generic.h | 19 +++++++++++++++++++
net/core/dev.c | 6 +++++-
3 files changed, 27 insertions(+), 1 deletion(-)
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 2081e4219f81..36ccfe2a303a 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -7,6 +7,9 @@
#include <net/sch_generic.h>
#include <net/act_api.h>
+/* TC action not accessible from user space */
+#define TC_ACT_REINJECT (TC_ACT_VALUE_MAX + 1)
+
/* Basic packet classifier frontend definitions. */
struct tcf_walker {
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 056dc1083aa3..95e81a70f549 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -235,6 +235,12 @@ struct tcf_result {
 			u32		classid;
 		};
 		const struct tcf_proto	*goto_tp;
+
+		/* used by the TC_ACT_REINJECT action */
+		struct {
+			bool		ingress;
+			struct gnet_stats_queue *qstats;
+		};
 	};
 };
@@ -1091,4 +1097,17 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
 void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
 			  struct mini_Qdisc __rcu **p_miniq);
+static inline void skb_tc_reinject(struct sk_buff *skb, struct tcf_result *res)
+{
+	struct gnet_stats_queue *stats = res->qstats;
+	int ret;
+
+	if (res->ingress)
+		ret = netif_receive_skb(skb);
+	else
+		ret = dev_queue_xmit(skb);
+	if (ret && stats)
+		qstats_overlimit_inc(res->qstats);
+}
+
#endif
diff --git a/net/core/dev.c b/net/core/dev.c
index 14a748ee8cc9..826ec74fe1d9 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4252,7 +4252,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
 	/* Reinjected packets coming from act_mirred or similar should
 	 * not get XDP generic processing.
 	 */
-	if (skb_cloned(skb))
+	if (skb_cloned(skb) || skb->tc_redirected)
 		return XDP_PASS;

 	/* XDP packets must be linear and must have sufficient headroom
@@ -4602,6 +4602,10 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
 		__skb_push(skb, skb->mac_len);
 		skb_do_redirect(skb);
 		return NULL;
+	case TC_ACT_REINJECT:
+		/* this does not scrub the packet, and updates stats on error */
+		skb_tc_reinject(skb, &cl_res);
+		return NULL;
 	default:
 		break;
 	}
--
2.17.1