Message-Id: <20190207003720.51096-4-posk@google.com>
Date: Wed, 6 Feb 2019 16:37:17 -0800
From: Peter Oskolkov <posk@...gle.com>
To: Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>, netdev@...r.kernel.org
Cc: Peter Oskolkov <posk@...k.io>, David Ahern <dsahern@...il.com>,
Willem de Bruijn <willemb@...gle.com>,
Peter Oskolkov <posk@...gle.com>
Subject: [PATCH bpf-next v7 3/6] bpf: handle GSO in bpf_lwt_push_encap
This patch adds handling of GSO packets in bpf_lwt_push_ip_encap()
(called from bpf_lwt_push_encap):
* IPIP, GRE, and UDP encapsulation types are deduced by looking
  into iphdr->protocol or ipv6hdr->nexthdr;
* an error is returned if the same GSO encap type is already set
  on the skb;
* SCTP GSO packets are not supported (the same restriction that
  bpf_skb_proto_4_to_6 and similar helpers apply);
* UDP_L4 GSO packets are also not supported (although they are not
  blocked in bpf_skb_proto_4_to_6 and similar helpers), as
  skb_decrease_gso_size() would break them;
* the SKB_GSO_DODGY bit is set.
Note: it may be possible to support SCTP and UDP_L4 GSO packets,
but as these cases do not seem to be well handled by other
tunneling/encapping code paths either, any solution here should be
generic enough to apply to all tunneling/encapping code.
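Roughly, a BPF LWT program built on top of this series could push an
outer IPv4+GRE header as sketched below, and handle_gso_encap() would
then read the outer iphdr->protocol (IPPROTO_GRE) and mark GSO skbs
with SKB_GSO_GRE. This is an illustrative sketch only, not part of
this patch: BPF_LWT_ENCAP_IP and BPF_LWT_REROUTE come from earlier
patches in the series, the include paths follow current libbpf
conventions, and the local struct grehdr and section name are just
example choices.

#include <linux/bpf.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

/* Minimal GRE base header (flags + protocol), defined locally since
 * <net/gre.h> is not available to BPF programs.
 */
struct grehdr {
	__be16 flags;
	__be16 protocol;
};

SEC("lwt_xmit")
int encap_gre(struct __sk_buff *skb)
{
	struct {
		struct iphdr ip;
		struct grehdr gre;
	} __attribute__((packed)) hdr = {};

	/* Outer IPv4 header; ip.protocol is what handle_gso_encap()
	 * inspects to pick the GSO tunnel type for GSO skbs.
	 */
	hdr.ip.version = 4;
	hdr.ip.ihl = sizeof(hdr.ip) >> 2;
	hdr.ip.ttl = 64;
	hdr.ip.protocol = IPPROTO_GRE;
	/* saddr, daddr, tot_len, check etc. omitted in this sketch. */

	hdr.gre.protocol = bpf_htons(ETH_P_IP); /* inner packet is IPv4 */

	if (bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_IP, &hdr, sizeof(hdr)))
		return BPF_DROP;

	/* The encap changes the routing decision, so ask for a reroute. */
	return BPF_LWT_REROUTE;
}

char _license[] SEC("license") = "GPL";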
Signed-off-by: Peter Oskolkov <posk@...gle.com>
---
net/core/lwt_bpf.c | 62 +++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 59 insertions(+), 3 deletions(-)
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index 786b96148937..4ff60757bf23 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -16,6 +16,7 @@
#include <linux/types.h>
#include <linux/bpf.h>
#include <net/lwtunnel.h>
+#include <net/gre.h>
struct bpf_lwt_prog {
struct bpf_prog *prog;
@@ -390,15 +391,70 @@ static const struct lwtunnel_encap_ops bpf_encap_ops = {
.owner = THIS_MODULE,
};
-static int handle_gso_encap(struct sk_buff *skb, bool ipv4, int encap_len)
+static int handle_gso_type(struct sk_buff *skb, unsigned int gso_type,
+ int encap_len)
{
- /* Handling of GSO-enabled packets is added in the next patch. */
- if (unlikely(skb_is_gso(skb)))
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+ /* Refuse to double-encap with the same type. */
+ if (shinfo->gso_type & gso_type)
return -EINVAL;
+ gso_type |= SKB_GSO_DODGY;
+ shinfo->gso_type |= gso_type;
+ skb_decrease_gso_size(shinfo, encap_len);
+ shinfo->gso_segs = 0;
return 0;
}
+static int handle_gso_encap(struct sk_buff *skb, bool ipv4, int encap_len)
+{
+ void *next_hdr;
+ __u8 protocol;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ /* SCTP and UDP_L4 gso need more nuanced handling than what
+ * handle_gso_type() does above: skb_decrease_gso_size() is not enough.
+ */
+ if (unlikely(skb_shinfo(skb)->gso_type &
+ (SKB_GSO_SCTP | SKB_GSO_UDP_L4)))
+ return -ENOTSUPP;
+
+ if (ipv4) {
+ protocol = ip_hdr(skb)->protocol;
+ next_hdr = skb_network_header(skb) + sizeof(struct iphdr);
+ } else {
+ protocol = ipv6_hdr(skb)->nexthdr;
+ next_hdr = skb_network_header(skb) + sizeof(struct ipv6hdr);
+ }
+
+ switch (protocol) {
+ case IPPROTO_GRE:
+ if (((struct gre_base_hdr *)next_hdr)->flags & GRE_CSUM)
+ return handle_gso_type(skb, SKB_GSO_GRE_CSUM,
+ encap_len);
+ return handle_gso_type(skb, SKB_GSO_GRE, encap_len);
+
+ case IPPROTO_UDP:
+ if (((struct udphdr *)next_hdr)->check)
+ return handle_gso_type(skb, SKB_GSO_UDP_TUNNEL_CSUM,
+ encap_len);
+ return handle_gso_type(skb, SKB_GSO_UDP_TUNNEL, encap_len);
+
+ case IPPROTO_IP:
+ case IPPROTO_IPV6:
+ if (ipv4)
+ return handle_gso_type(skb, SKB_GSO_IPXIP4, encap_len);
+ else
+ return handle_gso_type(skb, SKB_GSO_IPXIP6, encap_len);
+
+ default:
+ return -EPROTONOSUPPORT;
+ }
+}
+
int bpf_lwt_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len, bool ingress)
{
struct iphdr *iph;
--
2.20.1.611.gfbb209baf1-goog