Message-ID: <87y1yy8t6j.fsf@toke.dk>
Date: Wed, 18 May 2022 22:47:00 +0200
From: Toke Høiland-Jørgensen <toke@...hat.com>
To: Lorenzo Bianconi <lorenzo@...nel.org>, bpf@...r.kernel.org
Cc: netdev@...r.kernel.org, ast@...nel.org, daniel@...earbox.net,
andrii@...nel.org, davem@...emloft.net, kuba@...nel.org,
edumazet@...gle.com, pabeni@...hat.com, pablo@...filter.org,
fw@...len.de, netfilter-devel@...r.kernel.org,
lorenzo.bianconi@...hat.com, brouer@...hat.com, memxor@...il.com
Subject: Re: [PATCH v3 bpf-next 4/5] net: netfilter: add kfunc helper to add
a new ct entry

Lorenzo Bianconi <lorenzo@...nel.org> writes:
> Introduce bpf_xdp_ct_add and bpf_skb_ct_add kfunc helpers in order to
> add a new entry to the ct map from an eBPF program.
> Introduce bpf_nf_ct_tuple_parse utility routine.
>
> Signed-off-by: Lorenzo Bianconi <lorenzo@...nel.org>
> ---
> net/netfilter/nf_conntrack_bpf.c | 212 +++++++++++++++++++++++++++----
> 1 file changed, 189 insertions(+), 23 deletions(-)
>
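For other reviewers following along, here is roughly how I read the
intended usage from the XDP side. This is only a sketch on my part: it
assumes bpf_xdp_ct_add() takes the same (ctx, tuple, tuple size, opts,
opts size) arguments as the existing bpf_xdp_ct_lookup() kfunc, the
bpf_ct_opts definition is a hand-copied local flavour (so it doesn't
clash with the kernel's own BTF definition), and the addresses/ports
are made up:

  /* Sketch only; see the assumptions above. */
  #include <vmlinux.h>
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_endian.h>

  /* Local copy of the opts struct (12 bytes, matching NF_BPF_CT_OPTS_SZ);
   * the ___local suffix avoids a name clash with the type in vmlinux.h.
   */
  struct bpf_ct_opts___local {
          s32 netns_id;
          s32 error;
          u8 l4proto;
          u8 dir;
          u8 reserved[2];
  };

  extern struct nf_conn *
  bpf_xdp_ct_add(struct xdp_md *xdp_ctx, struct bpf_sock_tuple *bpf_tuple,
                 u32 tuple__sz, struct bpf_ct_opts___local *opts,
                 u32 opts__sz) __ksym;
  extern void bpf_ct_release(struct nf_conn *ct) __ksym;

  SEC("xdp")
  int ct_add_example(struct xdp_md *ctx)
  {
          struct bpf_ct_opts___local opts = {
                  .netns_id = -1,         /* BPF_F_CURRENT_NETNS */
                  .l4proto  = IPPROTO_TCP,
          };
          struct bpf_sock_tuple tup = {};
          struct nf_conn *ct;

          tup.ipv4.saddr = bpf_htonl(0x0a000001);    /* 10.0.0.1 */
          tup.ipv4.daddr = bpf_htonl(0x0a000002);    /* 10.0.0.2 */
          tup.ipv4.sport = bpf_htons(12345);
          tup.ipv4.dport = bpf_htons(4242);

          /* IPv4 tuple, so pass sizeof(tup.ipv4) as the tuple length */
          ct = bpf_xdp_ct_add(ctx, &tup, sizeof(tup.ipv4),
                              &opts, sizeof(opts));
          if (ct)
                  bpf_ct_release(ct);

          return XDP_PASS;
  }

  char _license[] SEC("license") = "GPL";
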
> diff --git a/net/netfilter/nf_conntrack_bpf.c b/net/netfilter/nf_conntrack_bpf.c
> index a9271418db88..3d31b602fdf1 100644
> --- a/net/netfilter/nf_conntrack_bpf.c
> +++ b/net/netfilter/nf_conntrack_bpf.c
> @@ -55,41 +55,114 @@ enum {
> NF_BPF_CT_OPTS_SZ = 12,
> };
>
> -static struct nf_conn *__bpf_nf_ct_lookup(struct net *net,
> - struct bpf_sock_tuple *bpf_tuple,
> - u32 tuple_len, u8 protonum,
> - s32 netns_id, u8 *dir)
> +static int bpf_nf_ct_tuple_parse(struct bpf_sock_tuple *bpf_tuple,
> + u32 tuple_len, u8 protonum, u8 dir,
> + struct nf_conntrack_tuple *tuple)
> {
> - struct nf_conntrack_tuple_hash *hash;
> - struct nf_conntrack_tuple tuple;
> - struct nf_conn *ct;
> + union nf_inet_addr *src = dir ? &tuple->dst.u3 : &tuple->src.u3;
> + union nf_inet_addr *dst = dir ? &tuple->src.u3 : &tuple->dst.u3;
> + union nf_conntrack_man_proto *sport = dir ? (void *)&tuple->dst.u
> + : &tuple->src.u;
> + union nf_conntrack_man_proto *dport = dir ? &tuple->src.u
> + : (void *)&tuple->dst.u;
>
> if (unlikely(protonum != IPPROTO_TCP && protonum != IPPROTO_UDP))
> - return ERR_PTR(-EPROTO);
> - if (unlikely(netns_id < BPF_F_CURRENT_NETNS))
> - return ERR_PTR(-EINVAL);
> + return -EPROTO;
> +
> + memset(tuple, 0, sizeof(*tuple));
>
> - memset(&tuple, 0, sizeof(tuple));
> switch (tuple_len) {
> case sizeof(bpf_tuple->ipv4):
> - tuple.src.l3num = AF_INET;
> - tuple.src.u3.ip = bpf_tuple->ipv4.saddr;
> - tuple.src.u.tcp.port = bpf_tuple->ipv4.sport;
> - tuple.dst.u3.ip = bpf_tuple->ipv4.daddr;
> - tuple.dst.u.tcp.port = bpf_tuple->ipv4.dport;
> + tuple->src.l3num = AF_INET;
> + src->ip = bpf_tuple->ipv4.saddr;
> + sport->tcp.port = bpf_tuple->ipv4.sport;
> + dst->ip = bpf_tuple->ipv4.daddr;
> + dport->tcp.port = bpf_tuple->ipv4.dport;
> break;
> case sizeof(bpf_tuple->ipv6):
> - tuple.src.l3num = AF_INET6;
> - memcpy(tuple.src.u3.ip6, bpf_tuple->ipv6.saddr, sizeof(bpf_tuple->ipv6.saddr));
> - tuple.src.u.tcp.port = bpf_tuple->ipv6.sport;
> - memcpy(tuple.dst.u3.ip6, bpf_tuple->ipv6.daddr, sizeof(bpf_tuple->ipv6.daddr));
> - tuple.dst.u.tcp.port = bpf_tuple->ipv6.dport;
> + tuple->src.l3num = AF_INET6;
> + memcpy(src->ip6, bpf_tuple->ipv6.saddr, sizeof(bpf_tuple->ipv6.saddr));
> + sport->tcp.port = bpf_tuple->ipv6.sport;
> + memcpy(dst->ip6, bpf_tuple->ipv6.daddr, sizeof(bpf_tuple->ipv6.daddr));
> + dport->tcp.port = bpf_tuple->ipv6.dport;
> break;
> default:
> - return ERR_PTR(-EAFNOSUPPORT);
> + return -EAFNOSUPPORT;
> }
> + tuple->dst.protonum = protonum;
> + tuple->dst.dir = dir;
> +
> + return 0;
> +}
>
> - tuple.dst.protonum = protonum;
> +struct nf_conn *
> +__bpf_nf_ct_alloc_entry(struct net *net, struct bpf_sock_tuple *bpf_tuple,
> + u32 tuple_len, u8 protonum, s32 netns_id, u32 timeout)
> +{
> + struct nf_conntrack_tuple otuple, rtuple;
> + struct nf_conn *ct;
> + int err;
> +
> + if (unlikely(netns_id < BPF_F_CURRENT_NETNS))
> + return ERR_PTR(-EINVAL);
> +
> + err = bpf_nf_ct_tuple_parse(bpf_tuple, tuple_len, protonum,
> + IP_CT_DIR_ORIGINAL, &otuple);
> + if (err < 0)
> + return ERR_PTR(err);
> +
> + err = bpf_nf_ct_tuple_parse(bpf_tuple, tuple_len, protonum,
> + IP_CT_DIR_REPLY, &rtuple);
> + if (err < 0)
> + return ERR_PTR(err);
> +
> + if (netns_id >= 0) {
> + net = get_net_ns_by_id(net, netns_id);
> + if (unlikely(!net))
> + return ERR_PTR(-ENONET);
> + }
> +
> + ct = nf_conntrack_alloc(net, &nf_ct_zone_dflt, &otuple, &rtuple,
> + GFP_ATOMIC);
> + if (IS_ERR(ct))
> + goto out;
> +
> + ct->timeout = timeout * HZ + jiffies;
> + ct->status |= IPS_CONFIRMED;
> +
> + memset(&ct->proto, 0, sizeof(ct->proto));
> + if (protonum == IPPROTO_TCP)
> + ct->proto.tcp.state = TCP_CONNTRACK_ESTABLISHED;

Hmm, isn't it a bit limiting to hard-code this to ESTABLISHED
connections? Presumably for TCP you'd want to use this when you see a
SYN and then rely on conntrack to help with the subsequent state
tracking for when the SYN-ACK comes back? What's the use case for
creating an entry in ESTABLISHED state, exactly?

(Of course, we'd need to be able to update the state as well, then...)
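
To make that concrete, what I had in mind is something along these
lines (purely illustrative, not a worked-out proposal):

	memset(&ct->proto, 0, sizeof(ct->proto));
	if (protonum == IPPROTO_TCP)
		/* Entry created when the initial SYN is seen; the state
		 * would then need to be advanced (by conntrack, or by a
		 * future BPF-side update helper) as the handshake
		 * progresses.
		 */
		ct->proto.tcp.state = TCP_CONNTRACK_SYN_SENT;
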
-Toke