Message-ID: <ZlYJaIvXY3nuNd98@hog>
Date: Tue, 28 May 2024 18:42:16 +0200
From: Sabrina Dubroca <sd@...asysnail.net>
To: Antonio Quartulli <antonio@...nvpn.net>
Cc: netdev@...r.kernel.org, Jakub Kicinski <kuba@...nel.org>,
Sergey Ryazanov <ryazanov.s.a@...il.com>,
Paolo Abeni <pabeni@...hat.com>, Eric Dumazet <edumazet@...gle.com>,
Andrew Lunn <andrew@...n.ch>, Esben Haabendal <esben@...nix.com>
Subject: Re: [PATCH net-next v3 15/24] ovpn: implement peer lookup logic

2024-05-06, 03:16:28 +0200, Antonio Quartulli wrote:
> +static struct in6_addr ovpn_nexthop_from_skb6(struct sk_buff *skb)
> +{
> + struct rt6_info *rt = (struct rt6_info *)skb_rtable(skb);
skb_rt6_info?
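i.e. something like (untested):

    struct rt6_info *rt = skb_rt6_info(skb);

instead of the open-coded cast.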
> +
> + if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
> + return ipv6_hdr(skb)->daddr;
> +
> + return rt->rt6i_gateway;
> +}
> +
> +/**
> + * ovpn_peer_get_by_vpn_addr4 - retrieve peer by its VPN IPv4 address
> + * @head: list head to search
> + * @addr: VPN IPv4 to use as search key
> + *
> + * Return: the peer if found or NULL otherwise
The doc for all those ovpn_peer_get_* functions could indicate that on
success, a reference on the peer is held.
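Something along the lines of:

     * Return: the peer if found (with a reference held) or NULL otherwise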
[...]
> +static struct ovpn_peer *ovpn_peer_get_by_vpn_addr6(struct hlist_head *head,
> + struct in6_addr *addr)
> +{
> + struct ovpn_peer *tmp, *peer = NULL;
> + int i;
> +
> + rcu_read_lock();
> + hlist_for_each_entry_rcu(tmp, head, hash_entry_addr6) {
> + for (i = 0; i < 4; i++) {
> + if (addr->s6_addr32[i] !=
> + tmp->vpn_addrs.ipv6.s6_addr32[i])
> + continue;
> + }
ipv6_addr_equal
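i.e. (untested):

    if (!ipv6_addr_equal(addr, &tmp->vpn_addrs.ipv6))
        continue;

instead of the open-coded word-by-word comparison.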
[...]
> + default:
> + return NULL;
> + }
> +
> + index = ovpn_peer_index(ovpn->peers.by_transp_addr, &ss, sa_len);
> + head = &ovpn->peers.by_transp_addr[index];
Maybe worth adding a get_bucket helper (with a better name :)) instead
of ovpn_peer_index, since all uses of ovpn_peer_index are followed by
a "head = TBL[index]" (or direct use in some hlist iterator), but the
index itself is not used later on, only the bucket.
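Rough sketch (assuming ovpn_peer_index keeps its current arguments):

    static struct hlist_head *ovpn_peer_hash_bucket(struct hlist_head *tbl,
                                                    const void *data, size_t len)
    {
        return &tbl[ovpn_peer_index(tbl, data, len)];
    }

so callers only ever see the bucket.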
> +
> + rcu_read_lock();
> + hlist_for_each_entry_rcu(tmp, head, hash_entry_transp_addr) {
> + found = ovpn_peer_transp_match(tmp, &ss);
> + if (!found)
nit: call ovpn_peer_transp_match directly and drop the found variable
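i.e.:

    if (!ovpn_peer_transp_match(tmp, &ss))
        continue;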
> + continue;
> +
> + if (!ovpn_peer_hold(tmp))
> + continue;
> +
> + peer = tmp;
> + break;
> + }
> + rcu_read_unlock();
>
> return peer;
> }
> @@ -303,10 +427,28 @@ static struct ovpn_peer *ovpn_peer_get_by_id_p2p(struct ovpn_struct *ovpn,
>
> struct ovpn_peer *ovpn_peer_get_by_id(struct ovpn_struct *ovpn, u32 peer_id)
> {
> - struct ovpn_peer *peer = NULL;
> + struct ovpn_peer *tmp, *peer = NULL;
> + struct hlist_head *head;
> + u32 index;
>
> if (ovpn->mode == OVPN_MODE_P2P)
> - peer = ovpn_peer_get_by_id_p2p(ovpn, peer_id);
> + return ovpn_peer_get_by_id_p2p(ovpn, peer_id);
> +
> + index = ovpn_peer_index(ovpn->peers.by_id, &peer_id, sizeof(peer_id));
> + head = &ovpn->peers.by_id[index];
> +
> + rcu_read_lock();
> + hlist_for_each_entry_rcu(tmp, head, hash_entry_id) {
> + if (tmp->id != peer_id)
> + continue;
> +
> + if (!ovpn_peer_hold(tmp))
> + continue;
Can there ever be multiple peers with the same id? (ie, is it worth
continuing the loop if this fails? the same question probably applies
to ovpn_peer_get_by_transp_addr as well)
> + peer = tmp;
> + break;
> + }
> + rcu_read_unlock();
>
> return peer;
> }
> @@ -328,6 +470,11 @@ struct ovpn_peer *ovpn_peer_get_by_dst(struct ovpn_struct *ovpn,
> struct sk_buff *skb)
> {
> struct ovpn_peer *tmp, *peer = NULL;
> + struct hlist_head *head;
> + sa_family_t sa_fam;
> + struct in6_addr addr6;
> + __be32 addr4;
> + u32 index;
>
> /* in P2P mode, no matter the destination, packets are always sent to
> * the single peer listening on the other side
> @@ -338,15 +485,123 @@ struct ovpn_peer *ovpn_peer_get_by_dst(struct ovpn_struct *ovpn,
> if (likely(tmp && ovpn_peer_hold(tmp)))
> peer = tmp;
> rcu_read_unlock();
> + return peer;
> + }
> +
> + sa_fam = skb_protocol_to_family(skb);
> +
> + switch (sa_fam) {
> + case AF_INET:
> + addr4 = ovpn_nexthop_from_skb4(skb);
> + index = ovpn_peer_index(ovpn->peers.by_vpn_addr, &addr4,
> + sizeof(addr4));
> + head = &ovpn->peers.by_vpn_addr[index];
> +
> + peer = ovpn_peer_get_by_vpn_addr4(head, &addr4);
> + break;
> + case AF_INET6:
> + addr6 = ovpn_nexthop_from_skb6(skb);
> + index = ovpn_peer_index(ovpn->peers.by_vpn_addr, &addr6,
> + sizeof(addr6));
> + head = &ovpn->peers.by_vpn_addr[index];
> +
> + peer = ovpn_peer_get_by_vpn_addr6(head, &addr6);
The index -> head -> peer code is identical in get_by_dst and
get_by_src, it could be stuffed into ovpn_peer_get_by_vpn_addr{4,6}.
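Untested sketch, keeping ovpn_peer_index as the hashing primitive:

    static struct ovpn_peer *ovpn_peer_get_by_vpn_addr4(struct ovpn_struct *ovpn,
                                                        __be32 *addr)
    {
        struct hlist_head *head;
        u32 index;

        index = ovpn_peer_index(ovpn->peers.by_vpn_addr, addr, sizeof(*addr));
        head = &ovpn->peers.by_vpn_addr[index];

        /* ... then the existing RCU lookup loop over head, unchanged ... */
    }

(same idea for the v6 variant), so each call site becomes a single call per
family.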
> + break;
> }
>
> return peer;
> }
[snip the _rt4 variant, comments apply to both]
> +/**
> + * ovpn_nexthop_from_rt6 - look up the IPv6 nexthop for the given destination
I'm a bit confused by this talk about "destination" when those two
functions are then used with the source address from the packet, from
a function called "get_by_src".
> + * @ovpn: the private data representing the current VPN session
> + * @dst: the destination to be looked up
> + *
> + * Looks up in the IPv6 system routing table the IO of the nexthop to be used
"the IO"?
> + * to reach the destination passed as argument. IF no nexthop can be found, the
> + * destination itself is returned as it probably has to be used as nexthop.
> + *
> + * Return: the IP of the next hop if found or the dst itself otherwise
"the dst" tends to refer to a dst_entry, maybe "or @dst otherwise"?
(though I'm not sure that's valid kdoc)
(also for ovpn_nexthop_from_rt4)
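Something like:

     * Return: the IP of the nexthop if found, @dst otherwise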
> + */
> +static struct in6_addr ovpn_nexthop_from_rt6(struct ovpn_struct *ovpn,
> + struct in6_addr dst)
> +{
> +#if IS_ENABLED(CONFIG_IPV6)
> + struct dst_entry *entry;
> + struct rt6_info *rt;
> + struct flowi6 fl = {
> + .daddr = dst,
> + };
> +
> + entry = ipv6_stub->ipv6_dst_lookup_flow(dev_net(ovpn->dev), NULL, &fl,
> + NULL);
> + if (IS_ERR(entry)) {
> + net_dbg_ratelimited("%s: no route to host %pI6c\n", __func__,
> + &dst);
> + /* if we end up here this packet is probably going to be
> + * thrown away later
> + */
> + return dst;
> + }
> +
> + rt = container_of(entry, struct rt6_info, dst);
dst_rt6_info(entry)
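i.e.:

    rt = dst_rt6_info(entry);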
> +
> + if (!(rt->rt6i_flags & RTF_GATEWAY))
> + goto out;
> +
> + dst = rt->rt6i_gateway;
> +out:
> + dst_release((struct dst_entry *)rt);
> +#endif
> + return dst;
> +}
> +
> struct ovpn_peer *ovpn_peer_get_by_src(struct ovpn_struct *ovpn,
> struct sk_buff *skb)
> {
> struct ovpn_peer *tmp, *peer = NULL;
> + struct hlist_head *head;
> + sa_family_t sa_fam;
> + struct in6_addr addr6;
> + __be32 addr4;
> + u32 index;
>
> /* in P2P mode, no matter the destination, packets are always sent to
> * the single peer listening on the other side
> @@ -357,6 +612,28 @@ struct ovpn_peer *ovpn_peer_get_by_src(struct ovpn_struct *ovpn,
> if (likely(tmp && ovpn_peer_hold(tmp)))
> peer = tmp;
> rcu_read_unlock();
> + return peer;
> + }
> +
> + sa_fam = skb_protocol_to_family(skb);
> +
> + switch (sa_fam) {
nit:
switch (skb_protocol_to_family(skb))
seems a bit more readable to me (also in ovpn_peer_get_by_dst) - and
saves you from reverse xmas tree complaints (sa_fam should have been
after addr6)
> + case AF_INET:
> + addr4 = ovpn_nexthop_from_rt4(ovpn, ip_hdr(skb)->saddr);
> + index = ovpn_peer_index(ovpn->peers.by_vpn_addr, &addr4,
> + sizeof(addr4));
> + head = &ovpn->peers.by_vpn_addr[index];
> +
> + peer = ovpn_peer_get_by_vpn_addr4(head, &addr4);
> + break;
> + case AF_INET6:
> + addr6 = ovpn_nexthop_from_rt6(ovpn, ipv6_hdr(skb)->saddr);
> + index = ovpn_peer_index(ovpn->peers.by_vpn_addr, &addr6,
> + sizeof(addr6));
> + head = &ovpn->peers.by_vpn_addr[index];
> +
> + peer = ovpn_peer_get_by_vpn_addr6(head, &addr6);
> + break;
> }
>
> return peer;
> --
> 2.43.2
>
>
--
Sabrina