[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <Ztcf88I1epYlIYGS@hog>
Date: Tue, 3 Sep 2024 16:40:51 +0200
From: Sabrina Dubroca <sd@...asysnail.net>
To: Antonio Quartulli <antonio@...nvpn.net>
Cc: netdev@...r.kernel.org, kuba@...nel.org, pabeni@...hat.com,
ryazanov.s.a@...il.com, edumazet@...gle.com, andrew@...n.ch
Subject: Re: [PATCH net-next v6 15/25] ovpn: implement multi-peer support
2024-08-27, 14:07:55 +0200, Antonio Quartulli wrote:
> static int ovpn_net_init(struct net_device *dev)
> {
> struct ovpn_struct *ovpn = netdev_priv(dev);
> + int i, err = gro_cells_init(&ovpn->gro_cells, dev);
I'm not a fan of "hiding" the gro_cells_init call up here in the
declarations. I'd prefer that the call be made just before the
corresponding "if (err)" check.
> + struct in_device *dev_v4;
>
> - return gro_cells_init(&ovpn->gro_cells, dev);
> + if (err)
> + return err;
> +
> + if (ovpn->mode == OVPN_MODE_MP) {
> + dev_v4 = __in_dev_get_rtnl(dev);
> + if (dev_v4) {
> + /* disable redirects as Linux gets confused by ovpn
> + * handling same-LAN routing.
> + * This happens because a multipeer interface is used as
> + * relay point between hosts in the same subnet, while
> + * in a classic LAN this would not be needed because the
> + * two hosts would be able to talk directly.
> + */
> + IN_DEV_CONF_SET(dev_v4, SEND_REDIRECTS, false);
> + IPV4_DEVCONF_ALL(dev_net(dev), SEND_REDIRECTS) = false;
> + }
> +
> + /* the peer container is fairly large, therefore we dynamically
> + * allocate it only when needed
> + */
> + ovpn->peers = kzalloc(sizeof(*ovpn->peers), GFP_KERNEL);
> + if (!ovpn->peers)
This error path is missing a gro_cells_destroy() call to undo the
earlier gro_cells_init().
> + return -ENOMEM;
> +
> + spin_lock_init(&ovpn->peers->lock_by_id);
> + spin_lock_init(&ovpn->peers->lock_by_vpn_addr);
> + spin_lock_init(&ovpn->peers->lock_by_transp_addr);
What's the benefit of having 3 separate locks instead of a single lock
protecting all the hashtables?
> +
> + for (i = 0; i < ARRAY_SIZE(ovpn->peers->by_id); i++) {
> + INIT_HLIST_HEAD(&ovpn->peers->by_id[i]);
> + INIT_HLIST_HEAD(&ovpn->peers->by_vpn_addr[i]);
> + INIT_HLIST_NULLS_HEAD(&ovpn->peers->by_transp_addr[i],
> + i);
> + }
> + }
> +
> + return 0;
> }
> +static int ovpn_peer_add_mp(struct ovpn_struct *ovpn, struct ovpn_peer *peer)
> +{
> + struct sockaddr_storage sa = { 0 };
> + struct hlist_nulls_head *nhead;
> + struct sockaddr_in6 *sa6;
> + struct sockaddr_in *sa4;
> + struct hlist_head *head;
> + struct ovpn_bind *bind;
> + struct ovpn_peer *tmp;
> + size_t salen;
> +
> + spin_lock_bh(&ovpn->peers->lock_by_id);
> + /* do not add duplicates */
> + tmp = ovpn_peer_get_by_id(ovpn, peer->id);
> + if (tmp) {
> + ovpn_peer_put(tmp);
> + spin_unlock_bh(&ovpn->peers->lock_by_id);
> + return -EEXIST;
> + }
> +
> + hlist_add_head_rcu(&peer->hash_entry_id,
> + ovpn_get_hash_head(ovpn->peers->by_id, &peer->id,
> + sizeof(peer->id)));
> + spin_unlock_bh(&ovpn->peers->lock_by_id);
> +
> + bind = rcu_dereference_protected(peer->bind, true);
> + /* peers connected via TCP have bind == NULL */
> + if (bind) {
> + switch (bind->remote.in4.sin_family) {
> + case AF_INET:
> + sa4 = (struct sockaddr_in *)&sa;
> +
> + sa4->sin_family = AF_INET;
> + sa4->sin_addr.s_addr = bind->remote.in4.sin_addr.s_addr;
> + sa4->sin_port = bind->remote.in4.sin_port;
> + salen = sizeof(*sa4);
> + break;
> + case AF_INET6:
> + sa6 = (struct sockaddr_in6 *)&sa;
> +
> + sa6->sin6_family = AF_INET6;
> + sa6->sin6_addr = bind->remote.in6.sin6_addr;
> + sa6->sin6_port = bind->remote.in6.sin6_port;
> + salen = sizeof(*sa6);
> + break;
> + default:
And remove from the by_id hashtable? Or is that handled somewhere that
I missed (I don't think ovpn_peer_unhash gets called in that case)?
> + return -EPROTONOSUPPORT;
> + }
> +
--
Sabrina
Powered by blists - more mailing lists