Message-ID: <Z2AyLOMazyOCDopc@hog>
Date: Mon, 16 Dec 2024 14:59:08 +0100
From: Sabrina Dubroca <sd@...asysnail.net>
To: Antonio Quartulli <antonio@...nvpn.net>
Cc: netdev@...r.kernel.org, Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>,
Donald Hunter <donald.hunter@...il.com>,
Shuah Khan <shuah@...nel.org>, ryazanov.s.a@...il.com,
Andrew Lunn <andrew+netdev@...n.ch>,
Simon Horman <horms@...nel.org>, linux-kernel@...r.kernel.org,
linux-kselftest@...r.kernel.org, Xiao Liang <shaw.leon@...il.com>,
dsahern@...nel.org
Subject: Re: [PATCH net-next v15 11/22] ovpn: implement TCP transport
2024-12-11, 22:15:15 +0100, Antonio Quartulli wrote:
> @@ -42,6 +56,31 @@ struct ovpn_peer {
> struct in6_addr ipv6;
> } vpn_addrs;
> struct ovpn_socket *sock;
> +
> + /* state of the TCP reading. Needed to keep track of how much of a
> + * single packet has already been read from the stream and how much is
> + * missing
> + */
nit: this comment isn't really accurate anymore since the switch to strp;
it can probably be dropped, since @tcp already has a kdoc entry
> + struct {
> + struct strparser strp;
> + struct work_struct tx_work;
> + struct sk_buff_head user_queue;
> + struct sk_buff_head out_queue;
> + bool tx_in_progress;
> +
> + struct {
> + struct sk_buff *skb;
> + int offset;
> + int len;
> + } out_msg;
> +
> + struct {
> + void (*sk_data_ready)(struct sock *sk);
> + void (*sk_write_space)(struct sock *sk);
> + struct proto *prot;
> + const struct proto_ops *ops;
> + } sk_cb;
> + } tcp;
[...]
> +static void ovpn_tcp_send_sock_skb(struct ovpn_peer *peer, struct sk_buff *skb)
> +{
> + if (peer->tcp.out_msg.skb)
> + ovpn_tcp_send_sock(peer);
> +
> + if (peer->tcp.out_msg.skb) {
> + dev_core_stats_rx_dropped_inc(peer->ovpn->dev);
tx_dropped? This is the transmit path.
> + kfree_skb(skb);
> + return;
> + }
> +
> + peer->tcp.out_msg.skb = skb;
> + peer->tcp.out_msg.len = skb->len;
> + peer->tcp.out_msg.offset = 0;
> + ovpn_tcp_send_sock(peer);
> +}
> +
> +void ovpn_tcp_send_skb(struct ovpn_peer *peer, struct sk_buff *skb)
> +{
> + u16 len = skb->len;
> +
> + *(__be16 *)__skb_push(skb, sizeof(u16)) = htons(len);
> +
> + bh_lock_sock(peer->sock->sock->sk);
> + if (sock_owned_by_user(peer->sock->sock->sk)) {
> + if (skb_queue_len(&peer->tcp.out_queue) >=
> + READ_ONCE(net_hotdata.max_backlog)) {
> + dev_core_stats_rx_dropped_inc(peer->ovpn->dev);
tx_dropped here as well (see the snippet after the quoted function below).
> + kfree_skb(skb);
> + goto unlock;
> + }
> + __skb_queue_tail(&peer->tcp.out_queue, skb);
> + } else {
> + ovpn_tcp_send_sock_skb(peer, skb);
> + }
> +unlock:
> + bh_unlock_sock(peer->sock->sock->sk);
> +}
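In both spots, I'd expect something like this (only the counter changes;
assuming dev_core_stats_tx_dropped_inc() is the helper you want here):

	if (peer->tcp.out_msg.skb) {
		/* a packet is still pending: drop the new one and
		 * account it as a TX drop, since this is the xmit path
		 */
		dev_core_stats_tx_dropped_inc(peer->ovpn->dev);
		kfree_skb(skb);
		return;
	}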
[...]
> +static void ovpn_tcp_close(struct sock *sk, long timeout)
> +{
> + struct ovpn_socket *sock;
> +
> + rcu_read_lock();
[we can't sleep until the matching rcu_read_unlock]
> + sock = rcu_dereference_sk_user_data(sk);
> +
> + strp_stop(&sock->peer->tcp.strp);
> +
> + tcp_close(sk, timeout);
void tcp_close(struct sock *sk, long timeout)
{
	lock_sock(sk);

but lock_sock() can sleep, so tcp_close() can't be called under
rcu_read_lock(). Is there anything that prevents delaying tcp_close()
until after ovpn_peer_del() and rcu_read_unlock()? (rough sketch below,
after the quoted function)
> + ovpn_peer_del(sock->peer, OVPN_DEL_PEER_REASON_TRANSPORT_ERROR);
> + rcu_read_unlock();
> +}
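Rough, untested sketch of what I have in mind (assuming ovpn_peer_del()
doesn't sleep and nothing requires tcp_close() to run before the peer
is removed):

static void ovpn_tcp_close(struct sock *sk, long timeout)
{
	struct ovpn_socket *sock;

	rcu_read_lock();
	sock = rcu_dereference_sk_user_data(sk);

	strp_stop(&sock->peer->tcp.strp);
	ovpn_peer_del(sock->peer, OVPN_DEL_PEER_REASON_TRANSPORT_ERROR);
	rcu_read_unlock();

	/* tcp_close() takes lock_sock() and may sleep, so call it only
	 * after dropping the RCU read-side lock
	 */
	tcp_close(sk, timeout);
}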
[...]
> +void __init ovpn_tcp_init(void)
> +{
> + ovpn_tcp_build_protos(&ovpn_tcp_prot, &ovpn_tcp_ops, &tcp_prot,
> + &inet_stream_ops);
> +
> +#if IS_ENABLED(CONFIG_IPV6)
> + ovpn_tcp_build_protos(&ovpn_tcp6_prot, &ovpn_tcp6_ops, &tcpv6_prot,
> + &inet6_stream_ops);
I don't think that works for CONFIG_OVPN=y and CONFIG_IPV6=m:
tcpv6_prot and inet6_stream_ops would then live in a module that a
built-in ovpn can't link against. You can either go back to the ugly
thing espintcp and tls do (build the v6 protos lazily at socket attach
time, copying from sk->sk_prot under a mutex), or use the traditional
Kconfig hack:

	depends on IPV6 || !IPV6

(you can find it sprinkled in various places of drivers/net/Kconfig
and net/)
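i.e. something like this in the ovpn Kconfig entry (the other lines are
just placeholders for whatever the entry already contains):

config OVPN
	tristate "OpenVPN data channel offload"
	depends on NET && INET
	# forbid OVPN=y with IPV6=m, so that tcpv6_prot and
	# inet6_stream_ops are always linkable when IPv6 is enabled
	depends on IPV6 || !IPV6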
--
Sabrina