[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <ZLl9wUxKrZpgHMxY@corigine.com>
Date: Thu, 20 Jul 2023 19:32:33 +0100
From: Simon Horman <simon.horman@...igine.com>
To: Hannes Reinecke <hare@...e.de>
Cc: Christoph Hellwig <hch@....de>, Sagi Grimberg <sagi@...mberg.me>,
Keith Busch <kbusch@...nel.org>, linux-nvme@...ts.infradead.org,
Jakub Kicinski <kuba@...nel.org>,
Eric Dumazet <edumazet@...gle.com>, Paolo Abeni <pabeni@...hat.com>,
netdev@...r.kernel.org, Boris Pismenny <boris.pismenny@...il.com>
Subject: Re: [PATCH 6/6] net/tls: implement ->read_sock()
On Wed, Jul 19, 2023 at 01:38:36PM +0200, Hannes Reinecke wrote:
...
Hi Hannes,
> diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
> index d0636ea13009..4829d2cb9a7c 100644
> --- a/net/tls/tls_sw.c
> +++ b/net/tls/tls_sw.c
> @@ -2202,6 +2202,102 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
> goto splice_read_end;
> }
>
> +int tls_sw_read_sock(struct sock *sk, read_descriptor_t *desc,
> + sk_read_actor_t read_actor)
> +{
> + struct tls_context *tls_ctx = tls_get_ctx(sk);
> + struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
> + struct strp_msg *rxm = NULL;
> + struct tls_msg *tlm;
> + struct sk_buff *skb;
> + struct sk_psock *psock;
> + ssize_t copied = 0;
> + bool bpf_strp_enabled;
> + int err, used;
> +
> + psock = sk_psock_get(sk);
> + err = tls_rx_reader_acquire(sk, ctx, true);
> + if (err < 0)
> + goto psock_put;
skb is uninitialised here; however, it is used in the psock_put unwind path.
Flagged by gcc-12 [-Wmaybe-uninitialized] and Smatch.
> + bpf_strp_enabled = sk_psock_strp_enabled(psock);
> +
> + /* If crypto failed the connection is broken */
> + err = ctx->async_wait.err;
> + if (err)
> + goto read_sock_end;
Likewise, here.
> +
> + do {
> + if (!skb_queue_empty(&ctx->rx_list)) {
> + skb = __skb_dequeue(&ctx->rx_list);
> + rxm = strp_msg(skb);
> + } else {
> + struct tls_decrypt_arg darg;
> +
> + err = tls_rx_rec_wait(sk, psock, true, true);
> + if (err <= 0)
> + goto read_sock_end;
> +
> + memset(&darg.inargs, 0, sizeof(darg.inargs));
> + darg.zc = !bpf_strp_enabled && ctx->zc_capable;
> +
> + rxm = strp_msg(tls_strp_msg(ctx));
> + tlm = tls_msg(tls_strp_msg(ctx));
> +
> + /* read_sock does not support reading control messages */
> + if (tlm->control != TLS_RECORD_TYPE_DATA) {
> + err = -EINVAL;
> + goto read_sock_requeue;
> + }
> +
> + if (!bpf_strp_enabled)
> + darg.async = ctx->async_capable;
> + else
> + darg.async = false;
> +
> + err = tls_rx_one_record(sk, NULL, &darg);
> + if (err < 0) {
> + tls_err_abort(sk, -EBADMSG);
> + goto read_sock_end;
> + }
> +
> + sk_flush_backlog(sk);
> + skb = darg.skb;
> + rxm = strp_msg(skb);
> +
> + tls_rx_rec_done(ctx);
> + }
> +
> + used = read_actor(desc, skb, rxm->offset, rxm->full_len);
> + if (used <= 0) {
> + if (!copied)
> + err = used;
> + goto read_sock_end;
> + }
> + copied += used;
> + if (used < rxm->full_len) {
> + rxm->offset += used;
> + rxm->full_len -= used;
> + if (!desc->count)
> + goto read_sock_requeue;
> + } else {
> + consume_skb(skb);
> + if (!desc->count)
> + skb = NULL;
> + }
> + } while (skb);
> +
> +read_sock_end:
> + tls_rx_reader_release(sk, ctx);
> +psock_put:
> + if (psock)
> + sk_psock_put(sk, psock);
> + return copied ? : err;
> +
> +read_sock_requeue:
> + __skb_queue_head(&ctx->rx_list, skb);
> + goto read_sock_end;
> +}
> +
> bool tls_sw_sock_is_readable(struct sock *sk)
> {
> struct tls_context *tls_ctx = tls_get_ctx(sk);
Powered by blists - more mailing lists