Message-ID: <20230607101945.65c5df51@kernel.org>
Date: Wed, 7 Jun 2023 10:19:45 -0700
From: Jakub Kicinski <kuba@...nel.org>
To: David Howells <dhowells@...hat.com>
Cc: netdev@...r.kernel.org,
Linus Torvalds <torvalds@...ux-foundation.org>,
Chuck Lever <chuck.lever@...cle.com>,
Boris Pismenny <borisp@...dia.com>,
John Fastabend <john.fastabend@...il.com>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Paolo Abeni <pabeni@...hat.com>,
Willem de Bruijn <willemdebruijn.kernel@...il.com>,
David Ahern <dsahern@...nel.org>,
Matthew Wilcox <willy@...radead.org>,
Jens Axboe <axboe@...nel.dk>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH net-next v5 11/14] tls/sw: Support MSG_SPLICE_PAGES
On Wed, 7 Jun 2023 15:05:56 +0100 David Howells wrote:
> +static int tls_sw_sendmsg_splice(struct sock *sk, struct msghdr *msg,
> + struct sk_msg *msg_pl, size_t try_to_copy,
> + ssize_t *copied)
> +{
> + struct page *page = NULL, **pages = &page;
> +
> + do {
> + ssize_t part;
> + size_t off;
> + bool put = false;
> +
> + part = iov_iter_extract_pages(&msg->msg_iter, &pages,
> + try_to_copy, 1, 0, &off);
> + if (part <= 0)
> + return part ?: -EIO;
> +
> + if (WARN_ON_ONCE(!sendpage_ok(page))) {
> + iov_iter_revert(&msg->msg_iter, part);
> + return -EIO;
> + }
> +
> + sk_msg_page_add(msg_pl, page, part, off);
> + sk_mem_charge(sk, part);
> + if (put)
> + put_page(page);
is put ever set to true? AFAICS it's initialized to false and never
changed in this hunk, so the put_page() looks dead -- rough sketch
without it below, after the function.
> + *copied += part;
> + try_to_copy -= part;
> + } while (try_to_copy && !sk_msg_full(msg_pl));
> +
> + return 0;
> +}
> +
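FWIW, if nothing does flip it, the helper could lose the flag entirely.
Completely untested sketch, same helpers as in the patch, just without
put/put_page() and with a few comments added:

static int tls_sw_sendmsg_splice(struct sock *sk, struct msghdr *msg,
                                 struct sk_msg *msg_pl, size_t try_to_copy,
                                 ssize_t *copied)
{
        struct page *page = NULL, **pages = &page;

        do {
                ssize_t part;
                size_t off;

                /* Extract at most one page from the iterator, no copy. */
                part = iov_iter_extract_pages(&msg->msg_iter, &pages,
                                              try_to_copy, 1, 0, &off);
                if (part <= 0)
                        return part ?: -EIO;

                /* Reject pages that aren't safe to hand off by reference
                 * (slab pages, zero refcount).
                 */
                if (WARN_ON_ONCE(!sendpage_ok(page))) {
                        iov_iter_revert(&msg->msg_iter, part);
                        return -EIO;
                }

                /* Attach the page to the plaintext record and charge
                 * the bytes to the socket's send allocation.
                 */
                sk_msg_page_add(msg_pl, page, part, off);
                sk_mem_charge(sk, part);

                *copied += part;
                try_to_copy -= part;
        } while (try_to_copy && !sk_msg_full(msg_pl));

        return 0;
}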
> int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
> {
> long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
> @@ -1020,6 +1052,17 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
> full_record = true;
> }
>
> + if (try_to_copy && (msg->msg_flags & MSG_SPLICE_PAGES)) {
> + ret = tls_sw_sendmsg_splice(sk, msg, msg_pl,
> + try_to_copy, &copied);
> + if (ret < 0)
> + goto send_end;
> + tls_ctx->pending_open_record_frags = true;
> + if (full_record || eor || sk_msg_full(msg_pl))
> + goto copied;
> + continue;
> + }
> +
> if (!is_kvec && (full_record || eor) && !async_capable) {
> u32 first = msg_pl->sg.end;
>
> @@ -1082,8 +1125,9 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
> /* Open records defined only if successfully copied, otherwise
> * we would trim the sg but not reset the open record frags.
> */
> - tls_ctx->pending_open_record_frags = true;
> copied += try_to_copy;
> +copied:
> + tls_ctx->pending_open_record_frags = true;
Why move the pending_open_record_frags assignment below the new label
if the splice path also sets it before jumping? One of the two looks
redundant.
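E.g. (completely untested, assuming the splice branch is the only user
of the new label) the assignment could stay where it was, with the label
added after it, since the splice path already sets the flag itself
before jumping:

        /* Open records defined only if successfully copied, otherwise
         * we would trim the sg but not reset the open record frags.
         */
        tls_ctx->pending_open_record_frags = true;
        copied += try_to_copy;
copied:
        if (full_record || eor) {
                ...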
> if (full_record || eor) {
> ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
> record_type, &copied,