Message-ID: <6F2985FF-2474-4F36-BD94-5F8E97E46AC2@oracle.com>
Date:   Wed, 29 Mar 2023 15:28:19 +0000
From:   Chuck Lever III <chuck.lever@...cle.com>
To:     David Howells <dhowells@...hat.com>
CC:     Matthew Wilcox <willy@...radead.org>,
        "David S. Miller" <davem@...emloft.net>,
        Eric Dumazet <edumazet@...gle.com>,
        Jakub Kicinski <kuba@...nel.org>,
        Paolo Abeni <pabeni@...hat.com>,
        Al Viro <viro@...iv.linux.org.uk>,
        Christoph Hellwig <hch@...radead.org>,
        Jens Axboe <axboe@...nel.dk>, Jeff Layton <jlayton@...nel.org>,
        Christian Brauner <brauner@...nel.org>,
        Linus Torvalds <torvalds@...ux-foundation.org>,
        "open list:NETWORKING [GENERAL]" <netdev@...r.kernel.org>,
        linux-fsdevel <linux-fsdevel@...r.kernel.org>,
        Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
        Linux Memory Management List <linux-mm@...ck.org>,
        Trond Myklebust <trond.myklebust@...merspace.com>,
        Anna Schumaker <anna@...nel.org>,
        Linux NFS Mailing List <linux-nfs@...r.kernel.org>
Subject: Re: [RFC PATCH v2 40/48] sunrpc: Use sendmsg(MSG_SPLICE_PAGES) rather
 than sendpage



> On Mar 29, 2023, at 10:13 AM, David Howells <dhowells@...hat.com> wrote:
> 
> When transmitting data, call down into TCP using a single sendmsg with
> MSG_SPLICE_PAGES to indicate that content should be spliced rather than
> performing several sendmsg and sendpage calls to transmit header, data
> pages and trailer.
> 
> To make this work, the data is assembled in a bio_vec array and attached to
> a BVEC-type iterator.  The header and trailer are copied into page
> fragments so that they can be freed with put_page and attached to iterators
> of their own.  An iterator-of-iterators is then created to bridge all three
> iterators (headers, data, trailer) and that is passed to sendmsg to pass
> the entire message in a single call.
> 
> Signed-off-by: David Howells <dhowells@...hat.com>
> cc: Trond Myklebust <trond.myklebust@...merspace.com>
> cc: Anna Schumaker <anna@...nel.org>
> cc: Chuck Lever <chuck.lever@...cle.com>
> cc: Jeff Layton <jlayton@...nel.org>
> cc: "David S. Miller" <davem@...emloft.net>
> cc: Eric Dumazet <edumazet@...gle.com>
> cc: Jakub Kicinski <kuba@...nel.org>
> cc: Paolo Abeni <pabeni@...hat.com>
> cc: Jens Axboe <axboe@...nel.dk>
> cc: Matthew Wilcox <willy@...radead.org>
> cc: linux-nfs@...r.kernel.org
> cc: netdev@...r.kernel.org
> ---
> include/linux/sunrpc/svc.h | 11 +++--
> net/sunrpc/svcsock.c       | 89 +++++++++++++++-----------------------
> 2 files changed, 40 insertions(+), 60 deletions(-)
> 
> diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
> index 877891536c2f..456ae554aa11 100644
> --- a/include/linux/sunrpc/svc.h
> +++ b/include/linux/sunrpc/svc.h
> @@ -161,16 +161,15 @@ static inline bool svc_put_not_last(struct svc_serv *serv)
> extern u32 svc_max_payload(const struct svc_rqst *rqstp);
> 
> /*
> - * RPC Requsts and replies are stored in one or more pages.
> + * RPC Requests and replies are stored in one or more pages.
>  * We maintain an array of pages for each server thread.
>  * Requests are copied into these pages as they arrive.  Remaining
>  * pages are available to write the reply into.
>  *
> - * Pages are sent using ->sendpage so each server thread needs to
> - * allocate more to replace those used in sending.  To help keep track
> - * of these pages we have a receive list where all pages initialy live,
> - * and a send list where pages are moved to when there are to be part
> - * of a reply.
> + * Pages are sent using ->sendmsg with MSG_SPLICE_PAGES so each server thread
> + * needs to allocate more to replace those used in sending.  To help keep track
> + * of these pages we have a receive list where all pages initialy live, and a
> + * send list where pages are moved to when there are to be part of a reply.
>  *
>  * We use xdr_buf for holding responses as it fits well with NFS
>  * read responses (that have a header, and some data pages, and possibly
> diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
> index 03a4f5615086..f1cc53aad6e0 100644
> --- a/net/sunrpc/svcsock.c
> +++ b/net/sunrpc/svcsock.c
> @@ -1060,16 +1060,8 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
> 	return 0;	/* record not complete */
> }
> 
> -static int svc_tcp_send_kvec(struct socket *sock, const struct kvec *vec,
> -			      int flags)
> -{
> -	return kernel_sendpage(sock, virt_to_page(vec->iov_base),
> -			       offset_in_page(vec->iov_base),
> -			       vec->iov_len, flags);
> -}
> -
> /*
> - * kernel_sendpage() is used exclusively to reduce the number of
> + * MSG_SPLICE_PAGES is used exclusively to reduce the number of
>  * copy operations in this path. Therefore the caller must ensure
>  * that the pages backing @xdr are unchanging.
>  *
> @@ -1081,65 +1073,54 @@ static int svc_tcp_sendmsg(struct socket *sock, struct xdr_buf *xdr,
> {
> 	const struct kvec *head = xdr->head;
> 	const struct kvec *tail = xdr->tail;
> -	struct kvec rm = {
> -		.iov_base	= &marker,
> -		.iov_len	= sizeof(marker),
> -	};
> +	struct iov_iter iters[3];
> +	struct bio_vec head_bv, tail_bv;
> 	struct msghdr msg = {
> -		.msg_flags	= 0,
> +		.msg_flags	= MSG_SPLICE_PAGES,
> 	};
> -	int ret;
> +	void *m, *t;
> +	int ret, n = 2, size;
> 
> 	*sentp = 0;
> 	ret = xdr_alloc_bvec(xdr, GFP_KERNEL);
> 	if (ret < 0)
> 		return ret;
> 
> -	ret = kernel_sendmsg(sock, &msg, &rm, 1, rm.iov_len);
> -	if (ret < 0)
> -		return ret;
> -	*sentp += ret;
> -	if (ret != rm.iov_len)
> -		return -EAGAIN;
> +	m = page_frag_alloc(NULL, sizeof(marker) + head->iov_len + tail->iov_len,
> +			    GFP_KERNEL);
> +	if (!m)
> +		return -ENOMEM;

I'm not excited about adding another memory allocation for this
very common case.

It seems to me that you could eliminate the kernel_sendpage()
consumer here in svc_tcp_sendmsg() without also replacing the
kernel_sendmsg() calls. That would be a conservative, step-wise
approach that carries less risk and still accomplishes your stated
goal without more radical surgery.
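
Roughly what I mean by replacing just the loop, as an untested
sketch that borrows the iov_iter_bvec()/MSG_SPLICE_PAGES setup from
your patch for the payload pages only (page_base handling elided
for brevity):

	if (xdr->page_len) {
		struct msghdr msg = {
			.msg_flags	= MSG_SPLICE_PAGES,
		};

		/* Splice all of the payload pages out with a single
		 * sendmsg() instead of looping over kernel_sendpage().
		 */
		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, xdr->bvec,
			      xdr_buf_pagecount(xdr), xdr->page_len);
		ret = sock_sendmsg(sock, &msg);
		if (ret < 0)
			return ret;
		*sentp += ret;
		if (ret != xdr->page_len)
			goto out;
	}

(The head and tail kvecs could presumably be sent with
kernel_sendmsg() the same way the record marker already is, again
without any new allocation.)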

Later maybe we can find a way to deal with the head, tail, and
record marker without additional memory allocations. I believe that
on the server side, for example, head and tail already live in pages
rather than in kmalloc'd memory. That would need some code auditing,
but I'm OK with combining these into a single sock_sendmsg() call
once we've worked out the disposition of the xdr_buf components
outside of the bvec. That seems a bit outside your stated goal.
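
For illustration only, the shape I'm imagining (assuming head and
tail really are page-backed and stay stable until the send
completes, which is exactly the part that needs auditing) would
describe them in place instead of copying them into a fresh page
fragment:

	struct bio_vec head_bv, tail_bv;

	/* No copy and no extra allocation: point the bio_vecs at the
	 * memory that already backs head and tail. */
	if (head->iov_len)
		bvec_set_virt(&head_bv, head->iov_base, head->iov_len);
	if (tail->iov_len)
		bvec_set_virt(&tail_bv, tail->iov_base, tail->iov_len);

The record marker and the payload pages would then have to be merged
with these, in order, into a single bvec array or iterator so that
one sock_sendmsg() covers the whole reply; that's the piece I'd
rather work out separately.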

Simply replacing the kernel_sendpage() loop would be a
straightforward change and easy to evaluate and test, and
I'd welcome that without hesitation.


> -	ret = svc_tcp_send_kvec(sock, head, 0);
> -	if (ret < 0)
> -		return ret;
> -	*sentp += ret;
> -	if (ret != head->iov_len)
> -		goto out;
> +	memcpy(m, &marker, sizeof(marker));
> +	if (head->iov_len)
> +		memcpy(m + sizeof(marker), head->iov_base, head->iov_len);
> +	bvec_set_virt(&head_bv, m, sizeof(marker) + head->iov_len);
> +	iov_iter_bvec(&iters[0], ITER_SOURCE, &head_bv, 1,
> +		      sizeof(marker) + head->iov_len);
> 
> -	if (xdr->page_len) {
> -		unsigned int offset, len, remaining;
> -		struct bio_vec *bvec;
> -
> -		bvec = xdr->bvec + (xdr->page_base >> PAGE_SHIFT);
> -		offset = offset_in_page(xdr->page_base);
> -		remaining = xdr->page_len;
> -		while (remaining > 0) {
> -			len = min(remaining, bvec->bv_len - offset);
> -			ret = kernel_sendpage(sock, bvec->bv_page,
> -					      bvec->bv_offset + offset,
> -					      len, 0);
> -			if (ret < 0)
> -				return ret;
> -			*sentp += ret;
> -			if (ret != len)
> -				goto out;
> -			remaining -= len;
> -			offset = 0;
> -			bvec++;
> -		}
> -	}
> +	iov_iter_bvec(&iters[1], ITER_SOURCE, xdr->bvec,
> +		      xdr_buf_pagecount(xdr), xdr->page_len);
> 
> 	if (tail->iov_len) {
> -		ret = svc_tcp_send_kvec(sock, tail, 0);
> -		if (ret < 0)
> -			return ret;
> -		*sentp += ret;
> +		t = page_frag_alloc(NULL, tail->iov_len, GFP_KERNEL);
> +		if (!t)
> +			return -ENOMEM;
> +		memcpy(t, tail->iov_base, tail->iov_len);
> +		bvec_set_virt(&tail_bv,  t, tail->iov_len);
> +		iov_iter_bvec(&iters[2], ITER_SOURCE, &tail_bv, 1, tail->iov_len);
> +		n++;
> 	}
> 
> -out:
> +	size = sizeof(marker) + head->iov_len + xdr->page_len + tail->iov_len;

	size = sizeof(marker) + xdr->len;

If xdr->len != head->iov_len + xdr->page_len + tail->iov_len,
that is a bug these days.


> +	iov_iter_iterlist(&msg.msg_iter, ITER_SOURCE, iters, n, size);
> +
> +	ret = sock_sendmsg(sock, &msg);
> +	if (ret < 0)
> +		return ret;
> +	if (ret > 0)
> +		*sentp = ret;
> +	if (ret != size)
> +		return -EAGAIN;
> 	return 0;
> }
> 
> 

--
Chuck Lever

