Message-ID: <20230623225513.2732256-2-dhowells@redhat.com>
Date: Fri, 23 Jun 2023 23:54:58 +0100
From: David Howells <dhowells@...hat.com>
To: netdev@...r.kernel.org
Cc: David Howells <dhowells@...hat.com>,
Alexander Duyck <alexander.duyck@...il.com>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Willem de Bruijn <willemdebruijn.kernel@...il.com>,
David Ahern <dsahern@...nel.org>,
Matthew Wilcox <willy@...radead.org>,
Jens Axboe <axboe@...nel.dk>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, Bernard Metzler <bmt@...ich.ibm.com>,
Jason Gunthorpe <jgg@...pe.ca>,
Leon Romanovsky <leon@...nel.org>,
John Fastabend <john.fastabend@...il.com>,
Jakub Sitnicki <jakub@...udflare.com>,
Karsten Graul <kgraul@...ux.ibm.com>,
Wenjia Zhang <wenjia@...ux.ibm.com>,
Jan Karcher <jaka@...ux.ibm.com>,
"D. Wythe" <alibuda@...ux.alibaba.com>,
Tony Lu <tonylu@...ux.alibaba.com>,
Wen Gu <guwen@...ux.alibaba.com>,
Boris Pismenny <borisp@...dia.com>,
Steffen Klassert <steffen.klassert@...unet.com>,
Herbert Xu <herbert@...dor.apana.org.au>, bpf@...r.kernel.org,
linux-s390@...r.kernel.org, linux-rdma@...r.kernel.org
Subject: [PATCH net-next v5 01/16] tcp_bpf, smc, tls, espintcp, siw: Reduce MSG_SENDPAGE_NOTLAST usage
As MSG_SENDPAGE_NOTLAST is being phased out along with sendpage(), don't
use it any deeper in the stack than the sendpage methods themselves;
instead, translate it to MSG_MORE at that boundary and use MSG_MORE from
there on.
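To illustrate the translation (a minimal sketch only; example_sendpage() is
a hypothetical stand-in for the per-protocol sendpage methods patched
below, not code from this series):

	/* Hypothetical ->sendpage() wrapper: turn the legacy
	 * MSG_SENDPAGE_NOTLAST hint into MSG_MORE at the boundary so that
	 * nothing deeper in the stack needs to look at the old flag.
	 */
	static ssize_t example_sendpage(struct socket *sock, struct page *page,
					int offset, size_t size, int flags)
	{
		struct msghdr msg = {
			.msg_flags = flags & ~MSG_SENDPAGE_NOTLAST,
		};

		if (flags & MSG_SENDPAGE_NOTLAST)
			msg.msg_flags |= MSG_MORE;

		/* ... attach the page to msg.msg_iter, then hand off ... */
		return sock_sendmsg(sock, &msg);
	}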
Signed-off-by: David Howells <dhowells@...hat.com>
cc: Willem de Bruijn <willemdebruijn.kernel@...il.com>
cc: Bernard Metzler <bmt@...ich.ibm.com>
cc: Jason Gunthorpe <jgg@...pe.ca>
cc: Leon Romanovsky <leon@...nel.org>
cc: John Fastabend <john.fastabend@...il.com>
cc: Jakub Sitnicki <jakub@...udflare.com>
cc: Eric Dumazet <edumazet@...gle.com>
cc: "David S. Miller" <davem@...emloft.net>
cc: David Ahern <dsahern@...nel.org>
cc: Jakub Kicinski <kuba@...nel.org>
cc: Paolo Abeni <pabeni@...hat.com>
cc: Karsten Graul <kgraul@...ux.ibm.com>
cc: Wenjia Zhang <wenjia@...ux.ibm.com>
cc: Jan Karcher <jaka@...ux.ibm.com>
cc: "D. Wythe" <alibuda@...ux.alibaba.com>
cc: Tony Lu <tonylu@...ux.alibaba.com>
cc: Wen Gu <guwen@...ux.alibaba.com>
cc: Boris Pismenny <borisp@...dia.com>
cc: Steffen Klassert <steffen.klassert@...unet.com>
cc: Herbert Xu <herbert@...dor.apana.org.au>
cc: netdev@...r.kernel.org
cc: bpf@...r.kernel.org
cc: linux-s390@...r.kernel.org
cc: linux-rdma@...r.kernel.org
---
Notes:
ver #3)
- In tcp_bpf, reset msg_flags on each iteration to clear MSG_MORE.
- In tcp_bpf, set MSG_MORE if there's more data in the sk_msg (both
changes are sketched below).
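As an outline, the flag handling in the tcp_bpf_push() retry loop after
this patch looks like this (a sketch mirroring the net/ipv4/tcp_bpf.c
hunk below, not additional code):

	retry:
		/* Rebuild msg_flags from the caller's flags on every pass
		 * so a MSG_MORE set on a previous iteration can't leak into
		 * this one.
		 */
		msghdr.msg_flags = flags | MSG_SPLICE_PAGES;
		if (tls_sw_has_ctx_tx(sk))
			msghdr.msg_flags |= MSG_SENDPAGE_NOPOLICY;
		/* Set MSG_MORE afresh only if the sk_msg still holds data
		 * beyond what is being pushed now.
		 */
		if (size < sge->length && msg->sg.start != msg->sg.end)
			msghdr.msg_flags |= MSG_MORE;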
drivers/infiniband/sw/siw/siw_qp_tx.c | 5 ++---
net/ipv4/tcp_bpf.c | 5 +++--
net/smc/smc_tx.c | 6 ++++--
net/tls/tls_device.c | 4 ++--
net/xfrm/espintcp.c | 10 ++++++----
5 files changed, 17 insertions(+), 13 deletions(-)
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index ffb16beb6c30..7c7a51d36d0c 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -325,8 +325,7 @@ static int siw_tcp_sendpages(struct socket *s, struct page **page, int offset,
{
struct bio_vec bvec;
struct msghdr msg = {
- .msg_flags = (MSG_MORE | MSG_DONTWAIT | MSG_SENDPAGE_NOTLAST |
- MSG_SPLICE_PAGES),
+ .msg_flags = (MSG_MORE | MSG_DONTWAIT | MSG_SPLICE_PAGES),
};
struct sock *sk = s->sk;
int i = 0, rv = 0, sent = 0;
@@ -335,7 +334,7 @@ static int siw_tcp_sendpages(struct socket *s, struct page **page, int offset,
size_t bytes = min_t(size_t, PAGE_SIZE - offset, size);
if (size + offset <= PAGE_SIZE)
- msg.msg_flags &= ~MSG_SENDPAGE_NOTLAST;
+ msg.msg_flags &= ~MSG_MORE;
tcp_rate_check_app_limited(sk);
bvec_set_page(&bvec, page[i], bytes, offset);
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index 5a84053ac62b..31d6005cea9b 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -88,9 +88,9 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes,
int flags, bool uncharge)
{
+ struct msghdr msghdr = {};
bool apply = apply_bytes;
struct scatterlist *sge;
- struct msghdr msghdr = { .msg_flags = flags | MSG_SPLICE_PAGES, };
struct page *page;
int size, ret = 0;
u32 off;
@@ -107,11 +107,12 @@ static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes,
tcp_rate_check_app_limited(sk);
retry:
+ msghdr.msg_flags = flags | MSG_SPLICE_PAGES;
has_tx_ulp = tls_sw_has_ctx_tx(sk);
if (has_tx_ulp)
msghdr.msg_flags |= MSG_SENDPAGE_NOPOLICY;
- if (flags & MSG_SENDPAGE_NOTLAST)
+ if (size < sge->length && msg->sg.start != msg->sg.end)
msghdr.msg_flags |= MSG_MORE;
bvec_set_page(&bvec, page, size, off);
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index 45128443f1f1..9b9e0a190734 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -168,8 +168,7 @@ static bool smc_tx_should_cork(struct smc_sock *smc, struct msghdr *msg)
* should known how/when to uncork it.
*/
if ((msg->msg_flags & MSG_MORE ||
- smc_tx_is_corked(smc) ||
- msg->msg_flags & MSG_SENDPAGE_NOTLAST) &&
+ smc_tx_is_corked(smc)) &&
atomic_read(&conn->sndbuf_space))
return true;
@@ -306,6 +305,9 @@ int smc_tx_sendpage(struct smc_sock *smc, struct page *page, int offset,
struct kvec iov;
int rc;
+ if (flags & MSG_SENDPAGE_NOTLAST)
+ msg.msg_flags |= MSG_MORE;
+
iov.iov_base = kaddr + offset;
iov.iov_len = size;
iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, size);
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index b82770f68807..975299d7213b 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -449,7 +449,7 @@ static int tls_push_data(struct sock *sk,
return -sk->sk_err;
flags |= MSG_SENDPAGE_DECRYPTED;
- tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
+ tls_push_record_flags = flags | MSG_MORE;
timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
if (tls_is_partially_sent_record(tls_ctx)) {
@@ -532,7 +532,7 @@ static int tls_push_data(struct sock *sk,
if (!size) {
last_record:
tls_push_record_flags = flags;
- if (flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE)) {
+ if (flags & MSG_MORE) {
more = true;
break;
}
diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c
index 3504925babdb..d3b3f9e720b3 100644
--- a/net/xfrm/espintcp.c
+++ b/net/xfrm/espintcp.c
@@ -205,13 +205,15 @@ static int espintcp_sendskb_locked(struct sock *sk, struct espintcp_msg *emsg,
static int espintcp_sendskmsg_locked(struct sock *sk,
struct espintcp_msg *emsg, int flags)
{
- struct msghdr msghdr = { .msg_flags = flags | MSG_SPLICE_PAGES, };
+ struct msghdr msghdr = {
+ .msg_flags = flags | MSG_SPLICE_PAGES | MSG_MORE,
+ };
struct sk_msg *skmsg = &emsg->skmsg;
+ bool more = flags & MSG_MORE;
struct scatterlist *sg;
int done = 0;
int ret;
- msghdr.msg_flags |= MSG_SENDPAGE_NOTLAST;
sg = &skmsg->sg.data[skmsg->sg.start];
do {
struct bio_vec bvec;
@@ -221,8 +223,8 @@ static int espintcp_sendskmsg_locked(struct sock *sk,
emsg->offset = 0;
- if (sg_is_last(sg))
- msghdr.msg_flags &= ~MSG_SENDPAGE_NOTLAST;
+ if (sg_is_last(sg) && !more)
+ msghdr.msg_flags &= ~MSG_MORE;
p = sg_page(sg);
retry: