Message-ID: <2904969.1680682716@warthog.procyon.org.uk>
Date: Wed, 05 Apr 2023 09:18:36 +0100
From: David Howells <dhowells@...hat.com>
To: Bernard Metzler <BMT@...ich.ibm.com>
Cc: dhowells@...hat.com, "David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Al Viro <viro@...iv.linux.org.uk>,
Christoph Hellwig <hch@...radead.org>,
Jens Axboe <axboe@...nel.dk>, Jeff Layton <jlayton@...nel.org>,
Christian Brauner <brauner@...nel.org>,
Chuck Lever III <chuck.lever@...cle.com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
"netdev@...r.kernel.org" <netdev@...r.kernel.org>,
"linux-fsdevel@...r.kernel.org" <linux-fsdevel@...r.kernel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"linux-mm@...ck.org" <linux-mm@...ck.org>,
Tom Talpey <tom@...pey.com>,
"linux-rdma@...r.kernel.org" <linux-rdma@...r.kernel.org>
Subject: Re: [PATCH v3 38/55] siw: Use sendmsg(MSG_SPLICE_PAGES) rather than sendpage to transmit
Bernard Metzler <BMT@...ich.ibm.com> wrote:
> >  	if (c_tx->state == SIW_SEND_HDR) {
> >  		if (c_tx->use_sendpage) {
> > @@ -457,10 +350,15 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
> >
>
> Couldn't we now collapse the two header handling paths into one, avoiding
> extra 'if (c_tx->use_sendpage) {} else {}' conditions?
Okay, see the attached incremental change.
Note that the calls to page_frag_memdup() I previously added are probably no
longer necessary, as copying of unspliceable data is now done in the protocols
(TCP, IP/UDP, UNIX, etc.). See patch 08 for the TCP version.
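
For reference, with the copying pushed down into the protocol, sending a small
kernel buffer comes down to pointing a bvec at it in place and letting
sendmsg() copy whatever can't be spliced. Roughly (illustrative sketch only,
not part of the patch below; assumes MSG_SPLICE_PAGES is set on the call as
elsewhere in this series):

	struct bio_vec bvec;
	struct msghdr msg = {
		.msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
	};
	void *hdr = &c_tx->pkt.ctrl + c_tx->ctrl_sent;
	unsigned int hdr_len = c_tx->ctrl_len - c_tx->ctrl_sent;
	int rv;

	/* Point the bvec straight at the header; no page_frag_memdup(). */
	bvec_set_virt(&bvec, hdr, hdr_len);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, hdr_len);

	/* The protocol's sendmsg() copies anything it can't splice. */
	rv = sock_sendmsg(s, &msg);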
David
---
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index 28076832da20..edf66a97cf5f 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -335,7 +335,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 	struct siw_sge *sge = &wqe->sqe.sge[c_tx->sge_idx];
 	struct bio_vec bvec[MAX_ARRAY];
 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR };
-	void *trl, *t;
+	void *trl;
 	int seg = 0, do_crc = c_tx->do_crc, is_kva = 0, rv;
 	unsigned int data_len = c_tx->bytes_unsent, hdr_len = 0, trl_len = 0,
 		     sge_off = c_tx->sge_off, sge_idx = c_tx->sge_idx,
@@ -343,25 +343,11 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 		     pbl_idx = c_tx->pbl_idx;
 
 	if (c_tx->state == SIW_SEND_HDR) {
-		if (c_tx->use_sendpage) {
-			rv = siw_tx_ctrl(c_tx, s, MSG_DONTWAIT | MSG_MORE);
-			if (rv)
-				goto done;
+		void *hdr = &c_tx->pkt.ctrl + c_tx->ctrl_sent;
 
-			c_tx->state = SIW_SEND_DATA;
-		} else {
-			const void *hdr = &c_tx->pkt.ctrl + c_tx->ctrl_sent;
-			void *h;
-
-			rv = -ENOMEM;
-			hdr_len = c_tx->ctrl_len - c_tx->ctrl_sent;
-			h = page_frag_memdup(NULL, hdr, hdr_len, GFP_NOFS,
-					     ULONG_MAX);
-			if (!h)
-				goto done;
-			bvec_set_virt(&bvec[0], h, hdr_len);
-			seg = 1;
-		}
+		hdr_len = c_tx->ctrl_len - c_tx->ctrl_sent;
+		bvec_set_virt(&bvec[0], hdr, hdr_len);
+		seg = 1;
 	}
 
 	wqe->processed += data_len;
@@ -466,12 +452,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 		trl = &c_tx->trailer.pad[c_tx->ctrl_sent];
 		trl_len = MAX_TRAILER - c_tx->ctrl_sent;
 	}
-
-	rv = -ENOMEM;
-	t = page_frag_memdup(NULL, trl, trl_len, GFP_NOFS, ULONG_MAX);
-	if (!t)
-		goto done_crc;
-	bvec_set_virt(&bvec[seg], t, trl_len);
+	bvec_set_virt(&bvec[seg], trl, trl_len);
 
 	data_len = c_tx->bytes_unsent;
@@ -480,7 +461,6 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, bvec, seg + 1,
 		      hdr_len + data_len + trl_len);
 	rv = sock_sendmsg(s, &msg);
-
 	if (rv < (int)hdr_len) {
 		/* Not even complete hdr pushed or negative rv */
 		wqe->processed -= data_len;
@@ -541,10 +521,6 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 	}
 
 done_crc:
 	c_tx->do_crc = 0;
-	if (c_tx->state == SIW_SEND_HDR)
-		folio_put(page_folio(bvec[0].bv_page));
-	folio_put(page_folio(bvec[seg].bv_page));
-done:
 	return rv;
 }