Message-Id: <20230331160914.1608208-54-dhowells@redhat.com>
Date: Fri, 31 Mar 2023 17:09:12 +0100
From: David Howells <dhowells@...hat.com>
To: Matthew Wilcox <willy@...radead.org>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>
Cc: David Howells <dhowells@...hat.com>,
Al Viro <viro@...iv.linux.org.uk>,
Christoph Hellwig <hch@...radead.org>,
Jens Axboe <axboe@...nel.dk>, Jeff Layton <jlayton@...nel.org>,
Christian Brauner <brauner@...nel.org>,
Chuck Lever III <chuck.lever@...cle.com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
netdev@...r.kernel.org, linux-fsdevel@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-mm@...ck.org,
Philipp Reisner <philipp.reisner@...bit.com>,
Lars Ellenberg <lars.ellenberg@...bit.com>,
Christoph Böhmwalder
<christoph.boehmwalder@...bit.com>, drbd-dev@...ts.linbit.com,
linux-block@...r.kernel.org
Subject: [PATCH v3 53/55] drbd: Use sendmsg(MSG_SPLICE_PAGES) rather than sendpage()
Use sendmsg() in _drbd_send_page() rather than calling sendpage() or
_drbd_no_send_page(), setting MSG_SPLICE_PAGES only when splicing is not
disabled and the page passes sendpage_ok().
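
For reference, the core pattern applied here is roughly the following (a
simplified sketch distilled from the hunk below; the retry loop, congestion
tracking and error handling are omitted):

	struct bio_vec bvec;
	struct msghdr msg = { .msg_flags = msg_flags, };
	int sent;

	/* Only ask the socket to splice (take references on) the page when
	 * it is safe to do so, e.g. not a slab page; otherwise sendmsg()
	 * just copies the data.
	 */
	if (!drbd_disable_sendpage && sendpage_ok(page))
		msg.msg_flags |= MSG_NOSIGNAL | MSG_SPLICE_PAGES;

	/* Describe the page fragment and point the message iterator at it. */
	bvec_set_page(&bvec, page, offset, size);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);

	sent = sock_sendmsg(socket, &msg);

Because sock_sendmsg() copies the data when MSG_SPLICE_PAGES is not set, the
separate _drbd_no_send_page() fallback path is no longer needed here.
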
Signed-off-by: David Howells <dhowells@...hat.com>
cc: Philipp Reisner <philipp.reisner@...bit.com>
cc: Lars Ellenberg <lars.ellenberg@...bit.com>
cc: "Christoph Böhmwalder" <christoph.boehmwalder@...bit.com>
cc: Jens Axboe <axboe@...nel.dk>
cc: "David S. Miller" <davem@...emloft.net>
cc: Eric Dumazet <edumazet@...gle.com>
cc: Jakub Kicinski <kuba@...nel.org>
cc: Paolo Abeni <pabeni@...hat.com>
cc: drbd-dev@...ts.linbit.com
cc: linux-block@...r.kernel.org
cc: netdev@...r.kernel.org
---
drivers/block/drbd/drbd_main.c | 23 ++++++++++++-----------
1 file changed, 12 insertions(+), 11 deletions(-)
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 2c764f7ee4a7..e5f90abd29b6 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1532,7 +1532,8 @@ static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *pa
 		    int offset, size_t size, unsigned msg_flags)
 {
 	struct socket *socket = peer_device->connection->data.socket;
-	int len = size;
+	struct bio_vec bvec;
+	struct msghdr msg = { .msg_flags = msg_flags, };
 	int err = -EIO;
 
 	/* e.g. XFS meta- & log-data is in slab pages, which have a
@@ -1541,33 +1542,33 @@ static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *pa
 	 * put_page(); and would cause either a VM_BUG directly, or
 	 * __page_cache_release a page that would actually still be referenced
 	 * by someone, leading to some obscure delayed Oops somewhere else. */
-	if (drbd_disable_sendpage || !sendpage_ok(page))
-		return _drbd_no_send_page(peer_device, page, offset, size, msg_flags);
+	if (!drbd_disable_sendpage && sendpage_ok(page))
+		msg.msg_flags |= MSG_NOSIGNAL | MSG_SPLICE_PAGES;
+
+	bvec_set_page(&bvec, page, offset, size);
+	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
 
-	msg_flags |= MSG_NOSIGNAL;
 	drbd_update_congested(peer_device->connection);
 	do {
 		int sent;
 
-		sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
+		sent = sock_sendmsg(socket, &msg);
 		if (sent <= 0) {
 			if (sent == -EAGAIN) {
 				if (we_should_drop_the_connection(peer_device->connection, socket))
 					break;
 				continue;
 			}
-			drbd_warn(peer_device->device, "%s: size=%d len=%d sent=%d\n",
-			     __func__, (int)size, len, sent);
+			drbd_warn(peer_device->device, "%s: size=%d len=%zu sent=%d\n",
+			     __func__, (int)size, msg_data_left(&msg), sent);
 			if (sent < 0)
 				err = sent;
 			break;
 		}
-		len -= sent;
-		offset += sent;
-	} while (len > 0 /* THINK && device->cstate >= C_CONNECTED*/);
+	} while (msg_data_left(&msg) /* THINK && device->cstate >= C_CONNECTED*/);
 	clear_bit(NET_CONGESTED, &peer_device->connection->flags);
 
-	if (len == 0) {
+	if (!msg_data_left(&msg)) {
 		err = 0;
 		peer_device->device->send_cnt += size >> 9;
 	}