Message-ID: <6ec232ab-fcc6-3afb-8c38-849ad25ef6c5@sberdevices.ru>
Date: Sun, 6 Nov 2022 19:43:59 +0000
From: Arseniy Krasnov <AVKrasnov@...rdevices.ru>
To: Stefano Garzarella <sgarzare@...hat.com>,
Stefan Hajnoczi <stefanha@...hat.com>,
"Michael S. Tsirkin" <mst@...hat.com>,
Jason Wang <jasowang@...hat.com>,
"David S. Miller" <davem@...emloft.net>,
"edumazet@...gle.com" <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Krasnov Arseniy <oxffffaa@...il.com>
CC: "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"kvm@...r.kernel.org" <kvm@...r.kernel.org>,
"virtualization@...ts.linux-foundation.org"
<virtualization@...ts.linux-foundation.org>,
"netdev@...r.kernel.org" <netdev@...r.kernel.org>,
kernel <kernel@...rdevices.ru>
Subject: [RFC PATCH v3 05/11] vhost/vsock: switch packet's buffer allocation
This changes the packet buffer allocation logic: it now depends on whether rx
zerocopy is enabled or disabled on the destination socket. Thus, the socket
lookup is now performed here, not in 'virtio_transport_common.c', and in the
zerocopy case the buffer is allocated with raw calls to the buddy allocator.
If zerocopy is disabled, buffers are still allocated with 'kvmalloc()' (as
before this patch).

Signed-off-by: Arseniy Krasnov <AVKrasnov@...rdevices.ru>
---
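Note for reviewers: below is a condensed sketch of the allocation split this
patch introduces. The helper name vhost_vsock_alloc_pkt_buf() is hypothetical
and exists only for illustration; 'rx_zerocopy_on' is the per-socket flag
coming from earlier in this series, and in the actual patch the logic is
inlined in vhost_vsock_alloc_pkt() (see the diff below).

	/* Illustration only -- not part of the patch. */
	static int vhost_vsock_alloc_pkt_buf(struct virtio_vsock_pkt *pkt,
					     struct vsock_sock *vsk)
	{
		if (!vsk->rx_zerocopy_on) {
			/* Old behaviour: kernel buffer from kvmalloc(),
			 * copied to userspace on dequeue.
			 */
			pkt->buf = kvmalloc(pkt->len, GFP_KERNEL);
			if (!pkt->buf)
				return -ENOMEM;
			pkt->slab_buf = true;
		} else {
			/* Zerocopy: whole pages taken directly from the
			 * buddy allocator, so they can later be handed to
			 * the receiver without copying.
			 */
			struct page *buf_page;

			buf_page = alloc_pages(GFP_KERNEL, get_order(pkt->len));
			if (!buf_page)
				return -ENOMEM;
			pkt->buf = page_to_virt(buf_page);
		}
		return 0;
	}
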
drivers/vhost/vsock.c | 56 +++++++++++++++++++++++++++++++++++--------
1 file changed, 46 insertions(+), 10 deletions(-)
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 6f3d9f02cc1d..191a5b94aa7c 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -354,10 +354,14 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
- unsigned int out, unsigned int in)
+ unsigned int out, unsigned int in,
+ struct sock **sk)
{
struct virtio_vsock_pkt *pkt;
+ struct sockaddr_vm src, dst;
+ struct vhost_vsock *vsock;
struct iov_iter iov_iter;
+ struct vsock_sock *vsk;
size_t nbytes;
size_t len;
@@ -381,6 +385,18 @@ vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
return NULL;
}
+ vsock_addr_init(&src, le64_to_cpu(pkt->hdr.src_cid),
+ le32_to_cpu(pkt->hdr.src_port));
+ vsock_addr_init(&dst, le64_to_cpu(pkt->hdr.dst_cid),
+ le32_to_cpu(pkt->hdr.dst_port));
+
+ *sk = vsock_find_connected_socket(&src, &dst);
+ if (!(*sk)) {
+ *sk = vsock_find_bound_socket(&dst);
+ if (!(*sk))
+ return pkt;
+ }
+
pkt->len = le32_to_cpu(pkt->hdr.len);
/* No payload */
@@ -393,14 +409,32 @@ vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
return NULL;
}
- pkt->buf = kvmalloc(pkt->len, GFP_KERNEL);
- if (!pkt->buf) {
- kfree(pkt);
- return NULL;
- }
-
- pkt->slab_buf = true;
pkt->buf_len = pkt->len;
+ vsock = container_of(vq->dev, struct vhost_vsock, dev);
+
+ vsk = vsock_sk(*sk);
+
+ if (!vsk->rx_zerocopy_on) {
+ pkt->buf = kvmalloc(pkt->len, GFP_KERNEL);
+
+ if (!pkt->buf) {
+ kfree(pkt);
+ return NULL;
+ }
+
+ pkt->slab_buf = true;
+ } else {
+ struct page *buf_page;
+
+ buf_page = alloc_pages(GFP_KERNEL, get_order(pkt->len));
+
+ if (buf_page == NULL) {
+ kfree(pkt);
+ return NULL;
+ }
+
+ pkt->buf = page_to_virt(buf_page);
+ }
nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
if (nbytes != pkt->len) {
@@ -512,6 +546,8 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
vhost_disable_notify(&vsock->dev, vq);
do {
+ struct sock *sk = NULL;
+
if (!vhost_vsock_more_replies(vsock)) {
/* Stop tx until the device processes already
* pending replies. Leave tx virtqueue
@@ -533,7 +569,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
break;
}
- pkt = vhost_vsock_alloc_pkt(vq, out, in);
+ pkt = vhost_vsock_alloc_pkt(vq, out, in, &sk);
if (!pkt) {
vq_err(vq, "Faulted on pkt\n");
continue;
@@ -548,7 +584,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid &&
le64_to_cpu(pkt->hdr.dst_cid) ==
vhost_transport_get_local_cid())
- virtio_transport_recv_pkt(&vhost_transport, NULL, pkt);
+ virtio_transport_recv_pkt(&vhost_transport, sk, pkt);
else
virtio_transport_free_pkt(pkt);
--
2.35.0