Message-Id: <20250701164507.14883-7-will@kernel.org>
Date: Tue, 1 Jul 2025 17:45:05 +0100
From: Will Deacon <will@...nel.org>
To: linux-kernel@...r.kernel.org
Cc: Will Deacon <will@...nel.org>,
Keir Fraser <keirf@...gle.com>,
Steven Moreland <smoreland@...gle.com>,
Frederick Mayle <fmayle@...gle.com>,
Stefan Hajnoczi <stefanha@...hat.com>,
Stefano Garzarella <sgarzare@...hat.com>,
"Michael S. Tsirkin" <mst@...hat.com>,
Jason Wang <jasowang@...hat.com>,
Eugenio Pérez <eperezma@...hat.com>,
netdev@...r.kernel.org,
virtualization@...ts.linux.dev
Subject: [PATCH v2 6/8] vhost/vsock: Allocate nonlinear SKBs for handling large receive buffers
When receiving a packet from a guest, vhost_vsock_handle_tx_kick()
calls vhost_vsock_alloc_skb(), which uses virtio_vsock_alloc_linear_skb()
to allocate and fill an SKB with the receive data. Unfortunately, these
are always linear allocations and can therefore put significant pressure
on kmalloc(): the maximum packet size (VIRTIO_VSOCK_MAX_PKT_BUF_SIZE +
VIRTIO_VSOCK_SKB_HEADROOM) is a little over 64KiB, so each packet ends
up in a 128KiB allocation.
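
A rough userspace sketch of that size arithmetic (not kernel code; the
header size and skb_shared_info overhead below are assumptions rather
than exact values, and kmalloc() is modelled as rounding large requests
up to the next power-of-two bucket):

	#include <stdio.h>

	int main(void)
	{
		unsigned long pkt_buf  = 64 * 1024; /* VIRTIO_VSOCK_MAX_PKT_BUF_SIZE */
		unsigned long headroom = 44;        /* ~sizeof(struct virtio_vsock_hdr), assumed */
		unsigned long shinfo   = 320;       /* rough skb_shared_info overhead, assumed */
		unsigned long need = pkt_buf + headroom + shinfo;
		unsigned long bucket = 1;

		/* Large kmalloc() requests are served from power-of-two buckets. */
		while (bucket < need)
			bucket <<= 1;

		printf("~%lu bytes requested -> %lu KiB bucket\n", need, bucket >> 10);
		return 0;
	}
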
Rework the vsock SKB allocation so that, for sizes with page order
greater than PAGE_ALLOC_COSTLY_ORDER, a nonlinear SKB is allocated
instead, with the packet header in the linear area and the receive data
in the fragments. Move the VIRTIO_VSOCK_SKB_HEADROOM check out of the
allocation function and into the single caller that needs it, and add a
debug warning if virtio_vsock_skb_rx_put() is ever called on an SKB with
a non-zero length, as this would be destructive for the nonlinear case.
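
For reference, a minimal userspace sketch of the resulting cutoff,
assuming 4KiB pages, PAGE_ALLOC_COSTLY_ORDER == 3 and roughly 320 bytes
of skb_shared_info overhead (the exact figures depend on configuration);
the helper name is purely illustrative and simply mirrors the
virtio_vsock_alloc_skb() decision in the patch below:

	#include <stdbool.h>
	#include <stdio.h>

	#define PAGE_SIZE		4096UL	/* assumed */
	#define PAGE_ALLOC_COSTLY_ORDER	3	/* assumed */
	#define SHINFO_OVERHEAD		320UL	/* rough, assumed */
	#define SKB_WITH_OVERHEAD(sz)	((sz) - SHINFO_OVERHEAD)

	/* Mirrors the linear-vs-nonlinear decision in virtio_vsock_alloc_skb(). */
	static bool vsock_skb_is_linear(unsigned long size)
	{
		return size <= SKB_WITH_OVERHEAD(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER);
	}

	int main(void)
	{
		printf("4KiB request  -> %s\n",
		       vsock_skb_is_linear(4096) ? "linear" : "nonlinear");
		printf("64KiB request -> %s\n",
		       vsock_skb_is_linear(64 * 1024 + 44) ? "linear" : "nonlinear");
		return 0;
	}
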
Signed-off-by: Will Deacon <will@...nel.org>
---
drivers/vhost/vsock.c | 11 +++++------
include/linux/virtio_vsock.h | 32 +++++++++++++++++++++++++-------
2 files changed, 30 insertions(+), 13 deletions(-)
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index b13f6be452ba..f3c2ea1d0ae7 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -344,11 +344,12 @@ vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
len = iov_length(vq->iov, out);
- if (len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM)
+ if (len < VIRTIO_VSOCK_SKB_HEADROOM ||
+ len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM)
return NULL;
/* len contains both payload and hdr */
- skb = virtio_vsock_alloc_linear_skb(len, GFP_KERNEL);
+ skb = virtio_vsock_alloc_skb(len, GFP_KERNEL);
if (!skb)
return NULL;
@@ -377,10 +378,8 @@ vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
virtio_vsock_skb_rx_put(skb);
- nbytes = copy_from_iter(skb->data, payload_len, &iov_iter);
- if (nbytes != payload_len) {
- vq_err(vq, "Expected %zu byte payload, got %zu bytes\n",
- payload_len, nbytes);
+ if (skb_copy_datagram_from_iter(skb, 0, &iov_iter, payload_len)) {
+ vq_err(vq, "Failed to copy %zu byte payload\n", payload_len);
kfree_skb(skb);
return NULL;
}
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index 6d4a933c895a..ad69668f6b91 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -51,29 +51,47 @@ static inline void virtio_vsock_skb_rx_put(struct sk_buff *skb)
{
u32 len;
+ DEBUG_NET_WARN_ON_ONCE(skb->len);
len = le32_to_cpu(virtio_vsock_hdr(skb)->len);
- skb_put(skb, len);
+
+ if (skb_is_nonlinear(skb))
+ skb->len = len;
+ else
+ skb_put(skb, len);
}
-static inline struct sk_buff *virtio_vsock_alloc_skb(unsigned int size, gfp_t mask)
+static inline struct sk_buff *
+__virtio_vsock_alloc_skb_with_frags(unsigned int header_len,
+ unsigned int data_len,
+ gfp_t mask)
{
struct sk_buff *skb;
+ int err;
- if (size < VIRTIO_VSOCK_SKB_HEADROOM)
- return NULL;
-
- skb = alloc_skb(size, mask);
+ skb = alloc_skb_with_frags(header_len, data_len,
+ PAGE_ALLOC_COSTLY_ORDER, &err, mask);
if (!skb)
return NULL;
skb_reserve(skb, VIRTIO_VSOCK_SKB_HEADROOM);
+ skb->data_len = data_len;
return skb;
}
static inline struct sk_buff *
virtio_vsock_alloc_linear_skb(unsigned int size, gfp_t mask)
{
- return virtio_vsock_alloc_skb(size, mask);
+ return __virtio_vsock_alloc_skb_with_frags(size, 0, mask);
+}
+
+static inline struct sk_buff *virtio_vsock_alloc_skb(unsigned int size, gfp_t mask)
+{
+ if (size <= SKB_WITH_OVERHEAD(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
+ return virtio_vsock_alloc_linear_skb(size, mask);
+
+ size -= VIRTIO_VSOCK_SKB_HEADROOM;
+ return __virtio_vsock_alloc_skb_with_frags(VIRTIO_VSOCK_SKB_HEADROOM,
+ size, mask);
}
static inline void
--
2.50.0.727.gbf7dc18ff4-goog