Message-ID: <y5o3b47vimlbzuojy2d5hscewa7ywres7c4yml5zldfvpsjby7@cbi7wfpksu2z>
Date: Tue, 15 Jul 2025 12:00:05 +0200
From: Stefano Garzarella <sgarzare@...hat.com>
To: Will Deacon <will@...nel.org>
Cc: linux-kernel@...r.kernel.org, Keir Fraser <keirf@...gle.com>,
Steven Moreland <smoreland@...gle.com>, Frederick Mayle <fmayle@...gle.com>,
Stefan Hajnoczi <stefanha@...hat.com>, "Michael S. Tsirkin" <mst@...hat.com>,
Jason Wang <jasowang@...hat.com>, Eugenio Pérez <eperezma@...hat.com>,
netdev@...r.kernel.org, virtualization@...ts.linux.dev
Subject: Re: [PATCH v3 7/9] vhost/vsock: Allocate nonlinear SKBs for handling
large receive buffers

On Mon, Jul 14, 2025 at 04:21:01PM +0100, Will Deacon wrote:
>When receiving a packet from a guest, vhost_vsock_handle_tx_kick()
>calls vhost_vsock_alloc_linear_skb() to allocate and fill an SKB with
>the receive data. Unfortunately, these are always linear allocations and
>can therefore result in significant pressure on kmalloc() considering
>that the maximum packet size (VIRTIO_VSOCK_MAX_PKT_BUF_SIZE +
>VIRTIO_VSOCK_SKB_HEADROOM) is a little over 64KiB, resulting in a 128KiB
>allocation for each packet.
>
>Rework the vsock SKB allocation so that, for sizes with page order
>greater than PAGE_ALLOC_COSTLY_ORDER, a nonlinear SKB is allocated
>instead with the packet header in the SKB and the receive data in the
>fragments. Finally, add a debug warning if virtio_vsock_skb_rx_put() is
>ever called on an SKB with a non-zero length, as this would be
>destructive for the nonlinear case.
>
>Signed-off-by: Will Deacon <will@...nel.org>
>---
> drivers/vhost/vsock.c | 8 +++-----
> include/linux/virtio_vsock.h | 40 +++++++++++++++++++++++++++++-------
> 2 files changed, 36 insertions(+), 12 deletions(-)
Reviewed-by: Stefano Garzarella <sgarzare@...hat.com>
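
As an aside for anyone skimming the thread, here is a rough user-space
sketch of the size arithmetic behind the 128KiB figure in the commit
message above. The 4 KiB page size, the 44-byte virtio_vsock_hdr and the
next-power-of-two rounding are assumed typical values for illustration,
not something taken from the patch:

/*
 * Rough arithmetic only: VIRTIO_VSOCK_MAX_PKT_BUF_SIZE plus the header
 * headroom is "a little over 64KiB", and kmalloc() serves large requests
 * from power-of-two buckets, hence roughly a 128KiB allocation per packet.
 */
#include <stdio.h>

static unsigned long next_pow2(unsigned long x)
{
	unsigned long p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned long max_pkt_buf = 64 * 1024;	/* VIRTIO_VSOCK_MAX_PKT_BUF_SIZE */
	unsigned long headroom = 44;		/* assumed sizeof(struct virtio_vsock_hdr) */
	unsigned long req = max_pkt_buf + headroom;

	printf("request %lu bytes -> %lu byte allocation\n", req, next_pow2(req));
	return 0;
}
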
>
>diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
>index 24b7547b05a6..0679a706ebc0 100644
>--- a/drivers/vhost/vsock.c
>+++ b/drivers/vhost/vsock.c
>@@ -349,7 +349,7 @@ vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
> return NULL;
>
> /* len contains both payload and hdr */
>- skb = virtio_vsock_alloc_linear_skb(len, GFP_KERNEL);
>+ skb = virtio_vsock_alloc_skb(len, GFP_KERNEL);
> if (!skb)
> return NULL;
>
>@@ -378,10 +378,8 @@ vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
>
> virtio_vsock_skb_rx_put(skb, payload_len);
>
>- nbytes = copy_from_iter(skb->data, payload_len, &iov_iter);
>- if (nbytes != payload_len) {
>- vq_err(vq, "Expected %zu byte payload, got %zu bytes\n",
>- payload_len, nbytes);
>+ if (skb_copy_datagram_from_iter(skb, 0, &iov_iter, payload_len)) {
>+ vq_err(vq, "Failed to copy %zu byte payload\n", payload_len);
> kfree_skb(skb);
> return NULL;
> }
>diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
>index 36dd0cd55368..fa5934ea9c81 100644
>--- a/include/linux/virtio_vsock.h
>+++ b/include/linux/virtio_vsock.h
>@@ -49,20 +49,46 @@ static inline void virtio_vsock_skb_clear_tap_delivered(struct sk_buff *skb)
>
> static inline void virtio_vsock_skb_rx_put(struct sk_buff *skb, u32 len)
> {
>- skb_put(skb, len);
>+ DEBUG_NET_WARN_ON_ONCE(skb->len);
>+
>+ if (skb_is_nonlinear(skb))
>+ skb->len = len;
>+ else
>+ skb_put(skb, len);
>+}
>+
>+static inline struct sk_buff *
>+__virtio_vsock_alloc_skb_with_frags(unsigned int header_len,
>+ unsigned int data_len,
>+ gfp_t mask)
>+{
>+ struct sk_buff *skb;
>+ int err;
>+
>+ skb = alloc_skb_with_frags(header_len, data_len,
>+ PAGE_ALLOC_COSTLY_ORDER, &err, mask);
>+ if (!skb)
>+ return NULL;
>+
>+ skb_reserve(skb, VIRTIO_VSOCK_SKB_HEADROOM);
>+ skb->data_len = data_len;
>+ return skb;
> }
>
> static inline struct sk_buff *
> virtio_vsock_alloc_linear_skb(unsigned int size, gfp_t mask)
> {
>- struct sk_buff *skb;
>+ return __virtio_vsock_alloc_skb_with_frags(size, 0, mask);
>+}
>
>- skb = alloc_skb(size, mask);
>- if (!skb)
>- return NULL;
>+static inline struct sk_buff *virtio_vsock_alloc_skb(unsigned int size, gfp_t mask)
>+{
>+ if (size <= SKB_WITH_OVERHEAD(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
>+ return virtio_vsock_alloc_linear_skb(size, mask);
>
>- skb_reserve(skb, VIRTIO_VSOCK_SKB_HEADROOM);
>- return skb;
>+ size -= VIRTIO_VSOCK_SKB_HEADROOM;
>+ return __virtio_vsock_alloc_skb_with_frags(VIRTIO_VSOCK_SKB_HEADROOM,
>+ size, mask);
> }
>
> static inline void
>--
>2.50.0.727.gbf7dc18ff4-goog
>
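
For completeness, a minimal user-space sketch of the linear/nonlinear
split introduced by virtio_vsock_alloc_skb() above. The 4 KiB page size,
PAGE_ALLOC_COSTLY_ORDER of 3 and the ~320-byte skb_shared_info overhead
are assumptions for illustration; the kernel's actual values may differ:

/*
 * Sketch of the decision in virtio_vsock_alloc_skb(): sizes that still
 * fit in an order-PAGE_ALLOC_COSTLY_ORDER linear buffer stay linear,
 * larger ones go to alloc_skb_with_frags() with only the headroom kept
 * in the linear area.
 */
#include <stdio.h>

#define PAGE_SIZE		4096UL	/* assumed */
#define PAGE_ALLOC_COSTLY_ORDER	3	/* assumed */
#define SHINFO			320UL	/* assumed SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */
#define SKB_WITH_OVERHEAD(x)	((x) - SHINFO)

int main(void)
{
	unsigned long threshold = SKB_WITH_OVERHEAD(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER);
	unsigned long sizes[] = { 4096, 16384, 32768, 64 * 1024 + 44 };
	unsigned int i;

	printf("linear up to %lu bytes\n", threshold);
	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("size %6lu -> %s\n", sizes[i],
		       sizes[i] <= threshold ? "linear" : "nonlinear (frags)");
	return 0;
}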