[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <78157286-3663-202f-da94-1a17e4ffe819@sberdevices.ru>
Date: Fri, 3 Jun 2022 05:31:00 +0000
From: Arseniy Krasnov <AVKrasnov@...rdevices.ru>
To: Stefano Garzarella <sgarzare@...hat.com>,
Stefan Hajnoczi <stefanha@...hat.com>,
"Michael S. Tsirkin" <mst@...hat.com>,
"David S. Miller" <davem@...emloft.net>,
Jason Wang <jasowang@...hat.com>,
"Jakub Kicinski" <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>
CC: "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"kvm@...r.kernel.org" <kvm@...r.kernel.org>,
"virtualization@...ts.linux-foundation.org"
<virtualization@...ts.linux-foundation.org>,
"netdev@...r.kernel.org" <netdev@...r.kernel.org>,
kernel <kernel@...rdevices.ru>,
Krasnov Arseniy <oxffffaa@...il.com>
Subject: [RFC PATCH v2 1/8] virtio/vsock: rework packet allocation logic
To support zerocopy receive, the packet buffer allocation
is changed: for buffers which could be mapped into a user's
vma we can't use 'kmalloc()' (as the kernel restricts mapping
slab pages into a user's vma), so the raw buddy allocator is
now called instead. For tx packets (such packets won't be
mapped to user space), the previous 'kmalloc()' way is still
used, but with a special flag in the packet's structure which
allows us to distinguish between 'kmalloc()' and raw page buffers.
Signed-off-by: Arseniy Krasnov <AVKrasnov@...rdevices.ru>
---
include/linux/virtio_vsock.h | 1 +
net/vmw_vsock/virtio_transport.c | 8 ++++++--
net/vmw_vsock/virtio_transport_common.c | 9 ++++++++-
3 files changed, 15 insertions(+), 3 deletions(-)
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index 35d7eedb5e8e..d02cb7aa922f 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -50,6 +50,7 @@ struct virtio_vsock_pkt {
u32 off;
bool reply;
bool tap_delivered;
+ bool slab_buf;
};
struct virtio_vsock_pkt_info {
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index ad64f403536a..19909c1e9ba3 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -255,16 +255,20 @@ static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
vq = vsock->vqs[VSOCK_VQ_RX];
do {
+ struct page *buf_page;
+
pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
if (!pkt)
break;
- pkt->buf = kmalloc(buf_len, GFP_KERNEL);
- if (!pkt->buf) {
+ buf_page = alloc_page(GFP_KERNEL);
+
+ if (!buf_page) {
virtio_transport_free_pkt(pkt);
break;
}
+ pkt->buf = page_to_virt(buf_page);
pkt->buf_len = buf_len;
pkt->len = buf_len;
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index ec2c2afbf0d0..278567f748f2 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -69,6 +69,7 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
if (!pkt->buf)
goto out_pkt;
+ pkt->slab_buf = true;
pkt->buf_len = len;
err = memcpy_from_msg(pkt->buf, info->msg, len);
@@ -1342,7 +1343,13 @@ EXPORT_SYMBOL_GPL(virtio_transport_recv_pkt);
void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt)
{
-	kfree(pkt->buf);
+	if (pkt->buf_len) {
+		/* 'slab_buf' tells whether the buffer came from kmalloc()
+		 * (tx path) or from the buddy allocator (rx path), so the
+		 * matching release function must be used here.
+		 */
+		if (pkt->slab_buf)
+			kfree(pkt->buf);
+		else
+			/* free_pages() takes the address as unsigned long */
+			free_pages((unsigned long)pkt->buf,
+				   get_order(pkt->buf_len));
+	}
+
	kfree(pkt);
}
EXPORT_SYMBOL_GPL(virtio_transport_free_pkt);
--
2.25.1
Powered by blists - more mailing lists