Message-ID: <988e9e3c-7993-d6e2-626d-deb46248ed9f@sberdevices.ru>
Date: Thu, 12 May 2022 05:09:19 +0000
From: Arseniy Krasnov <AVKrasnov@...rdevices.ru>
To: Stefan Hajnoczi <stefanha@...hat.com>,
Stefano Garzarella <sgarzare@...hat.com>,
"Michael S. Tsirkin" <mst@...hat.com>,
Jason Wang <jasowang@...hat.com>,
"David S. Miller" <davem@...emloft.net>,
"Jakub Kicinski" <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>
CC: "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"kvm@...r.kernel.org" <kvm@...r.kernel.org>,
"virtualization@...ts.linux-foundation.org"
<virtualization@...ts.linux-foundation.org>,
"netdev@...r.kernel.org" <netdev@...r.kernel.org>,
kernel <kernel@...rdevices.ru>
Subject: [RFC PATCH v1 2/8] vhost/vsock: rework packet allocation logic

For packets received from the virtio RX queue, use the buddy
allocator instead of 'kmalloc()', so that the resulting pages
can later be inserted into a user-provided vma. The single call
to 'copy_from_iter()' is replaced with a per-page copy loop.

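For reference, a minimal sketch (not part of this patch) of how such
buddy-allocated pages could later be handed to userspace from a
hypothetical mmap handler; the function name and arguments are
illustrative only, and range checks/refcount handling are simplified:

#include <linux/mm.h>

/* Hypothetical helper: insert each page of a buddy-allocated packet
 * buffer into a user-provided vma with vm_insert_page(). 'buf_page'
 * is the first page of the buffer, 'buf_len' is the packet length.
 */
static int vsock_pkt_insert_pages(struct vm_area_struct *vma,
				  struct page *buf_page, size_t buf_len)
{
	unsigned long uaddr = vma->vm_start;
	unsigned long i;

	for (i = 0; i < (PAGE_ALIGN(buf_len) >> PAGE_SHIFT); i++) {
		int ret = vm_insert_page(vma, uaddr, buf_page + i);

		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
	}

	return 0;
}
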
Signed-off-by: Arseniy Krasnov <AVKrasnov@...rdevices.ru>
---
drivers/vhost/vsock.c | 49 ++++++++++++++++++++++++++++++++++++-------
1 file changed, 41 insertions(+), 8 deletions(-)
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 37f0b4274113..157798985389 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -360,6 +360,9 @@ vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
struct iov_iter iov_iter;
size_t nbytes;
size_t len;
+ struct page *buf_page;
+ ssize_t pkt_len;
+ int page_idx;
if (in != 0) {
vq_err(vq, "Expected 0 input buffers, got %u\n", in);
@@ -393,20 +396,50 @@ vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
return NULL;
}
- pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
- if (!pkt->buf) {
+ /* This may waste memory, as at least one
+ * full page is allocated for each packet.
+ */
+ buf_page = alloc_pages(GFP_KERNEL, get_order(pkt->len));
+
+ if (buf_page == NULL) {
kfree(pkt);
return NULL;
}
+ pkt->buf = page_to_virt(buf_page);
pkt->buf_len = pkt->len;
- nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
- if (nbytes != pkt->len) {
- vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
- pkt->len, nbytes);
- virtio_transport_free_pkt(pkt);
- return NULL;
+ page_idx = 0;
+ pkt_len = pkt->len;
+
+ /* As allocated pages are not mapped, process
+ * pages one by one.
+ */
+ while (pkt_len > 0) {
+ void *mapped;
+ size_t to_copy;
+
+ mapped = kmap(buf_page + page_idx);
+
+ if (mapped == NULL) {
+ virtio_transport_free_pkt(pkt);
+ return NULL;
+ }
+
+ to_copy = min(pkt_len, ((ssize_t)PAGE_SIZE));
+
+ nbytes = copy_from_iter(mapped, to_copy, &iov_iter);
+ if (nbytes != to_copy) {
+ vq_err(vq, "Expected %zu byte payload, got %zu bytes\n",
+ to_copy, nbytes);
+ virtio_transport_free_pkt(pkt);
+ return NULL;
+ }
+
+ kunmap(mapped);
+
+ pkt_len -= to_copy;
+ page_idx++;
}
return pkt;
--
2.25.1