Message-ID: <20230522073950.3574171-2-AVKrasnov@sberdevices.ru>
Date: Mon, 22 May 2023 10:39:34 +0300
From: Arseniy Krasnov <AVKrasnov@...rdevices.ru>
To: Stefan Hajnoczi <stefanha@...hat.com>, Stefano Garzarella
<sgarzare@...hat.com>, "David S. Miller" <davem@...emloft.net>, Eric Dumazet
<edumazet@...gle.com>, Jakub Kicinski <kuba@...nel.org>, Paolo Abeni
<pabeni@...hat.com>, "Michael S. Tsirkin" <mst@...hat.com>, Jason Wang
<jasowang@...hat.com>, Bobby Eshleman <bobby.eshleman@...edance.com>
CC: <kvm@...r.kernel.org>, <virtualization@...ts.linux-foundation.org>,
<netdev@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<kernel@...rdevices.ru>, <oxffffaa@...il.com>, <avkrasnov@...rdevices.ru>,
Arseniy Krasnov <AVKrasnov@...rdevices.ru>
Subject: [RFC PATCH v3 01/17] vsock/virtio: read data from non-linear skb
This is a preparation patch for non-linear skbuff handling. It replaces
direct calls to 'memcpy_to_msg()' with 'skb_copy_datagram_iter()'. The
main advantage of the latter is that it can handle the paged part of
the skb by using 'kmap()' on each page; if the skb has no pages, it
behaves like a plain copy to the iov iterator. This patch also removes
the 'skb_pull()' calls, because 'skb_pull()' updates the 'data' pointer
of the skb, which is the wrong thing to do with a non-linear skb.
Instead of updating the 'data' and 'len' fields of the skb, it adds a
new field to the skb's control block: this value holds the current
offset at which to read the next data from the skb (whether the skb is
linear or not). After each read the field is incremented, and once it
reaches 'len' the skb is considered fully consumed.
Signed-off-by: Arseniy Krasnov <AVKrasnov@...rdevices.ru>
---
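Note for reviewers: below is a minimal, self-contained user-space C
sketch of the offset-tracking idea this patch introduces. It is
illustrative only; 'struct pkt' and 'pkt_read()' are made-up names for
this note, not kernel API, and the plain memcpy() stands in for
skb_copy_datagram_iter(), which in the kernel also walks the skb's
pages.

    #include <stdio.h>
    #include <string.h>

    /* Toy stand-in for the skb + its control block: 'frag_off' records
     * how much of the packet has already been copied out, so 'data'
     * and 'len' are never modified (unlike the old skb_pull() scheme).
     */
    struct pkt {
            const char *data;
            size_t len;
            size_t frag_off; /* mirrors virtio_vsock_skb_cb::frag_off */
    };

    /* Copy up to 'size' bytes starting at the saved offset and advance
     * it; the packet is fully consumed when frag_off == len.
     */
    static size_t pkt_read(struct pkt *p, char *dst, size_t size)
    {
            size_t bytes = p->len - p->frag_off;

            if (bytes > size)
                    bytes = size;

            /* in the kernel: skb_copy_datagram_iter(skb, frag_off, ...) */
            memcpy(dst, p->data + p->frag_off, bytes);
            p->frag_off += bytes;

            return bytes;
    }

    int main(void)
    {
            struct pkt p = { .data = "hello, vsock", .len = 12 };
            char buf[5];

            while (p.frag_off < p.len) {
                    size_t n = pkt_read(&p, buf, sizeof(buf));

                    printf("read %zu byte(s): %.*s\n", n, (int)n, buf);
            }
            /* here the real code would dequeue and free the skb */
            return 0;
    }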
include/linux/virtio_vsock.h | 1 +
net/vmw_vsock/virtio_transport_common.c | 26 +++++++++++++++++--------
2 files changed, 19 insertions(+), 8 deletions(-)
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index c58453699ee9..17dbb7176e37 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -12,6 +12,7 @@
struct virtio_vsock_skb_cb {
bool reply;
bool tap_delivered;
+ u32 frag_off;
};
#define VIRTIO_VSOCK_SKB_CB(skb) ((struct virtio_vsock_skb_cb *)((skb)->cb))
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index e4878551f140..16effa8d55d2 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -355,7 +355,7 @@ virtio_transport_stream_do_peek(struct vsock_sock *vsk,
spin_lock_bh(&vvs->rx_lock);
skb_queue_walk_safe(&vvs->rx_queue, skb, tmp) {
- off = 0;
+ off = VIRTIO_VSOCK_SKB_CB(skb)->frag_off;
if (total == len)
break;
@@ -370,7 +370,10 @@ virtio_transport_stream_do_peek(struct vsock_sock *vsk,
*/
spin_unlock_bh(&vvs->rx_lock);
- err = memcpy_to_msg(msg, skb->data + off, bytes);
+ err = skb_copy_datagram_iter(skb, off,
+ &msg->msg_iter,
+ bytes);
+
if (err)
goto out;
@@ -414,24 +417,28 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
skb = skb_peek(&vvs->rx_queue);
bytes = len - total;
- if (bytes > skb->len)
- bytes = skb->len;
+ if (bytes > skb->len - VIRTIO_VSOCK_SKB_CB(skb)->frag_off)
+ bytes = skb->len - VIRTIO_VSOCK_SKB_CB(skb)->frag_off;
/* sk_lock is held by caller so no one else can dequeue.
* Unlock rx_lock since memcpy_to_msg() may sleep.
*/
spin_unlock_bh(&vvs->rx_lock);
- err = memcpy_to_msg(msg, skb->data, bytes);
+ err = skb_copy_datagram_iter(skb,
+ VIRTIO_VSOCK_SKB_CB(skb)->frag_off,
+ &msg->msg_iter, bytes);
+
if (err)
goto out;
spin_lock_bh(&vvs->rx_lock);
total += bytes;
- skb_pull(skb, bytes);
- if (skb->len == 0) {
+ VIRTIO_VSOCK_SKB_CB(skb)->frag_off += bytes;
+
+ if (skb->len == VIRTIO_VSOCK_SKB_CB(skb)->frag_off) {
u32 pkt_len = le32_to_cpu(virtio_vsock_hdr(skb)->len);
virtio_transport_dec_rx_pkt(vvs, pkt_len);
@@ -503,7 +510,10 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
*/
spin_unlock_bh(&vvs->rx_lock);
- err = memcpy_to_msg(msg, skb->data, bytes_to_copy);
+ err = skb_copy_datagram_iter(skb, 0,
+ &msg->msg_iter,
+ bytes_to_copy);
+
if (err) {
/* Copy of message failed. Rest of
* fragments will be freed without copy.
--
2.25.1