Message-ID: <20210223141504.eojm7kgcpswrez6j@steredhat>
Date: Tue, 23 Feb 2021 15:15:04 +0100
From: Stefano Garzarella <sgarzare@...hat.com>
To: Arseny Krasnov <arseny.krasnov@...persky.com>
Cc: Stefan Hajnoczi <stefanha@...hat.com>,
"Michael S. Tsirkin" <mst@...hat.com>,
Jason Wang <jasowang@...hat.com>,
"David S. Miller" <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>,
Jorgen Hansen <jhansen@...are.com>,
Norbert Slusarek <nslusarek@....net>,
Andra Paraschiv <andraprs@...zon.com>,
Colin Ian King <colin.king@...onical.com>,
kvm@...r.kernel.org, virtualization@...ts.linux-foundation.org,
netdev@...r.kernel.org, linux-kernel@...r.kernel.org,
stsp2@...dex.ru, oxffffaa@...il.com
Subject: Re: [RFC PATCH v5 11/19] virtio/vsock: dequeue callback for
SOCK_SEQPACKET
On Thu, Feb 18, 2021 at 08:39:37AM +0300, Arseny Krasnov wrote:
>This adds a transport callback and its logic for SEQPACKET dequeue.
>The callback fetches RW packets from the socket's rx queue until the
>whole record is copied (if the user's buffer is full, the user is not
>woken up). This is done to avoid stalling the sender: if we woke the
>user up and it left the syscall, nobody would send a credit update for
>the rest of the record, and the sender would wait until the receiver
>entered the read syscall again. So if the user buffer is full, we just
>send a credit update and drop the data. If SEQ_BEGIN is found during
>the copy (and not all data was copied yet), copying is restarted by
>resetting the user's iov iterator (the previous unfinished data is
>dropped).
>
>Signed-off-by: Arseny Krasnov <arseny.krasnov@...persky.com>
>---
> include/linux/virtio_vsock.h | 10 +++
> include/uapi/linux/virtio_vsock.h | 16 ++++
> net/vmw_vsock/virtio_transport_common.c | 114 ++++++++++++++++++++++++
> 3 files changed, 140 insertions(+)
This patch LGTM. Maybe we only need to change 'msg_cnt' as we discussed
on virtio-comment, but let's see if there are any other comments.
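
Just for context for other reviewers, since the commit message describes
the receiver behaviour in terms of whole records: below is a rough
userspace sketch of what this enables. It is only an illustration, the
socket setup (AF_VSOCK, SOCK_SEQPACKET) is omitted, and whether a
too-small buffer is reported with MSG_TRUNC is up to the af_vsock
patches of this series, not to this one.

/* Receiver-side sketch, not part of this patch. */
#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>

static int receive_record(int fd)
{
	char buf[4096];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
	ssize_t len;

	/* One recvmsg() returns (at most) one record; it never returns
	 * the head of a record and leaves the tail for the next call.
	 */
	len = recvmsg(fd, &msg, 0);
	if (len < 0)
		return -1;

	/* MSG_EOR comes from the RW packet carrying VIRTIO_VSOCK_RW_EOR
	 * (see the dequeue code below); MSG_TRUNC handling, if any, is
	 * done on the af_vsock side.
	 */
	if (msg.msg_flags & MSG_EOR)
		printf("complete record, %zd bytes\n", len);

	return 0;
}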
>
>diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
>index dc636b727179..003d06ae4a85 100644
>--- a/include/linux/virtio_vsock.h
>+++ b/include/linux/virtio_vsock.h
>@@ -36,6 +36,11 @@ struct virtio_vsock_sock {
> u32 rx_bytes;
> u32 buf_alloc;
> struct list_head rx_queue;
>+
>+ /* For SOCK_SEQPACKET */
>+ u32 user_read_seq_len;
>+ u32 user_read_copied;
>+ u32 curr_rx_msg_cnt;
> };
>
> struct virtio_vsock_pkt {
>@@ -80,6 +85,11 @@ virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
> struct msghdr *msg,
> size_t len, int flags);
>
>+int
>+virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk,
>+ struct msghdr *msg,
>+ int flags,
>+ bool *msg_ready);
> s64 virtio_transport_stream_has_data(struct vsock_sock *vsk);
> s64 virtio_transport_stream_has_space(struct vsock_sock *vsk);
>
>diff --git a/include/uapi/linux/virtio_vsock.h b/include/uapi/linux/virtio_vsock.h
>index 1d57ed3d84d2..cf9c165e5cca 100644
>--- a/include/uapi/linux/virtio_vsock.h
>+++ b/include/uapi/linux/virtio_vsock.h
>@@ -63,8 +63,14 @@ struct virtio_vsock_hdr {
> __le32 fwd_cnt;
> } __attribute__((packed));
>
>+struct virtio_vsock_seq_hdr {
>+ __le32 msg_cnt;
>+ __le32 msg_len;
>+} __attribute__((packed));
>+
> enum virtio_vsock_type {
> VIRTIO_VSOCK_TYPE_STREAM = 1,
>+ VIRTIO_VSOCK_TYPE_SEQPACKET = 2,
> };
>
> enum virtio_vsock_op {
>@@ -83,6 +89,11 @@ enum virtio_vsock_op {
> VIRTIO_VSOCK_OP_CREDIT_UPDATE = 6,
> /* Request the peer to send the credit info to us */
> VIRTIO_VSOCK_OP_CREDIT_REQUEST = 7,
>+
>+ /* Record begin for SOCK_SEQPACKET */
>+ VIRTIO_VSOCK_OP_SEQ_BEGIN = 8,
>+ /* Record end for SOCK_SEQPACKET */
>+ VIRTIO_VSOCK_OP_SEQ_END = 9,
> };
>
> /* VIRTIO_VSOCK_OP_SHUTDOWN flags values */
>@@ -91,4 +102,9 @@ enum virtio_vsock_shutdown {
> VIRTIO_VSOCK_SHUTDOWN_SEND = 2,
> };
>
>+/* VIRTIO_VSOCK_OP_RW flags values */
>+enum virtio_vsock_rw {
>+ VIRTIO_VSOCK_RW_EOR = 1,
>+};
>+
> #endif /* _UAPI_LINUX_VIRTIO_VSOCK_H */
>diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
>index 833104b71a1c..d8ec2dfa2315 100644
>--- a/net/vmw_vsock/virtio_transport_common.c
>+++ b/net/vmw_vsock/virtio_transport_common.c
>@@ -393,6 +393,108 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
> return err;
> }
>
>+static inline void virtio_transport_remove_pkt(struct virtio_vsock_pkt *pkt)
>+{
>+ list_del(&pkt->list);
>+ virtio_transport_free_pkt(pkt);
>+}
>+
>+static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
>+ struct msghdr *msg,
>+ bool *msg_ready)
>+{
>+ struct virtio_vsock_sock *vvs = vsk->trans;
>+ struct virtio_vsock_pkt *pkt;
>+ int err = 0;
>+ size_t user_buf_len = msg->msg_iter.count;
>+
>+ *msg_ready = false;
>+ spin_lock_bh(&vvs->rx_lock);
>+
>+ while (!*msg_ready && !list_empty(&vvs->rx_queue) && !err) {
>+ pkt = list_first_entry(&vvs->rx_queue, struct virtio_vsock_pkt, list);
>+
>+ switch (le16_to_cpu(pkt->hdr.op)) {
>+ case VIRTIO_VSOCK_OP_SEQ_BEGIN: {
>+ /* Unexpected 'SEQ_BEGIN' during record copy:
>+ * leave the receive loop; 'EAGAIN' will restart it from
>+ * the outer receive loop, the packet stays in the queue
>+ * and the counters are cleared, so on the next iteration
>+ * 'SEQ_BEGIN' is dequeued first. The user's iov iterator
>+ * is reset in the outer loop. Also send a credit update,
>+ * because some bytes may already have been copied; the
>+ * user never sees an unfinished record.
>+ */
>+ err = -EAGAIN;
>+ break;
>+ }
>+ case VIRTIO_VSOCK_OP_SEQ_END: {
>+ struct virtio_vsock_seq_hdr *seq_hdr;
>+
>+ seq_hdr = (struct virtio_vsock_seq_hdr *)pkt->buf;
>+ /* First check that whole record is received. */
>+
>+ if (vvs->user_read_copied != vvs->user_read_seq_len ||
>+ (le32_to_cpu(seq_hdr->msg_cnt) - vvs->curr_rx_msg_cnt) != 1) {
>+ /* The tail of the current record and the head of the next
>+ * one were missed, so this EOR belongs to the next record.
>+ * Restart the receive: the current record is dropped; the
>+ * headless next one is dropped when its length is requested.
>+ */
>+ err = -EAGAIN;
>+ } else {
>+ /* Success. */
>+ *msg_ready = true;
>+ }
>+
>+ break;
>+ }
>+ case VIRTIO_VSOCK_OP_RW: {
>+ size_t bytes_to_copy;
>+ size_t pkt_len;
>+
>+ pkt_len = (size_t)le32_to_cpu(pkt->hdr.len);
>+ bytes_to_copy = min(user_buf_len, pkt_len);
>+
>+ /* sk_lock is held by caller so no one else can dequeue.
>+ * Unlock rx_lock since memcpy_to_msg() may sleep.
>+ */
>+ spin_unlock_bh(&vvs->rx_lock);
>+
>+ if (memcpy_to_msg(msg, pkt->buf, bytes_to_copy)) {
>+ spin_lock_bh(&vvs->rx_lock);
>+ err = -EINVAL;
>+ break;
>+ }
>+
>+ spin_lock_bh(&vvs->rx_lock);
>+ user_buf_len -= bytes_to_copy;
>+ vvs->user_read_copied += pkt_len;
>+
>+ if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_RW_EOR)
>+ msg->msg_flags |= MSG_EOR;
>+ break;
>+ }
>+ default:
>+ ;
>+ }
>+
>+ /* Keep an unexpected 'SEQ_BEGIN' packet in the queue,
>+ * but drop any other type of packet.
>+ */
>+ if (le16_to_cpu(pkt->hdr.op) != VIRTIO_VSOCK_OP_SEQ_BEGIN) {
>+ virtio_transport_dec_rx_pkt(vvs, pkt);
>+ virtio_transport_remove_pkt(pkt);
>+ }
>+ }
>+
>+ spin_unlock_bh(&vvs->rx_lock);
>+
>+ virtio_transport_send_credit_update(vsk);
>+
>+ return err;
>+}
>+
> ssize_t
> virtio_transport_stream_dequeue(struct vsock_sock *vsk,
> struct msghdr *msg,
>@@ -405,6 +507,18 @@ virtio_transport_stream_dequeue(struct vsock_sock *vsk,
> }
> EXPORT_SYMBOL_GPL(virtio_transport_stream_dequeue);
>
>+int
>+virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk,
>+ struct msghdr *msg,
>+ int flags, bool *msg_ready)
>+{
>+ if (flags & MSG_PEEK)
>+ return -EOPNOTSUPP;
>+
>+ return virtio_transport_seqpacket_do_dequeue(vsk, msg, msg_ready);
>+}
>+EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_dequeue);
>+
> int
> virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
> struct msghdr *msg,
>--
>2.25.1
>
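
Also, just to double check that I read the on-the-wire framing right
(modulo the 'msg_cnt' change mentioned above): this is the sequence of
packets I expect a sender to emit per record, using the definitions
added by this patch. It is only my understanding, emit_pkt() is a
made-up stand-in for the real enqueue path, and the actual framing is
done by the enqueue patches of this series, not by this one. It also
assumes a uapi linux/virtio_vsock.h that already contains the additions
from this patch.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <linux/virtio_vsock.h>

/* Made-up stand-in for the real enqueue path; it just logs the packet. */
static void emit_pkt(unsigned int op, unsigned int flags,
		     const void *payload, uint32_t len)
{
	printf("pkt: op=%u flags=0x%x len=%u\n", op, flags, len);
	(void)payload;
}

static void emit_record(uint32_t msg_cnt, const void *data, uint32_t len)
{
	struct virtio_vsock_seq_hdr seq = {
		.msg_cnt = htole32(msg_cnt),
		.msg_len = htole32(len),
	};

	/* SEQ_BEGIN carries the record length and message counter. */
	emit_pkt(VIRTIO_VSOCK_OP_SEQ_BEGIN, 0, &seq, sizeof(seq));

	/* The payload goes in one or more RW packets; the last one has
	 * the EOR flag so the receiver can set MSG_EOR.
	 */
	emit_pkt(VIRTIO_VSOCK_OP_RW, VIRTIO_VSOCK_RW_EOR, data, len);

	/* SEQ_END carries the counter of the next record, which is what
	 * the (msg_cnt - curr_rx_msg_cnt) == 1 check in the dequeue code
	 * relies on, if I read the SEQ_BEGIN handling correctly.
	 */
	seq.msg_cnt = htole32(msg_cnt + 1);
	emit_pkt(VIRTIO_VSOCK_OP_SEQ_END, 0, &seq, sizeof(seq));
}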