Message-ID: <20210211140319.ptqgrj5nvjn4snc7@steredhat>
Date: Thu, 11 Feb 2021 15:03:19 +0100
From: Stefano Garzarella <sgarzare@...hat.com>
To: Arseny Krasnov <arseny.krasnov@...persky.com>
Cc: Stefan Hajnoczi <stefanha@...hat.com>,
"Michael S. Tsirkin" <mst@...hat.com>,
Jason Wang <jasowang@...hat.com>,
"David S. Miller" <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>,
Jorgen Hansen <jhansen@...are.com>,
Andra Paraschiv <andraprs@...zon.com>,
Colin Ian King <colin.king@...onical.com>,
Jeff Vander Stoep <jeffv@...gle.com>, kvm@...r.kernel.org,
virtualization@...ts.linux-foundation.org, netdev@...r.kernel.org,
linux-kernel@...r.kernel.org, stsp2@...dex.ru, oxffffaa@...il.com
Subject: Re: [RFC PATCH v4 09/17] virtio/vsock: dequeue callback for
SOCK_SEQPACKET
On Thu, Feb 11, 2021 at 02:54:28PM +0100, Stefano Garzarella wrote:
>On Sun, Feb 07, 2021 at 06:16:46PM +0300, Arseny Krasnov wrote:
>>This adds the transport callback and its logic for SEQPACKET dequeue.
>>The callback fetches RW packets from the socket's rx queue until the
>>whole record has been copied (if the user's buffer is full, the user
>>is not woken up). This is done to avoid stalling the sender: if we
>>wake the user up and it leaves the syscall, nobody will send a credit
>>update for the rest of the record, and the sender will wait until the
>>receiver enters the read syscall again. So if the user's buffer is
>>full, we just send a credit update and drop the data. If SEQ_BEGIN is
>>found during the copy (and not all data has been copied), copying is
>>restarted by resetting the user's iov iterator (the previous
>>unfinished data is dropped).
>>
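
Just to check that I've understood the user-visible behaviour, here is a
minimal receiver sketch (assuming the rest of the series wires up
SOCK_SEQPACKET in af_vsock; port number and buffer size are arbitrary,
error handling omitted):

#include <stdio.h>
#include <sys/socket.h>
#include <linux/vm_sockets.h>

int main(void)
{
	struct sockaddr_vm addr = {
		.svm_family = AF_VSOCK,
		.svm_cid = VMADDR_CID_ANY,
		.svm_port = 1234,
	};
	char buf[64]; /* smaller than the record sent by the peer */
	int fd, conn;
	ssize_t n;

	fd = socket(AF_VSOCK, SOCK_SEQPACKET, 0);
	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	listen(fd, 1);
	conn = accept(fd, NULL, NULL);

	/* At most sizeof(buf) bytes of one record are returned; the
	 * rest of that record is dropped by the transport, which still
	 * sends the credit update so the sender is not stalled.
	 */
	n = recv(conn, buf, sizeof(buf), 0);
	printf("got %zd bytes of the record\n", n);
	return 0;
}

Is my understanding correct?
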
>>Signed-off-by: Arseny Krasnov <arseny.krasnov@...persky.com>
>>---
>>include/linux/virtio_vsock.h | 5 +
>>include/uapi/linux/virtio_vsock.h | 16 ++++
>>net/vmw_vsock/virtio_transport_common.c | 120 ++++++++++++++++++++++++
>>3 files changed, 141 insertions(+)
>>
>>diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
>>index dc636b727179..4d0de3dee9a4 100644
>>--- a/include/linux/virtio_vsock.h
>>+++ b/include/linux/virtio_vsock.h
>>@@ -36,6 +36,11 @@ struct virtio_vsock_sock {
>> u32 rx_bytes;
>> u32 buf_alloc;
>> struct list_head rx_queue;
>>+
>>+ /* For SOCK_SEQPACKET */
>>+ u32 user_read_seq_len;
>>+ u32 user_read_copied;
>>+ u32 curr_rx_msg_cnt;
>>};
>>
>>struct virtio_vsock_pkt {
>>diff --git a/include/uapi/linux/virtio_vsock.h b/include/uapi/linux/virtio_vsock.h
>>index 1d57ed3d84d2..cf9c165e5cca 100644
>>--- a/include/uapi/linux/virtio_vsock.h
>>+++ b/include/uapi/linux/virtio_vsock.h
>>@@ -63,8 +63,14 @@ struct virtio_vsock_hdr {
>> __le32 fwd_cnt;
>>} __attribute__((packed));
>>
>>+struct virtio_vsock_seq_hdr {
>>+ __le32 msg_cnt;
Maybe 'msg_id' would be a better name for this field, since we use it to
identify a message. Whether we use a counter or a random number is then
just an implementation detail.
As Michael said, perhaps this detail should be discussed in the proposal
for the VIRTIO spec changes.
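
Just to show what I mean (purely a naming sketch on top of this patch,
same layout and size):

struct virtio_vsock_seq_hdr {
	__le32 msg_id;	/* identifies the message: counter, random
			 * value, or whatever the implementation picks
			 */
	__le32 msg_len;
} __attribute__((packed));
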
>>+ __le32 msg_len;
>>+} __attribute__((packed));
>>+
>>enum virtio_vsock_type {
>> VIRTIO_VSOCK_TYPE_STREAM = 1,
>>+ VIRTIO_VSOCK_TYPE_SEQPACKET = 2,
>>};
>>
>>enum virtio_vsock_op {
>>@@ -83,6 +89,11 @@ enum virtio_vsock_op {
>> VIRTIO_VSOCK_OP_CREDIT_UPDATE = 6,
>> /* Request the peer to send the credit info to us */
>> VIRTIO_VSOCK_OP_CREDIT_REQUEST = 7,
>>+
>>+ /* Record begin for SOCK_SEQPACKET */
>>+ VIRTIO_VSOCK_OP_SEQ_BEGIN = 8,
>>+ /* Record end for SOCK_SEQPACKET */
>>+ VIRTIO_VSOCK_OP_SEQ_END = 9,
>>};
>>
>>/* VIRTIO_VSOCK_OP_SHUTDOWN flags values */
>>@@ -91,4 +102,9 @@ enum virtio_vsock_shutdown {
>> VIRTIO_VSOCK_SHUTDOWN_SEND = 2,
>>};
>>
>>+/* VIRTIO_VSOCK_OP_RW flags values */
>>+enum virtio_vsock_rw {
>>+ VIRTIO_VSOCK_RW_EOR = 1,
>>+};
>>+
>>#endif /* _UAPI_LINUX_VIRTIO_VSOCK_H */
>>diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
>>index 5956939eebb7..4572d01c8ea5 100644
>>--- a/net/vmw_vsock/virtio_transport_common.c
>>+++ b/net/vmw_vsock/virtio_transport_common.c
>>@@ -397,6 +397,126 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
>> return err;
>>}
>>
>>+static inline void virtio_transport_remove_pkt(struct virtio_vsock_pkt *pkt)
>>+{
>>+ list_del(&pkt->list);
>>+ virtio_transport_free_pkt(pkt);
>>+}
>>+
>>+static size_t virtio_transport_drop_until_seq_begin(struct virtio_vsock_sock *vvs)
>>+{
>
>This function is not used in this patch but in the next one, so I'd
>move it there.
>
>>+ struct virtio_vsock_pkt *pkt, *n;
>>+ size_t bytes_dropped = 0;
>>+
>>+ list_for_each_entry_safe(pkt, n, &vvs->rx_queue, list) {
>>+ if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_SEQ_BEGIN)
>>+ break;
>>+
>>+ bytes_dropped += le32_to_cpu(pkt->hdr.len);
>>+ virtio_transport_dec_rx_pkt(vvs, pkt);
>>+ virtio_transport_remove_pkt(pkt);
>>+ }
>>+
>>+ return bytes_dropped;
>>+}
>>+
>>+static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
>>+ struct msghdr *msg,
>>+ bool *msg_ready)
>>+{
>
>Also this function is not used here; maybe you can add the
>virtio_transport_seqpacket_dequeue() implementation in this patch.
>
>>+ struct virtio_vsock_sock *vvs = vsk->trans;
>>+ struct virtio_vsock_pkt *pkt;
>>+ int err = 0;
>>+ size_t user_buf_len = msg->msg_iter.count;
>>+
>>+ *msg_ready = false;
>>+ spin_lock_bh(&vvs->rx_lock);
>>+
>>+ while (!*msg_ready && !list_empty(&vvs->rx_queue) && !err) {
>>+ pkt = list_first_entry(&vvs->rx_queue, struct virtio_vsock_pkt, list);
>>+
>>+ switch (le16_to_cpu(pkt->hdr.op)) {
>>+ case VIRTIO_VSOCK_OP_SEQ_BEGIN: {
>>+ /* Unexpected 'SEQ_BEGIN' during record copy:
>>+ * Leave receive loop, 'EAGAIN' will restart it from
>>+ * outer receive loop, packet is still in queue and
>>+ * counters are cleared. So in next loop enter,
>>+ * 'SEQ_BEGIN' will be dequeued first. User's iov
>>+ * iterator will be reset in outer loop. Also
>>+ * send credit update, because some bytes could be
>>+ * copied. User will never see unfinished record.
>>+ */
>>+ err = -EAGAIN;
>>+ break;
>>+ }
>>+ case VIRTIO_VSOCK_OP_SEQ_END: {
>>+ struct virtio_vsock_seq_hdr *seq_hdr;
>>+
>>+ seq_hdr = (struct virtio_vsock_seq_hdr *)pkt->buf;
>>+ /* First check that whole record is received. */
>>+
>>+ if (vvs->user_read_copied != vvs->user_read_seq_len ||
>>+ (le32_to_cpu(seq_hdr->msg_cnt) - vvs->curr_rx_msg_cnt) != 1) {
>>+ /* Tail of current record and head of next missed,
>>+ * so this EOR is from next record. Restart receive.
>>+ * Current record will be dropped, next headless will
>>+ * be dropped on next attempt to get record length.
>>+ */
>>+ err = -EAGAIN;
>>+ } else {
>>+ /* Success. */
>>+ *msg_ready = true;
>>+ }
>>+
>>+ break;
>>+ }
>>+ case VIRTIO_VSOCK_OP_RW: {
>>+ size_t bytes_to_copy;
>>+ size_t pkt_len;
>>+
>>+ pkt_len = (size_t)le32_to_cpu(pkt->hdr.len);
>>+ bytes_to_copy = min(user_buf_len, pkt_len);
>>+
>>+ /* sk_lock is held by caller so no one else can dequeue.
>>+ * Unlock rx_lock since memcpy_to_msg() may sleep.
>>+ */
>>+ spin_unlock_bh(&vvs->rx_lock);
>>+
>>+ if (memcpy_to_msg(msg, pkt->buf, bytes_to_copy)) {
>>+ spin_lock_bh(&vvs->rx_lock);
>>+ err = -EINVAL;
>>+ break;
>>+ }
>>+
>>+ spin_lock_bh(&vvs->rx_lock);
>>+ user_buf_len -= bytes_to_copy;
>>+ vvs->user_read_copied += pkt_len;
>>+
>>+ if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_RW_EOR)
>>+ msg->msg_flags |= MSG_EOR;
>>+ break;
>>+ }
>>+ default:
>>+ ;
>>+ }
>>+
>>+ /* For unexpected 'SEQ_BEGIN', keep such packet in queue,
>>+ * but drop any other type of packet.
>>+ */
>>+ if (le16_to_cpu(pkt->hdr.op) != VIRTIO_VSOCK_OP_SEQ_BEGIN) {
>>+ virtio_transport_dec_rx_pkt(vvs, pkt);
>>+ virtio_transport_remove_pkt(pkt);
>>+ }
>>+ }
>>+
>>+ spin_unlock_bh(&vvs->rx_lock);
>>+
>>+ virtio_transport_send_credit_update(vsk, VIRTIO_VSOCK_TYPE_SEQPACKET,
>>+ NULL);
>>+
>>+ return err;
>>+}
>>+
>>ssize_t
>>virtio_transport_stream_dequeue(struct vsock_sock *vsk,
>> struct msghdr *msg,
>>--
>>2.25.1
>>
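
One more question, mostly to double-check my reading of the -EAGAIN
handling above: I assume the outer receive loop (the
virtio_transport_seqpacket_dequeue() that I guess comes with the next
patch) looks roughly like the sketch below, resetting the user's iov
iterator on every restart so that the user never sees a partially
copied record. This is only my mental model: the function name is made
up, and the SEQ_BEGIN/record-length handling and the waiting for new
packets are omitted.

/* Hypothetical caller, just to illustrate the restart-on-EAGAIN flow. */
static ssize_t seqpacket_dequeue_sketch(struct vsock_sock *vsk,
					struct msghdr *msg)
{
	struct iov_iter orig_iter = msg->msg_iter;
	bool msg_ready = false;
	int err;

	while (!msg_ready) {
		err = virtio_transport_seqpacket_do_dequeue(vsk, msg,
							    &msg_ready);
		if (err == -EAGAIN) {
			/* Unexpected SEQ_BEGIN: drop everything copied
			 * so far and restart from the beginning of the
			 * user's buffer.
			 */
			msg->msg_iter = orig_iter;
			continue;
		}
		if (err)
			return err;

		/* If the record is not complete yet, we would wait
		 * here for more packets before looping again.
		 */
	}

	return orig_iter.count - msg->msg_iter.count;
}

Is that more or less what happens in the next patch?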