Message-ID: <20210118151512.itolt7axlxovj267@steredhat>
Date: Mon, 18 Jan 2021 16:15:12 +0100
From: Stefano Garzarella <sgarzare@...hat.com>
To: Arseny Krasnov <arseny.krasnov@...persky.com>
Cc: Stefan Hajnoczi <stefanha@...hat.com>,
"Michael S. Tsirkin" <mst@...hat.com>,
Jason Wang <jasowang@...hat.com>,
"David S. Miller" <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>,
Andra Paraschiv <andraprs@...zon.com>,
Colin Ian King <colin.king@...onical.com>,
Jeff Vander Stoep <jeffv@...gle.com>, kvm@...r.kernel.org,
virtualization@...ts.linux-foundation.org, netdev@...r.kernel.org,
linux-kernel@...r.kernel.org, stsp2@...dex.ru, oxffffaa@...il.com
Subject: Re: [RFC PATCH v2 10/13] virtio/vsock: update receive logic
On Fri, Jan 15, 2021 at 08:44:07AM +0300, Arseny Krasnov wrote:
>This modifies the current receive logic for SEQPACKET support:
>1) Add 'SEQ_BEGIN' packet to the socket's rx queue.
>2) Add 'RW' packet to the rx queue, but without merging it into the buffer
>   of the last packet in the queue.
>3) Check the packet type against the socket type on receive (on mismatch,
>   reset the connection).
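
Just to make the user-visible side of 3) concrete (this is not part of
the patch, and the port number below is made up): once this series lands,
a guest application could open a SEQPACKET vsock socket roughly like this
sketch:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/vm_sockets.h>

int main(void)
{
	struct sockaddr_vm addr;
	int fd;

	/* SOCK_SEQPACKET on AF_VSOCK is what this series enables */
	fd = socket(AF_VSOCK, SOCK_SEQPACKET, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.svm_family = AF_VSOCK;
	addr.svm_cid = VMADDR_CID_HOST;
	addr.svm_port = 1234;	/* arbitrary example port */

	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("connect");
		close(fd);
		return 1;
	}

	/* record-oriented send/recv would follow here */
	close(fd);
	return 0;
}

If the transport then sees a type mismatch (e.g. a STREAM packet arriving
on this SEQPACKET socket), the connection is reset, as described above.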
>
>Signed-off-by: Arseny Krasnov <arseny.krasnov@...persky.com>
>---
> net/vmw_vsock/virtio_transport_common.c | 79 ++++++++++++++++++-------
> 1 file changed, 58 insertions(+), 21 deletions(-)
>
>diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
>index fe1272e74517..c3e07eb1c666 100644
>--- a/net/vmw_vsock/virtio_transport_common.c
>+++ b/net/vmw_vsock/virtio_transport_common.c
>@@ -397,6 +397,14 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
> return err;
> }
>
>+static u16 virtio_transport_get_type(struct sock *sk)
>+{
>+ if (sk->sk_type == SOCK_STREAM)
>+ return VIRTIO_VSOCK_TYPE_STREAM;
>+ else
>+ return VIRTIO_VSOCK_TYPE_SEQPACKET;
>+}
>+
> static inline void virtio_transport_del_n_free_pkt(struct virtio_vsock_pkt *pkt)
> {
> list_del(&pkt->list);
>@@ -1050,39 +1058,49 @@ virtio_transport_recv_enqueue(struct vsock_sock *vsk,
> struct virtio_vsock_pkt *pkt)
> {
> struct virtio_vsock_sock *vvs = vsk->trans;
>- bool can_enqueue, free_pkt = false;
>+ bool free_pkt = false;
>
> pkt->len = le32_to_cpu(pkt->hdr.len);
> pkt->off = 0;
>
> spin_lock_bh(&vvs->rx_lock);
>
>- can_enqueue = virtio_transport_inc_rx_pkt(vvs, pkt);
>- if (!can_enqueue) {
>+ if (!virtio_transport_inc_rx_pkt(vvs, pkt)) {
> free_pkt = true;
> goto out;
> }
>
>- /* Try to copy small packets into the buffer of last packet queued,
>- * to avoid wasting memory queueing the entire buffer with a small
>- * payload.
>- */
>- if (pkt->len <= GOOD_COPY_LEN && !list_empty(&vvs->rx_queue)) {
>- struct virtio_vsock_pkt *last_pkt;
>+ switch (le32_to_cpu(pkt->hdr.type)) {
^
hdr.type is __le16, so please use le16_to_cpu()
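
i.e. the switch could read something like this (untested):

	switch (le16_to_cpu(pkt->hdr.type)) {

the same accessor is already used for hdr.op a few hunks below.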
>+ case VIRTIO_VSOCK_TYPE_STREAM: {
>+ /* Try to copy small packets into the buffer of last packet queued,
>+ * to avoid wasting memory queueing the entire buffer with a small
>+ * payload.
>+ */
>+ if (pkt->len <= GOOD_COPY_LEN && !list_empty(&vvs->rx_queue)) {
>+ struct virtio_vsock_pkt *last_pkt;
>
>- last_pkt = list_last_entry(&vvs->rx_queue,
>- struct virtio_vsock_pkt, list);
>+ last_pkt = list_last_entry(&vvs->rx_queue,
>+ struct virtio_vsock_pkt, list);
>
>- /* If there is space in the last packet queued, we copy the
>- * new packet in its buffer.
>- */
>- if (pkt->len <= last_pkt->buf_len - last_pkt->len) {
>- memcpy(last_pkt->buf + last_pkt->len, pkt->buf,
>- pkt->len);
>- last_pkt->len += pkt->len;
>- free_pkt = true;
>- goto out;
>+ /* If there is space in the last packet queued, we copy the
>+ * new packet in its buffer.
>+ */
>+ if (pkt->len <= last_pkt->buf_len - last_pkt->len) {
>+ memcpy(last_pkt->buf + last_pkt->len, pkt->buf,
>+ pkt->len);
>+ last_pkt->len += pkt->len;
>+ free_pkt = true;
>+ goto out;
>+ }
> }
>+
>+ break;
>+ }
>+ case VIRTIO_VSOCK_TYPE_SEQPACKET: {
>+ break;
>+ }
>+ default:
>+ goto out;
> }
>
> list_add_tail(&pkt->list, &vvs->rx_queue);
>@@ -1101,6 +1119,14 @@ virtio_transport_recv_connected(struct sock *sk,
> int err = 0;
>
> switch (le16_to_cpu(pkt->hdr.op)) {
>+ case VIRTIO_VSOCK_OP_SEQ_BEGIN: {
>+ struct virtio_vsock_sock *vvs = vsk->trans;
>+
>+ spin_lock_bh(&vvs->rx_lock);
>+ list_add_tail(&pkt->list, &vvs->rx_queue);
>+ spin_unlock_bh(&vvs->rx_lock);
>+ return err;
>+ }
> case VIRTIO_VSOCK_OP_RW:
> virtio_transport_recv_enqueue(vsk, pkt);
> sk->sk_data_ready(sk);
>@@ -1247,6 +1273,12 @@ virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt,
> return 0;
> }
>
>+static bool virtio_transport_valid_type(u16 type)
>+{
>+ return (type == VIRTIO_VSOCK_TYPE_STREAM) ||
>+ (type == VIRTIO_VSOCK_TYPE_SEQPACKET);
>+}
>+
> /* We are under the virtio-vsock's vsock->rx_lock or vhost-vsock's vq->mutex
> * lock.
> */
>@@ -1272,7 +1304,7 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
> le32_to_cpu(pkt->hdr.buf_alloc),
> le32_to_cpu(pkt->hdr.fwd_cnt));
>
>- if (le16_to_cpu(pkt->hdr.type) != VIRTIO_VSOCK_TYPE_STREAM) {
>+ if (!virtio_transport_valid_type(le16_to_cpu(pkt->hdr.type))) {
> (void)virtio_transport_reset_no_sock(t, pkt);
> goto free_pkt;
> }
>@@ -1289,6 +1321,11 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
> }
> }
>
>+ if (virtio_transport_get_type(sk) != le16_to_cpu(pkt->hdr.type)) {
>+ (void)virtio_transport_reset_no_sock(t, pkt);
>+ goto free_pkt;
>+ }
>+
> vsk = vsock_sk(sk);
>
> space_available = virtio_transport_space_update(sk, pkt);
>--
>2.25.1
>