[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20210118151535.ugjshvej3lrpnp3d@steredhat>
Date: Mon, 18 Jan 2021 16:15:35 +0100
From: Stefano Garzarella <sgarzare@...hat.com>
To: Arseny Krasnov <arseny.krasnov@...persky.com>
Cc: Stefan Hajnoczi <stefanha@...hat.com>,
"Michael S. Tsirkin" <mst@...hat.com>,
Jason Wang <jasowang@...hat.com>,
"David S. Miller" <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>,
Jorgen Hansen <jhansen@...are.com>,
Colin Ian King <colin.king@...onical.com>,
Andra Paraschiv <andraprs@...zon.com>,
Jeff Vander Stoep <jeffv@...gle.com>, kvm@...r.kernel.org,
virtualization@...ts.linux-foundation.org, netdev@...r.kernel.org,
linux-kernel@...r.kernel.org, stsp2@...dex.ru, oxffffaa@...il.com
Subject: Re: [RFC PATCH v2 11/13] virtio/vsock: rest of SOCK_SEQPACKET support
On Fri, Jan 15, 2021 at 08:44:22AM +0300, Arseny Krasnov wrote:
>This adds rest of logic for SEQPACKET:
>1) Shared functions for packet sending now set valid type of packet
> according socket type.
>2) SEQPACKET specific function like SEQ_BEGIN send and data dequeue.
>3) Ops for virtio transport.
>4) TAP support for SEQPACKET is not so easy if it is necessary to send
> whole record to TAP interface. This could be done by allocating
> new packet when whole record is received, data of record must be
> copied to TAP packet.
>
>Signed-off-by: Arseny Krasnov <arseny.krasnov@...persky.com>
>---
> include/linux/virtio_vsock.h | 7 ++++
> net/vmw_vsock/virtio_transport.c | 4 ++
> net/vmw_vsock/virtio_transport_common.c | 54 ++++++++++++++++++++++---
> 3 files changed, 59 insertions(+), 6 deletions(-)
>
>diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
>index af8705ea8b95..ad9783df97c9 100644
>--- a/include/linux/virtio_vsock.h
>+++ b/include/linux/virtio_vsock.h
>@@ -84,7 +84,14 @@ virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
> struct msghdr *msg,
> size_t len, int flags);
>
>+bool virtio_transport_seqpacket_seq_send_len(struct vsock_sock *vsk, size_t len);
> size_t virtio_transport_seqpacket_seq_get_len(struct vsock_sock *vsk);
>+ssize_t
>+virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk,
>+ struct msghdr *msg,
>+ size_t len,
>+ int type);
>+
> s64 virtio_transport_stream_has_data(struct vsock_sock *vsk);
> s64 virtio_transport_stream_has_space(struct vsock_sock *vsk);
>
>diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
>index 2700a63ab095..5a7ab1befee8 100644
>--- a/net/vmw_vsock/virtio_transport.c
>+++ b/net/vmw_vsock/virtio_transport.c
>@@ -469,6 +469,10 @@ static struct virtio_transport virtio_transport = {
> .stream_is_active = virtio_transport_stream_is_active,
> .stream_allow = virtio_transport_stream_allow,
>
>+ .seqpacket_seq_send_len = virtio_transport_seqpacket_seq_send_len,
>+ .seqpacket_seq_get_len = virtio_transport_seqpacket_seq_get_len,
>+ .seqpacket_dequeue = virtio_transport_seqpacket_dequeue,
>+
> .notify_poll_in = virtio_transport_notify_poll_in,
> .notify_poll_out = virtio_transport_notify_poll_out,
> .notify_recv_init = virtio_transport_notify_recv_init,
>diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
>index c3e07eb1c666..5fdf1adfdaab 100644
>--- a/net/vmw_vsock/virtio_transport_common.c
>+++ b/net/vmw_vsock/virtio_transport_common.c
>@@ -139,6 +139,7 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
> break;
> case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
> case VIRTIO_VSOCK_OP_CREDIT_REQUEST:
>+ case VIRTIO_VSOCK_OP_SEQ_BEGIN:
> hdr->op = cpu_to_le16(AF_VSOCK_OP_CONTROL);
> break;
> default:
>@@ -157,6 +158,10 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
>
> void virtio_transport_deliver_tap_pkt(struct virtio_vsock_pkt *pkt)
> {
>+ /* TODO: implement tap support for SOCK_SEQPACKET. */
>+ if (le32_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_SEQPACKET)
^
hdr.type is __le16, so please use le16_to_cpu()
>+ return;
>+
> if (pkt->tap_delivered)
> return;
>
>@@ -405,6 +410,19 @@ static u16 virtio_transport_get_type(struct sock *sk)
> return VIRTIO_VSOCK_TYPE_SEQPACKET;
> }
>
>+bool virtio_transport_seqpacket_seq_send_len(struct vsock_sock *vsk, size_t len)
>+{
>+ struct virtio_vsock_pkt_info info = {
>+ .type = VIRTIO_VSOCK_TYPE_SEQPACKET,
>+ .op = VIRTIO_VSOCK_OP_SEQ_BEGIN,
>+ .vsk = vsk,
>+ .flags = len
>+ };
>+
>+ return virtio_transport_send_pkt_info(vsk, &info);
>+}
>+EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_seq_send_len);
>+
> static inline void virtio_transport_del_n_free_pkt(struct virtio_vsock_pkt *pkt)
> {
> list_del(&pkt->list);
>@@ -576,6 +594,18 @@ virtio_transport_stream_dequeue(struct vsock_sock *vsk,
> }
> EXPORT_SYMBOL_GPL(virtio_transport_stream_dequeue);
>
>+ssize_t
>+virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk,
>+ struct msghdr *msg,
>+ size_t len, int flags)
>+{
>+ if (flags & MSG_PEEK)
>+ return -EOPNOTSUPP;
>+
>+ return virtio_transport_seqpacket_do_dequeue(vsk, msg, len);
>+}
>+EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_dequeue);
>+
> int
> virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
> struct msghdr *msg,
>@@ -659,13 +689,15 @@ EXPORT_SYMBOL_GPL(virtio_transport_do_socket_init);
> void virtio_transport_notify_buffer_size(struct vsock_sock *vsk, u64 *val)
> {
> struct virtio_vsock_sock *vvs = vsk->trans;
>+ int type;
>
> if (*val > VIRTIO_VSOCK_MAX_BUF_SIZE)
> *val = VIRTIO_VSOCK_MAX_BUF_SIZE;
>
> vvs->buf_alloc = *val;
>
>- virtio_transport_send_credit_update(vsk, VIRTIO_VSOCK_TYPE_STREAM,
>+ type = virtio_transport_get_type(sk_vsock(vsk));
>+ virtio_transport_send_credit_update(vsk, type,
> NULL);
With this change, you can move 'NULL' to the previous line.
> }
> EXPORT_SYMBOL_GPL(virtio_transport_notify_buffer_size);
>@@ -793,10 +825,11 @@ int virtio_transport_connect(struct vsock_sock *vsk)
> {
> struct virtio_vsock_pkt_info info = {
> .op = VIRTIO_VSOCK_OP_REQUEST,
>- .type = VIRTIO_VSOCK_TYPE_STREAM,
> .vsk = vsk,
> };
>
>+ info.type = virtio_transport_get_type(sk_vsock(vsk));
>+
> return virtio_transport_send_pkt_info(vsk, &info);
> }
> EXPORT_SYMBOL_GPL(virtio_transport_connect);
>@@ -805,7 +838,6 @@ int virtio_transport_shutdown(struct vsock_sock *vsk, int mode)
> {
> struct virtio_vsock_pkt_info info = {
> .op = VIRTIO_VSOCK_OP_SHUTDOWN,
>- .type = VIRTIO_VSOCK_TYPE_STREAM,
> .flags = (mode & RCV_SHUTDOWN ?
> VIRTIO_VSOCK_SHUTDOWN_RCV : 0) |
> (mode & SEND_SHUTDOWN ?
>@@ -813,6 +845,8 @@ int virtio_transport_shutdown(struct vsock_sock *vsk, int mode)
> .vsk = vsk,
> };
>
>+ info.type = virtio_transport_get_type(sk_vsock(vsk));
>+
> return virtio_transport_send_pkt_info(vsk, &info);
> }
> EXPORT_SYMBOL_GPL(virtio_transport_shutdown);
>@@ -834,12 +868,18 @@ virtio_transport_stream_enqueue(struct vsock_sock *vsk,
> {
> struct virtio_vsock_pkt_info info = {
> .op = VIRTIO_VSOCK_OP_RW,
>- .type = VIRTIO_VSOCK_TYPE_STREAM,
> .msg = msg,
> .pkt_len = len,
> .vsk = vsk,
>+ .flags = 0,
> };
>
>+ info.type = virtio_transport_get_type(sk_vsock(vsk));
>+
>+ if (info.type == VIRTIO_VSOCK_TYPE_SEQPACKET &&
>+ msg->msg_flags & MSG_EOR)
>+ info.flags |= VIRTIO_VSOCK_RW_EOR;
>+
> return virtio_transport_send_pkt_info(vsk, &info);
> }
> EXPORT_SYMBOL_GPL(virtio_transport_stream_enqueue);
>@@ -857,7 +897,6 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
> {
> struct virtio_vsock_pkt_info info = {
> .op = VIRTIO_VSOCK_OP_RST,
>- .type = VIRTIO_VSOCK_TYPE_STREAM,
> .reply = !!pkt,
> .vsk = vsk,
> };
>@@ -866,6 +905,8 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
> if (pkt && le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
> return 0;
>
>+ info.type = virtio_transport_get_type(sk_vsock(vsk));
>+
> return virtio_transport_send_pkt_info(vsk, &info);
> }
>
>@@ -1177,13 +1218,14 @@ virtio_transport_send_response(struct vsock_sock *vsk,
> {
> struct virtio_vsock_pkt_info info = {
> .op = VIRTIO_VSOCK_OP_RESPONSE,
>- .type = VIRTIO_VSOCK_TYPE_STREAM,
> .remote_cid = le64_to_cpu(pkt->hdr.src_cid),
> .remote_port = le32_to_cpu(pkt->hdr.src_port),
> .reply = true,
> .vsk = vsk,
> };
>
>+ info.type = virtio_transport_get_type(sk_vsock(vsk));
>+
> return virtio_transport_send_pkt_info(vsk, &info);
> }
>
>--
>2.25.1
>
Powered by blists - more mailing lists