Message-ID: <ZAilqWqzSgpFNLCy@bullseye>
Date: Wed, 8 Mar 2023 15:11:37 +0000
From: Bobby Eshleman <bobbyeshleman@...il.com>
To: Arseniy Krasnov <avkrasnov@...rdevices.ru>
Cc: Stefan Hajnoczi <stefanha@...hat.com>,
Stefano Garzarella <sgarzare@...hat.com>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Bobby Eshleman <bobby.eshleman@...edance.com>,
kvm@...r.kernel.org, virtualization@...ts.linux-foundation.org,
netdev@...r.kernel.org, linux-kernel@...r.kernel.org,
kernel@...rdevices.ru, oxffffaa@...il.com
Subject: Re: [PATCH RESEND net v4 1/4] virtio/vsock: don't use skbuff state
to account credit

On Tue, Mar 14, 2023 at 02:05:48PM +0300, Arseniy Krasnov wrote:
> 'skb->len' can vary when we partially read the data, which complicates
> the calculation of the credit to be updated in
> 'virtio_transport_inc_rx_pkt()'/'virtio_transport_dec_rx_pkt()'.
>
> Also, in 'virtio_transport_dec_rx_pkt()' we were miscalculating the
> credit, since the 'skb->len' term in the calculation was redundant.
>
> For these reasons, let's replace the use of skbuff state for computing
> the new 'rx_bytes'/'fwd_cnt' values with an explicit length passed as an
> input argument. This makes the code simpler, because the skbuff state no
> longer has to be adjusted before each 'rx_bytes'/'fwd_cnt' update.
>
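Right, and just to make the failure mode concrete for myself: once the
reader starts consuming the payload in pieces, skb->len no longer tells
us how much credit the packet originally consumed; only the length
recorded in the header does. A tiny user-space model of that (my own
simplification, not the kernel code paths; "hdr_len" stands in for
le32_to_cpu(virtio_vsock_hdr(skb)->len)):

#include <assert.h>
#include <stdio.h>

struct toy_vvs {
	unsigned int rx_bytes;
	unsigned int fwd_cnt;
};

/* Mirrors the shape of the new helper: the credit update comes from an
 * explicit length, not from skb state. */
static void toy_dec_rx_pkt(struct toy_vvs *vvs, unsigned int len)
{
	vvs->rx_bytes -= len;
	vvs->fwd_cnt += len;
}

int main(void)
{
	unsigned int hdr_len = 4096;		/* original payload size */
	unsigned int skb_len = hdr_len;		/* stands in for skb->len */
	struct toy_vvs vvs = { .rx_bytes = hdr_len, .fwd_cnt = 0 };

	/* Two partial reads; each skb_pull() shrinks skb->len. */
	skb_len -= 1024;
	skb_len -= 3072;

	/* skb_len is now 0, so it can't tell us how much credit to
	 * return; the header length has to be carried separately. */
	toy_dec_rx_pkt(&vvs, hdr_len);

	assert(vvs.rx_bytes == 0);
	assert(vvs.fwd_cnt == hdr_len);
	printf("returned %u bytes of credit\n", vvs.fwd_cnt);
	return 0;
}
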
> Fixes: 71dc9ec9ac7d ("virtio/vsock: replace virtio_vsock_pkt with sk_buff")
> Signed-off-by: Arseniy Krasnov <AVKrasnov@...rdevices.ru>
> Reviewed-by: Stefano Garzarella <sgarzare@...hat.com>
> ---
> net/vmw_vsock/virtio_transport_common.c | 23 +++++++++++------------
> 1 file changed, 11 insertions(+), 12 deletions(-)
>
> diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
> index a1581c77cf84..618680fd9906 100644
> --- a/net/vmw_vsock/virtio_transport_common.c
> +++ b/net/vmw_vsock/virtio_transport_common.c
> @@ -241,21 +241,18 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
> }
>
> static bool virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
> - struct sk_buff *skb)
> + u32 len)
> {
> - if (vvs->rx_bytes + skb->len > vvs->buf_alloc)
> + if (vvs->rx_bytes + len > vvs->buf_alloc)
> return false;
>
> - vvs->rx_bytes += skb->len;
> + vvs->rx_bytes += len;
> return true;
> }
>
> static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs,
> - struct sk_buff *skb)
> + u32 len)
> {
> - int len;
> -
> - len = skb_headroom(skb) - sizeof(struct virtio_vsock_hdr) - skb->len;
> vvs->rx_bytes -= len;
> vvs->fwd_cnt += len;
> }
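
Nice that the pair is symmetric now. If it's useful, here is a quick
user-space sketch (again my own toy code, not the kernel's) of the
invariant the explicit length gives us: whatever was charged against
'buf_alloc' on enqueue is exactly what lands in 'fwd_cnt' once the skb
is fully consumed:

#include <assert.h>

struct toy_vvs {
	unsigned int rx_bytes;
	unsigned int fwd_cnt;
	unsigned int buf_alloc;
};

/* Same shape as the new helpers: both take the explicit length. */
static int toy_inc_rx_pkt(struct toy_vvs *vvs, unsigned int len)
{
	if (vvs->rx_bytes + len > vvs->buf_alloc)
		return 0;
	vvs->rx_bytes += len;
	return 1;
}

static void toy_dec_rx_pkt(struct toy_vvs *vvs, unsigned int len)
{
	vvs->rx_bytes -= len;
	vvs->fwd_cnt += len;
}

int main(void)
{
	struct toy_vvs vvs = { .rx_bytes = 0, .fwd_cnt = 0, .buf_alloc = 65536 };
	unsigned int len = 4096;	/* le32_to_cpu(hdr->len) in the real code */

	assert(toy_inc_rx_pkt(&vvs, len));	/* enqueue: charge the buffer */
	toy_dec_rx_pkt(&vvs, len);		/* fully read: return the same amount */

	assert(vvs.rx_bytes == 0 && vvs.fwd_cnt == len);
	return 0;
}
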
> @@ -388,7 +385,9 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
> skb_pull(skb, bytes);
>
> if (skb->len == 0) {
> - virtio_transport_dec_rx_pkt(vvs, skb);
> + u32 pkt_len = le32_to_cpu(virtio_vsock_hdr(skb)->len);
> +
> + virtio_transport_dec_rx_pkt(vvs, pkt_len);
> consume_skb(skb);
> } else {
> __skb_queue_head(&vvs->rx_queue, skb);
> @@ -437,17 +436,17 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
>
> while (!msg_ready) {
> struct virtio_vsock_hdr *hdr;
> + size_t pkt_len;
>
> skb = __skb_dequeue(&vvs->rx_queue);
> if (!skb)
> break;
> hdr = virtio_vsock_hdr(skb);
> + pkt_len = (size_t)le32_to_cpu(hdr->len);
>
> if (dequeued_len >= 0) {
> - size_t pkt_len;
> size_t bytes_to_copy;
>
> - pkt_len = (size_t)le32_to_cpu(hdr->len);
> bytes_to_copy = min(user_buf_len, pkt_len);
>
> if (bytes_to_copy) {
> @@ -484,7 +483,7 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
> msg->msg_flags |= MSG_EOR;
> }
>
> - virtio_transport_dec_rx_pkt(vvs, skb);
> + virtio_transport_dec_rx_pkt(vvs, pkt_len);
> kfree_skb(skb);
> }
>
> @@ -1040,7 +1039,7 @@ virtio_transport_recv_enqueue(struct vsock_sock *vsk,
>
> spin_lock_bh(&vvs->rx_lock);
>
> - can_enqueue = virtio_transport_inc_rx_pkt(vvs, skb);
> + can_enqueue = virtio_transport_inc_rx_pkt(vvs, len);
> if (!can_enqueue) {
> free_pkt = true;
> goto out;
> --
> 2.25.1

Acked-by: Bobby Eshleman <bobby.eshleman@...edance.com>