Message-ID: <CA+FuTSfQoDr0jd76xBXSvchhyihQaL2UQXeCR6frJ7hyXxbmVA@mail.gmail.com>
Date: Wed, 9 Dec 2020 09:43:13 -0500
From: Willem de Bruijn <willemdebruijn.kernel@...il.com>
To: wangyunjian <wangyunjian@...wei.com>
Cc: "Michael S. Tsirkin" <mst@...hat.com>,
Jason Wang <jasowang@...hat.com>,
virtualization@...ts.linux-foundation.org,
Network Development <netdev@...r.kernel.org>,
jerry.lilijun@...wei.com, chenchanghu@...wei.com,
xudingke@...wei.com
Subject: Re: [PATCH net v2] tun: fix ubuf refcount incorrectly on error path
On Wed, Dec 9, 2020 at 8:03 AM wangyunjian <wangyunjian@...wei.com> wrote:
>
> From: Yunjian Wang <wangyunjian@...wei.com>
>
> After setting the callback for the skb's ubuf_info, the callback
> (vhost_net_zerocopy_callback) will be called to decrease
> the refcount when the skb is freed. But when an exception occurs
With "exception", you mean that tun_get_user returns an error which
propagates to the sendmsg call in vhost handle_tx, correct?
> afterwards, the error handling in vhost handle_tx() will
> try to decrease the same refcount again. This is wrong; fix
> it by delaying the copy of ubuf_info until we are sure
> there are no errors.
I think the right approach is to address this in the error paths,
rather than complicate the normal datapath.
Is it sufficient to suppress the call to vhost_net_ubuf_put in the
handle_tx sendmsg error path, given that vhost_zerocopy_callback
will be called on kfree_skb?
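
Roughly (untested, and assuming the ubuf has already been attached to
the skb by the time any error can be returned), that would mean
something like this in the sendmsg error path of handle_tx_zerocopy():

	err = sock->ops->sendmsg(sock, &msg, len);
	if (unlikely(err < 0)) {
		if (zcopy_used) {
			/* no vhost_net_ubuf_put() here: the reference
			 * is dropped by vhost_zerocopy_callback() when
			 * tun frees the skb on its error path
			 */
			nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
					 % UIO_MAXIOV;
		}
		...
	}
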
Or alternatively clear the destructor in drop:
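
That could be something like this (untested), just before the
kfree_skb() in the drop: path of tun_get_user(), so that freeing the
skb there no longer invokes the callback and the handle_tx error path
keeps sole ownership of the reference:

	if (zerocopy && !IS_ERR_OR_NULL(skb)) {
		/* suppress vhost_zerocopy_callback(); the error
		 * return lets handle_tx() put the reference instead
		 */
		skb_shinfo(skb)->destructor_arg = NULL;
		skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	}
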
>
> Fixes: 4477138fa0ae ("tun: properly test for IFF_UP")
> Fixes: 90e33d459407 ("tun: enable napi_gro_frags() for TUN/TAP driver")
>
> Signed-off-by: Yunjian Wang <wangyunjian@...wei.com>
> ---
> v2:
> Updated code, fix by delaying the copy of ubuf_info
> ---
> drivers/net/tun.c | 29 +++++++++++++++++++----------
> 1 file changed, 19 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/net/tun.c b/drivers/net/tun.c
> index 2dc1988a8973..2ea822328e73 100644
> --- a/drivers/net/tun.c
> +++ b/drivers/net/tun.c
> @@ -1637,6 +1637,20 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
> return NULL;
> }
>
> +/* copy ubuf_info for callback when skb has no error */
> +static inline void tun_copy_ubuf_info(struct sk_buff *skb, bool zerocopy, void *msg_control)
> +{
> + if (zerocopy) {
> + skb_shinfo(skb)->destructor_arg = msg_control;
> + skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
> + skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
> + } else if (msg_control) {
> + struct ubuf_info *uarg = msg_control;
> +
> + uarg->callback(uarg, false);
> + }
> +}
> +
> /* Get packet from user space buffer */
> static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
> void *msg_control, struct iov_iter *from,
> @@ -1812,16 +1826,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
> break;
> }
>
> - /* copy skb_ubuf_info for callback when skb has no error */
> - if (zerocopy) {
> - skb_shinfo(skb)->destructor_arg = msg_control;
> - skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
> - skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
> - } else if (msg_control) {
> - struct ubuf_info *uarg = msg_control;
> - uarg->callback(uarg, false);
> - }
> -
> skb_reset_network_header(skb);
> skb_probe_transport_header(skb);
> skb_record_rx_queue(skb, tfile->queue_index);
> @@ -1830,6 +1834,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
> struct bpf_prog *xdp_prog;
> int ret;
>
> + tun_copy_ubuf_info(skb, zerocopy, msg_control);
> local_bh_disable();
> rcu_read_lock();
> xdp_prog = rcu_dereference(tun->xdp_prog);
> @@ -1881,6 +1886,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
> return -ENOMEM;
> }
>
> + tun_copy_ubuf_info(skb, zerocopy, msg_control);
> local_bh_disable();
> napi_gro_frags(&tfile->napi);
> local_bh_enable();
> @@ -1889,6 +1895,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
> struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
> int queue_len;
>
> + tun_copy_ubuf_info(skb, zerocopy, msg_control);
> spin_lock_bh(&queue->lock);
> __skb_queue_tail(queue, skb);
> queue_len = skb_queue_len(queue);
> @@ -1899,8 +1906,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
>
> local_bh_enable();
> } else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
> + tun_copy_ubuf_info(skb, zerocopy, msg_control);
> tun_rx_batched(tun, tfile, skb, more);
> } else {
> + tun_copy_ubuf_info(skb, zerocopy, msg_control);
> netif_rx_ni(skb);
> }
> rcu_read_unlock();
> --
> 2.23.0
>