Message-Id: <20180524093019.778543667@linuxfoundation.org>
Date: Thu, 24 May 2018 11:37:16 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org,
syzbot+e8b902c3c3fadf0a9dba@...kaller.appspotmail.com,
Eric Dumazet <eric.dumazet@...il.com>,
Cong Wang <xiyou.wangcong@...il.com>,
"Michael S. Tsirkin" <mst@...hat.com>,
Jason Wang <jasowang@...hat.com>,
"David S. Miller" <davem@...emloft.net>
Subject: [PATCH 4.16 011/161] tun: fix use after free for ptr_ring
4.16-stable review patch. If anyone has any objections, please let me know.
------------------
From: Jason Wang <jasowang@...hat.com>
[ Upstream commit b196d88aba8ac72b775137854121097f4c4c6862 ]
We used to initialize the ptr_ring during TUNSETIFF because its size
depends on the tx_queue_len of the netdevice, and we tried to clean it
up when the socket was detached from the netdevice. A race was spotted
where such an uninit during a concurrent read leads to a use after free
of the pointer ring. Solve this by always initializing a zero-size
ptr_ring in open() and resizing it during TUNSETIFF; we can then safely
do cleanup during close(). With this, there's no need for the
workaround that was introduced by commit 4df0bfc79904 ("tun: fix a
memory leak for tfile->tx_array").
Reported-by: syzbot+e8b902c3c3fadf0a9dba@...kaller.appspotmail.com
Cc: Eric Dumazet <eric.dumazet@...il.com>
Cc: Cong Wang <xiyou.wangcong@...il.com>
Cc: Michael S. Tsirkin <mst@...hat.com>
Fixes: 1576d9860599 ("tun: switch to use skb array for tx")
Signed-off-by: Jason Wang <jasowang@...hat.com>
Acked-by: Michael S. Tsirkin <mst@...hat.com>
Signed-off-by: David S. Miller <davem@...emloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
drivers/net/tun.c | 27 ++++++++++++---------------
1 file changed, 12 insertions(+), 15 deletions(-)
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -680,15 +680,6 @@ static void tun_queue_purge(struct tun_f
skb_queue_purge(&tfile->sk.sk_error_queue);
}
-static void tun_cleanup_tx_ring(struct tun_file *tfile)
-{
- if (tfile->tx_ring.queue) {
- ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
- xdp_rxq_info_unreg(&tfile->xdp_rxq);
- memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
- }
-}
-
static void __tun_detach(struct tun_file *tfile, bool clean)
{
struct tun_file *ntfile;
@@ -735,7 +726,8 @@ static void __tun_detach(struct tun_file
tun->dev->reg_state == NETREG_REGISTERED)
unregister_netdevice(tun->dev);
}
- tun_cleanup_tx_ring(tfile);
+ if (tun)
+ xdp_rxq_info_unreg(&tfile->xdp_rxq);
sock_put(&tfile->sk);
}
}
@@ -775,14 +767,14 @@ static void tun_detach_all(struct net_de
tun_napi_del(tun, tfile);
/* Drop read queue */
tun_queue_purge(tfile);
+ xdp_rxq_info_unreg(&tfile->xdp_rxq);
sock_put(&tfile->sk);
- tun_cleanup_tx_ring(tfile);
}
list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
tun_enable_queue(tfile);
tun_queue_purge(tfile);
+ xdp_rxq_info_unreg(&tfile->xdp_rxq);
sock_put(&tfile->sk);
- tun_cleanup_tx_ring(tfile);
}
BUG_ON(tun->numdisabled != 0);
@@ -826,7 +818,8 @@ static int tun_attach(struct tun_struct
}
if (!tfile->detached &&
- ptr_ring_init(&tfile->tx_ring, dev->tx_queue_len, GFP_KERNEL)) {
+ ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
+ GFP_KERNEL, tun_ptr_free)) {
err = -ENOMEM;
goto out;
}
@@ -3131,6 +3124,11 @@ static int tun_chr_open(struct inode *in
&tun_proto, 0);
if (!tfile)
return -ENOMEM;
+ if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
+ sk_free(&tfile->sk);
+ return -ENOMEM;
+ }
+
RCU_INIT_POINTER(tfile->tun, NULL);
tfile->flags = 0;
tfile->ifindex = 0;
@@ -3151,8 +3149,6 @@ static int tun_chr_open(struct inode *in
sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
- memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
-
return 0;
}
@@ -3161,6 +3157,7 @@ static int tun_chr_close(struct inode *i
struct tun_file *tfile = file->private_data;
tun_detach(tfile, true);
+ ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
return 0;
}