Message-ID: <20180409002738.163941-149-alexander.levin@microsoft.com>
Date: Mon, 9 Apr 2018 00:29:46 +0000
From: Sasha Levin <Alexander.Levin@...rosoft.com>
To: "stable@...r.kernel.org" <stable@...r.kernel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
CC: Ross Lagerwall <ross.lagerwall@...rix.com>,
Juergen Gross <jgross@...e.com>,
Sasha Levin <Alexander.Levin@...rosoft.com>
Subject: [PATCH AUTOSEL for 4.4 149/162] xen-netfront: Fix race between device
setup and open
From: Ross Lagerwall <ross.lagerwall@...rix.com>

[ Upstream commit f599c64fdf7d9c108e8717fb04bc41c680120da4 ]

When a netfront device is set up it registers a netdev fairly early on,
before it has set up the queues and is actually usable. A userspace tool
like NetworkManager will immediately try to open it and access its state
as soon as it appears. The bug can be reproduced by hotplugging VIFs
until the VM runs out of grant refs. It registers the netdev but fails
to set up any queues (since there are no more grant refs). In the
meantime, NetworkManager opens the device and the kernel crashes trying
to access the queues (of which there are none).

Fix this in two ways:

* For initial setup, register the netdev much later, after the queues
are set up. This avoids the race entirely.

* During a suspend/resume cycle, the frontend reconnects to the backend
and the queues are recreated. It is possible (though highly unlikely) to
race with something opening the device and accessing the queues after
they have been destroyed but before they have been recreated. Extend the
region covered by the rtnl semaphore to protect against this race. There
is a possibility that we fail to recreate the queues, so check for this
in the open function.
Signed-off-by: Ross Lagerwall <ross.lagerwall@...rix.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@...cle.com>
Signed-off-by: Juergen Gross <jgross@...e.com>
Signed-off-by: Sasha Levin <alexander.levin@...rosoft.com>
---
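
A minimal, self-contained sketch of the pattern described in the changelog
above. It is not part of the patch and is purely illustrative: the names
(fake_dev, fake_open, fake_reconnect) are invented, and a pthread mutex
stands in for the rtnl semaphore (in the kernel the core already holds
rtnl around ndo_open, so the open path is protected without taking the
lock itself). The open path refuses to run while no queues exist, and the
destroy/recreate window is covered by the same lock, so a concurrent open
sees either no queues at all or fully constructed ones.

/*
 * Illustrative only, not kernel code. A pthread mutex stands in for
 * the rtnl semaphore, and all names here are made up for this sketch.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_queue {
	unsigned int id;
};

struct fake_dev {
	pthread_mutex_t lock;		/* plays the role of the rtnl semaphore */
	struct fake_queue *queues;	/* NULL until the queues are set up */
	unsigned int num_queues;
};

/* Mirrors the new check in the open path: no queues, no open. */
static int fake_open(struct fake_dev *dev)
{
	int err = 0;

	pthread_mutex_lock(&dev->lock);
	if (!dev->queues)
		err = -ENODEV;
	else
		printf("opened with %u queues\n", dev->num_queues);
	pthread_mutex_unlock(&dev->lock);

	return err;
}

/*
 * Mirrors the resume path: destroy and recreate the queues while holding
 * the same lock the open path runs under, so a concurrent open never sees
 * a half-torn-down queue array.
 */
static int fake_reconnect(struct fake_dev *dev, unsigned int n)
{
	int err = 0;

	pthread_mutex_lock(&dev->lock);

	free(dev->queues);
	dev->queues = calloc(n, sizeof(*dev->queues));
	dev->num_queues = dev->queues ? n : 0;
	if (!dev->queues)
		err = -ENOMEM;

	pthread_mutex_unlock(&dev->lock);

	return err;
}

int main(void)
{
	static struct fake_dev dev = { .lock = PTHREAD_MUTEX_INITIALIZER };

	/* Before any queues exist, open fails cleanly instead of crashing. */
	printf("early open: %d\n", fake_open(&dev));

	if (fake_reconnect(&dev, 4) == 0)
		printf("late open: %d\n", fake_open(&dev));

	free(dev.queues);
	return 0;
}

Compiled with cc -pthread, the early open fails with -ENODEV while the
open issued after fake_reconnect() succeeds, which is the behaviour the
patch wants from xennet_open() and xennet_connect().
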
 drivers/net/xen-netfront.c | 46 ++++++++++++++++++++++++----------------------
 1 file changed, 24 insertions(+), 22 deletions(-)

diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 0b8d2655985f..d612ed0a821c 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -342,6 +342,9 @@ static int xennet_open(struct net_device *dev)
 	unsigned int i = 0;
 	struct netfront_queue *queue = NULL;
 
+	if (!np->queues)
+		return -ENODEV;
+
 	for (i = 0; i < num_queues; ++i) {
 		queue = &np->queues[i];
 		napi_enable(&queue->napi);
@@ -1363,18 +1366,8 @@ static int netfront_probe(struct xenbus_device *dev,
 #ifdef CONFIG_SYSFS
 	info->netdev->sysfs_groups[0] = &xennet_dev_group;
 #endif
-	err = register_netdev(info->netdev);
-	if (err) {
-		pr_warn("%s: register_netdev err=%d\n", __func__, err);
-		goto fail;
-	}
 
 	return 0;
 
- fail:
-	xennet_free_netdev(netdev);
-	dev_set_drvdata(&dev->dev, NULL);
-	return err;
 }
 
 static void xennet_end_access(int ref, void *page)
@@ -1743,8 +1736,6 @@ static void xennet_destroy_queues(struct netfront_info *info)
 {
 	unsigned int i;
 
-	rtnl_lock();
-
 	for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
 		struct netfront_queue *queue = &info->queues[i];
 
@@ -1753,8 +1744,6 @@ static void xennet_destroy_queues(struct netfront_info *info)
 		netif_napi_del(&queue->napi);
 	}
 
-	rtnl_unlock();
-
 	kfree(info->queues);
 	info->queues = NULL;
 }
@@ -1770,8 +1759,6 @@ static int xennet_create_queues(struct netfront_info *info,
 	if (!info->queues)
 		return -ENOMEM;
 
-	rtnl_lock();
-
 	for (i = 0; i < *num_queues; i++) {
 		struct netfront_queue *queue = &info->queues[i];
 
@@ -1780,7 +1767,7 @@ static int xennet_create_queues(struct netfront_info *info,
 
 		ret = xennet_init_queue(queue);
 		if (ret < 0) {
-			dev_warn(&info->netdev->dev,
+			dev_warn(&info->xbdev->dev,
 				 "only created %d queues\n", i);
 			*num_queues = i;
 			break;
@@ -1794,10 +1781,8 @@ static int xennet_create_queues(struct netfront_info *info,
 
 	netif_set_real_num_tx_queues(info->netdev, *num_queues);
 
-	rtnl_unlock();
-
 	if (*num_queues == 0) {
-		dev_err(&info->netdev->dev, "no queues\n");
+		dev_err(&info->xbdev->dev, "no queues\n");
 		return -EINVAL;
 	}
 	return 0;
@@ -1839,6 +1824,7 @@ static int talk_to_netback(struct xenbus_device *dev,
 		goto out;
 	}
 
+	rtnl_lock();
 	if (info->queues)
 		xennet_destroy_queues(info);
 
@@ -1849,6 +1835,7 @@ static int talk_to_netback(struct xenbus_device *dev,
 		info->queues = NULL;
 		goto out;
 	}
+	rtnl_unlock();
 
 	/* Create shared ring, alloc event channel -- for each queue */
 	for (i = 0; i < num_queues; ++i) {
@@ -1945,8 +1932,10 @@ abort_transaction_no_dev_fatal:
 	xenbus_transaction_end(xbt, 1);
  destroy_ring:
 	xennet_disconnect_backend(info);
+	rtnl_lock();
 	xennet_destroy_queues(info);
  out:
+	rtnl_unlock();
 	device_unregister(&dev->dev);
 	return err;
 }
@@ -1982,6 +1971,15 @@ static int xennet_connect(struct net_device *dev)
 	netdev_update_features(dev);
 	rtnl_unlock();
 
+	if (dev->reg_state == NETREG_UNINITIALIZED) {
+		err = register_netdev(dev);
+		if (err) {
+			pr_warn("%s: register_netdev err=%d\n", __func__, err);
+			device_unregister(&np->xbdev->dev);
+			return err;
+		}
+	}
+
 	/*
 	 * All public and private state should now be sane. Get
 	 * ready to start sending and receiving packets and give the driver
@@ -2167,10 +2165,14 @@ static int xennet_remove(struct xenbus_device *dev)
 
 	xennet_disconnect_backend(info);
 
-	unregister_netdev(info->netdev);
+	if (info->netdev->reg_state == NETREG_REGISTERED)
+		unregister_netdev(info->netdev);
 
-	if (info->queues)
+	if (info->queues) {
+		rtnl_lock();
 		xennet_destroy_queues(info);
+		rtnl_unlock();
+	}
 	xennet_free_netdev(info->netdev);
 
 	return 0;
--
2.15.1