Message-Id: <1446035675-23960-1-git-send-email-tomas.winkler@intel.com>
Date:	Wed, 28 Oct 2015 14:34:34 +0200
From:	Tomas Winkler <tomas.winkler@...el.com>
To:	Greg Kroah-Hartman <gregkh@...uxfoundation.org>
Cc:	Alexander Usyskin <alexander.usyskin@...el.com>,
	linux-kernel@...r.kernel.org,
	Tomas Winkler <tomas.winkler@...el.com>
Subject: [char-misc-next 1/2] mei: bus: use correct lock ordering

The correct lock order is
  cl_bus_lock
    device_lock
      me_clients_rwsem

This order was violated in the bus rescan and remove routines,
where me_clients_rwsem was acquired before cl_bus_lock.
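
For illustration only (not part of the patch), the intended nesting looks
roughly like the sketch below; the helper name is made up, and device_lock
is assumed to be the driver's struct mutex, as in the lockdep report:

	static void example_correct_nesting(struct mei_device *bus)
	{
		mutex_lock(&bus->cl_bus_lock);		/* outermost */
		mutex_lock(&bus->device_lock);
		down_read(&bus->me_clients_rwsem);	/* innermost */

		/* ... walk bus->me_clients / bus->device_list ... */

		up_read(&bus->me_clients_rwsem);
		mutex_unlock(&bus->device_lock);
		mutex_unlock(&bus->cl_bus_lock);
	}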

Chain exists of:
[    4.321653]   &dev->device_lock --> &dev->me_clients_rwsem --> &dev->cl_bus_lock
[    4.321653]
[    4.321679]  Possible unsafe locking scenario:
[    4.321679]
[    4.321693]        CPU0                    CPU1
[    4.321701]        ----                    ----
[    4.321709]   lock(&dev->cl_bus_lock);
[    4.321720]                                lock(&dev->me_clients_rwsem);
[    4.321733]                                lock(&dev->cl_bus_lock);
[    4.321745]   lock(&dev->device_lock);
[    4.321755]
[    4.321755]  *** DEADLOCK ***
[    4.321755]
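
The fix below moves the cl_bus_lock acquisition out to the callers and
documents the requirement in the callees with a runtime check. A minimal
sketch of that caller-locks pattern (function names here are hypothetical,
not taken from the driver):

	static void example_callee(struct mei_device *bus)
	{
		/* callee only asserts, never takes, cl_bus_lock */
		WARN_ON(!mutex_is_locked(&bus->cl_bus_lock));

		/* ... manipulate bus->device_list ... */
	}

	static void example_caller(struct mei_device *bus)
	{
		mutex_lock(&bus->cl_bus_lock);
		example_callee(bus);
		mutex_unlock(&bus->cl_bus_lock);
	}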

Signed-off-by: Tomas Winkler <tomas.winkler@...el.com>
---
 drivers/misc/mei/bus.c | 18 +++++++++++++-----
 1 file changed, 13 insertions(+), 5 deletions(-)

diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 160e084ae800..46403e48be4f 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -830,17 +830,20 @@ static void mei_cl_bus_dev_stop(struct mei_cl_device *cldev)
  * mei_cl_bus_dev_destroy - destroy me client devices object
  *
  * @cldev: me client device
+ *
+ * Locking: called under "dev->cl_bus_lock" lock
  */
 static void mei_cl_bus_dev_destroy(struct mei_cl_device *cldev)
 {
+
+	WARN_ON(!mutex_is_locked(&cldev->bus->cl_bus_lock));
+
 	if (!cldev->is_added)
 		return;
 
 	device_del(&cldev->dev);
 
-	mutex_lock(&cldev->bus->cl_bus_lock);
 	list_del_init(&cldev->bus_list);
-	mutex_unlock(&cldev->bus->cl_bus_lock);
 
 	cldev->is_added = 0;
 	put_device(&cldev->dev);
@@ -866,8 +869,10 @@ void mei_cl_bus_remove_devices(struct mei_device *bus)
 {
 	struct mei_cl_device *cldev, *next;
 
+	mutex_lock(&bus->cl_bus_lock);
 	list_for_each_entry_safe(cldev, next, &bus->device_list, bus_list)
 		mei_cl_bus_remove_device(cldev);
+	mutex_unlock(&bus->cl_bus_lock);
 }
 
 
@@ -877,12 +882,16 @@ void mei_cl_bus_remove_devices(struct mei_device *bus)
  *
  * @bus: mei device
  * @me_cl: me client
+ *
+ * Locking: called under "dev->cl_bus_lock" lock
  */
 static void mei_cl_bus_dev_init(struct mei_device *bus,
 				struct mei_me_client *me_cl)
 {
 	struct mei_cl_device *cldev;
 
+	WARN_ON(!mutex_is_locked(&bus->cl_bus_lock));
+
 	dev_dbg(bus->dev, "initializing %pUl", mei_me_cl_uuid(me_cl));
 
 	if (me_cl->bus_added)
@@ -892,10 +901,8 @@ static void mei_cl_bus_dev_init(struct mei_device *bus,
 	if (!cldev)
 		return;
 
-	mutex_lock(&cldev->bus->cl_bus_lock);
 	me_cl->bus_added = true;
 	list_add_tail(&cldev->bus_list, &bus->device_list);
-	mutex_unlock(&cldev->bus->cl_bus_lock);
 
 }
 
@@ -910,12 +917,13 @@ void mei_cl_bus_rescan(struct mei_device *bus)
 	struct mei_cl_device *cldev, *n;
 	struct mei_me_client *me_cl;
 
+	mutex_lock(&bus->cl_bus_lock);
+
 	down_read(&bus->me_clients_rwsem);
 	list_for_each_entry(me_cl, &bus->me_clients, list)
 		mei_cl_bus_dev_init(bus, me_cl);
 	up_read(&bus->me_clients_rwsem);
 
-	mutex_lock(&bus->cl_bus_lock);
 	list_for_each_entry_safe(cldev, n, &bus->device_list, bus_list) {
 
 		if (!mei_me_cl_is_active(cldev->me_cl)) {
-- 
2.4.3
