Message-Id: <1423905288-12830-1-git-send-email-tomas.winkler@intel.com>
Date:	Sat, 14 Feb 2015 11:14:48 +0200
From:	Tomas Winkler <tomas.winkler@...el.com>
To:	gregkh@...uxfoundation.org
Cc:	arnd@...db.de, linux-kernel@...r.kernel.org,
	Tomas Winkler <tomas.winkler@...el.com>
Subject: [char-misc-next 02/18 V2] mei: revamp me clients list handling

1. Use an rw semaphore (me_clients_rwsem) to protect access to the
me_clients list.

2. Reuse the already defined find functions also when
removing a particular me client.

3. Add wrappers for addition to and deletion from the list.

Signed-off-by: Tomas Winkler <tomas.winkler@...el.com>
---
V2: add missing mei_me_cl_put() calls to mei_host_client_init
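
For reference, a minimal sketch of the intended caller pattern
(illustrative only, not part of the diff): a lookup returns a
referenced me client or NULL, and mei_me_cl_put() accepts NULL, so the
reference can be dropped unconditionally:

	struct mei_me_client *me_cl;

	me_cl = mei_me_cl_by_uuid(dev, &mei_amthif_guid);
	if (me_cl)
		mei_amthif_host_init(dev);
	mei_me_cl_put(me_cl);	/* NULL-safe when the lookup failed */

Internally the lookup helpers take me_clients_rwsem for read, while
list addition and removal take it for write.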

 drivers/misc/mei/client.c  | 197 +++++++++++++++++++++++++++++++++------------
 drivers/misc/mei/client.h  |   5 +-
 drivers/misc/mei/debugfs.c |  19 +++--
 drivers/misc/mei/hbm.c     |   3 +-
 drivers/misc/mei/init.c    |   1 +
 drivers/misc/mei/mei_dev.h |   2 +
 6 files changed, 165 insertions(+), 62 deletions(-)

diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 9541218c10eb..7370218bb3f7 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -48,14 +48,14 @@ void mei_me_cl_init(struct mei_me_client *me_cl)
  */
 struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
 {
-	if (me_cl)
-		kref_get(&me_cl->refcnt);
+	if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
+		return me_cl;
 
-	return me_cl;
+	return NULL;
 }
 
 /**
- * mei_me_cl_release - unlink and free me client
+ * mei_me_cl_release - free me client
  *
  * Locking: called under "dev->device_lock" lock
  *
@@ -65,9 +65,10 @@ static void mei_me_cl_release(struct kref *ref)
 {
 	struct mei_me_client *me_cl =
 		container_of(ref, struct mei_me_client, refcnt);
-	list_del(&me_cl->list);
+
 	kfree(me_cl);
 }
+
 /**
  * mei_me_cl_put - decrease me client refcount and free client if necessary
  *
@@ -82,51 +83,146 @@ void mei_me_cl_put(struct mei_me_client *me_cl)
 }
 
 /**
- * mei_me_cl_by_uuid - locate me client by uuid
+ * __mei_me_cl_del - delete me client from the list and decrease
+ *     reference counter
+ *
+ * @dev: mei device
+ * @me_cl: me client
+ *
+ * Locking: dev->me_clients_rwsem
+ */
+static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
+{
+	if (!me_cl)
+		return;
+
+	list_del(&me_cl->list);
+	mei_me_cl_put(me_cl);
+}
+
+/**
+ * mei_me_cl_add - add me client to the list
+ *
+ * @dev: mei device
+ * @me_cl: me client
+ */
+void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
+{
+	down_write(&dev->me_clients_rwsem);
+	list_add(&me_cl->list, &dev->me_clients);
+	up_write(&dev->me_clients_rwsem);
+}
+
+/**
+ * __mei_me_cl_by_uuid - locate me client by uuid
  *	increases ref count
  *
  * @dev: mei device
  * @uuid: me client uuid
  *
- * Locking: called under "dev->device_lock" lock
- *
  * Return: me client or NULL if not found
+ *
+ * Locking: dev->me_clients_rwsem
  */
-struct mei_me_client *mei_me_cl_by_uuid(const struct mei_device *dev,
+static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
 					const uuid_le *uuid)
 {
 	struct mei_me_client *me_cl;
+	const uuid_le *pn;
 
-	list_for_each_entry(me_cl, &dev->me_clients, list)
-		if (uuid_le_cmp(*uuid, me_cl->props.protocol_name) == 0)
+	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
+
+	list_for_each_entry(me_cl, &dev->me_clients, list) {
+		pn = &me_cl->props.protocol_name;
+		if (uuid_le_cmp(*uuid, *pn) == 0)
 			return mei_me_cl_get(me_cl);
+	}
 
 	return NULL;
 }
 
 /**
+ * mei_me_cl_by_uuid - locate me client by uuid
+ *	increases ref count
+ *
+ * @dev: mei device
+ * @uuid: me client uuid
+ *
+ * Return: me client or NULL if not found
+ *
+ * Locking: dev->me_clients_rwsem
+ */
+struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
+					const uuid_le *uuid)
+{
+	struct mei_me_client *me_cl;
+
+	down_read(&dev->me_clients_rwsem);
+	me_cl = __mei_me_cl_by_uuid(dev, uuid);
+	up_read(&dev->me_clients_rwsem);
+
+	return me_cl;
+}
+
+/**
  * mei_me_cl_by_id - locate me client by client id
  *	increases ref count
  *
  * @dev: the device structure
  * @client_id: me client id
  *
- * Locking: called under "dev->device_lock" lock
- *
  * Return: me client or NULL if not found
+ *
+ * Locking: dev->me_clients_rwsem
  */
 struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
 {
 
+	struct mei_me_client *__me_cl, *me_cl = NULL;
+
+	down_read(&dev->me_clients_rwsem);
+	list_for_each_entry(__me_cl, &dev->me_clients, list) {
+		if (__me_cl->client_id == client_id) {
+			me_cl = mei_me_cl_get(__me_cl);
+			break;
+		}
+	}
+	up_read(&dev->me_clients_rwsem);
+
+	return me_cl;
+}
+
+/**
+ * __mei_me_cl_by_uuid_id - locate me client by client id and uuid
+ *	increases ref count
+ *
+ * @dev: the device structure
+ * @uuid: me client uuid
+ * @client_id: me client id
+ *
+ * Return: me client or NULL if not found
+ *
+ * Locking: dev->me_clients_rwsem
+ */
+static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev,
+					   const uuid_le *uuid, u8 client_id)
+{
 	struct mei_me_client *me_cl;
+	const uuid_le *pn;
+
+	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
 
-	list_for_each_entry(me_cl, &dev->me_clients, list)
-		if (me_cl->client_id == client_id)
+	list_for_each_entry(me_cl, &dev->me_clients, list) {
+		pn = &me_cl->props.protocol_name;
+		if (uuid_le_cmp(*uuid, *pn) == 0 &&
+		    me_cl->client_id == client_id)
 			return mei_me_cl_get(me_cl);
+	}
 
 	return NULL;
 }
 
+
 /**
  * mei_me_cl_by_uuid_id - locate me client by client id and uuid
  *	increases ref count
@@ -135,21 +231,18 @@ struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
  * @uuid: me client uuid
  * @client_id: me client id
  *
- * Locking: called under "dev->device_lock" lock
- *
- * Return: me client or NULL if not found
+ * Return: me client or NULL if not found
  */
 struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
 					   const uuid_le *uuid, u8 client_id)
 {
 	struct mei_me_client *me_cl;
 
-	list_for_each_entry(me_cl, &dev->me_clients, list)
-		if (uuid_le_cmp(*uuid, me_cl->props.protocol_name) == 0 &&
-		    me_cl->client_id == client_id)
-			return mei_me_cl_get(me_cl);
+	down_read(&dev->me_clients_rwsem);
+	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
+	up_read(&dev->me_clients_rwsem);
 
-	return NULL;
+	return me_cl;
 }
 
 /**
@@ -162,12 +255,14 @@ struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
  */
 void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
 {
-	struct mei_me_client *me_cl, *next;
+	struct mei_me_client *me_cl;
 
 	dev_dbg(dev->dev, "remove %pUl\n", uuid);
-	list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
-		if (uuid_le_cmp(*uuid, me_cl->props.protocol_name) == 0)
-			mei_me_cl_put(me_cl);
+
+	down_write(&dev->me_clients_rwsem);
+	me_cl = __mei_me_cl_by_uuid(dev, uuid);
+	__mei_me_cl_del(dev, me_cl);
+	up_write(&dev->me_clients_rwsem);
 }
 
 /**
@@ -181,15 +276,14 @@ void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
  */
 void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
 {
-	struct mei_me_client *me_cl, *next;
-	const uuid_le *pn;
+	struct mei_me_client *me_cl;
 
 	dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);
-	list_for_each_entry_safe(me_cl, next, &dev->me_clients, list) {
-		pn =  &me_cl->props.protocol_name;
-		if (me_cl->client_id == id && uuid_le_cmp(*uuid, *pn) == 0)
-			mei_me_cl_put(me_cl);
-	}
+
+	down_write(&dev->me_clients_rwsem);
+	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
+	__mei_me_cl_del(dev, me_cl);
+	up_write(&dev->me_clients_rwsem);
 }
 
 /**
@@ -203,12 +297,12 @@ void mei_me_cl_rm_all(struct mei_device *dev)
 {
 	struct mei_me_client *me_cl, *next;
 
+	down_write(&dev->me_clients_rwsem);
 	list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
-			mei_me_cl_put(me_cl);
+		__mei_me_cl_del(dev, me_cl);
+	up_write(&dev->me_clients_rwsem);
 }
 
-
-
 /**
  * mei_cl_cmp_id - tells if the clients are the same
  *
@@ -535,28 +629,31 @@ int mei_cl_unlink(struct mei_cl *cl)
 
 void mei_host_client_init(struct work_struct *work)
 {
-	struct mei_device *dev = container_of(work,
-					      struct mei_device, init_work);
+	struct mei_device *dev =
+		container_of(work, struct mei_device, init_work);
 	struct mei_me_client *me_cl;
-	struct mei_client_properties *props;
 
 	mutex_lock(&dev->device_lock);
 
-	list_for_each_entry(me_cl, &dev->me_clients, list) {
-		props = &me_cl->props;
 
-		if (!uuid_le_cmp(props->protocol_name, mei_amthif_guid))
-			mei_amthif_host_init(dev);
-		else if (!uuid_le_cmp(props->protocol_name, mei_wd_guid))
-			mei_wd_host_init(dev);
-		else if (!uuid_le_cmp(props->protocol_name, mei_nfc_guid))
-			mei_nfc_host_init(dev);
+	me_cl = mei_me_cl_by_uuid(dev, &mei_amthif_guid);
+	if (me_cl)
+		mei_amthif_host_init(dev);
+	mei_me_cl_put(me_cl);
+
+	me_cl = mei_me_cl_by_uuid(dev, &mei_wd_guid);
+	if (me_cl)
+		mei_wd_host_init(dev);
+	mei_me_cl_put(me_cl);
+
+	me_cl = mei_me_cl_by_uuid(dev, &mei_nfc_guid);
+	if (me_cl)
+		mei_nfc_host_init(dev);
+	mei_me_cl_put(me_cl);
 
-	}
 
 	dev->dev_state = MEI_DEV_ENABLED;
 	dev->reset_count = 0;
-
 	mutex_unlock(&dev->device_lock);
 
 	pm_runtime_mark_last_busy(dev->dev);
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index cfcde8e97fc4..80386f9c27e9 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -31,7 +31,10 @@ void mei_me_cl_init(struct mei_me_client *me_cl);
 void mei_me_cl_put(struct mei_me_client *me_cl);
 struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl);
 
-struct mei_me_client *mei_me_cl_by_uuid(const struct mei_device *dev,
+void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl);
+void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl);
+
+struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
 					const uuid_le *uuid);
 struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id);
 struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index b125380ee871..50fc6635fab1 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -28,7 +28,7 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf,
 					size_t cnt, loff_t *ppos)
 {
 	struct mei_device *dev = fp->private_data;
-	struct mei_me_client *me_cl, *n;
+	struct mei_me_client *me_cl;
 	size_t bufsz = 1;
 	char *buf;
 	int i = 0;
@@ -38,15 +38,14 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf,
 #define HDR \
 "  |id|fix|         UUID                       |con|msg len|sb|refc|\n"
 
-	mutex_lock(&dev->device_lock);
-
+	down_read(&dev->me_clients_rwsem);
 	list_for_each_entry(me_cl, &dev->me_clients, list)
 		bufsz++;
 
 	bufsz *= sizeof(HDR) + 1;
 	buf = kzalloc(bufsz, GFP_KERNEL);
 	if (!buf) {
-		mutex_unlock(&dev->device_lock);
+		up_read(&dev->me_clients_rwsem);
 		return -ENOMEM;
 	}
 
@@ -56,10 +55,9 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf,
 	if (dev->dev_state != MEI_DEV_ENABLED)
 		goto out;
 
-	list_for_each_entry_safe(me_cl, n, &dev->me_clients, list) {
+	list_for_each_entry(me_cl, &dev->me_clients, list) {
 
-		me_cl = mei_me_cl_get(me_cl);
-		if (me_cl) {
+		if (mei_me_cl_get(me_cl)) {
 			pos += scnprintf(buf + pos, bufsz - pos,
 				"%2d|%2d|%3d|%pUl|%3d|%7d|%2d|%4d|\n",
 				i++, me_cl->client_id,
@@ -69,12 +67,13 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf,
 				me_cl->props.max_msg_length,
 				me_cl->props.single_recv_buf,
 				atomic_read(&me_cl->refcnt.refcount));
-		}
 
-		mei_me_cl_put(me_cl);
+			mei_me_cl_put(me_cl);
+		}
 	}
+
 out:
-	mutex_unlock(&dev->device_lock);
+	up_read(&dev->me_clients_rwsem);
 	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, pos);
 	kfree(buf);
 	return ret;
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index c8412d41e4f1..4f83e9aaa6f9 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -338,7 +338,8 @@ static int mei_hbm_me_cl_add(struct mei_device *dev,
 	me_cl->client_id = res->me_addr;
 	me_cl->mei_flow_ctrl_creds = 0;
 
-	list_add(&me_cl->list, &dev->me_clients);
+	mei_me_cl_add(dev, me_cl);
+
 	return 0;
 }
 
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 6ad049a08e4d..ad18b0d16f61 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -389,6 +389,7 @@ void mei_device_init(struct mei_device *dev,
 	INIT_LIST_HEAD(&dev->device_list);
 	INIT_LIST_HEAD(&dev->me_clients);
 	mutex_init(&dev->device_lock);
+	init_rwsem(&dev->me_clients_rwsem);
 	init_waitqueue_head(&dev->wait_hw_ready);
 	init_waitqueue_head(&dev->wait_pg);
 	init_waitqueue_head(&dev->wait_hbm_start);
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 6c6ce9381535..102cc6603eba 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -460,6 +460,7 @@ const char *mei_pg_state_str(enum mei_pg_state state);
  * @version     : HBM protocol version in use
  * @hbm_f_pg_supported : hbm feature pgi protocol
  *
+ * @me_clients_rwsem: rw lock over me_clients list
  * @me_clients  : list of FW clients
  * @me_clients_map : FW clients bit map
  * @host_clients_map : host clients id pool
@@ -556,6 +557,7 @@ struct mei_device {
 	struct hbm_version version;
 	unsigned int hbm_f_pg_supported:1;
 
+	struct rw_semaphore me_clients_rwsem;
 	struct list_head me_clients;
 	DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX);
 	DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX);
-- 
1.9.3

