Date:   Wed,  4 Oct 2017 17:20:07 -0400
From:   Waiman Long <longman@...hat.com>
To:     Alexander Viro <viro@...iv.linux.org.uk>, Jan Kara <jack@...e.com>,
        Jeff Layton <jlayton@...chiereds.net>,
        "J. Bruce Fields" <bfields@...ldses.org>,
        Tejun Heo <tj@...nel.org>,
        Christoph Lameter <cl@...ux-foundation.org>
Cc:     linux-fsdevel@...r.kernel.org, linux-kernel@...r.kernel.org,
        Ingo Molnar <mingo@...hat.com>,
        Peter Zijlstra <peterz@...radead.org>,
        Andi Kleen <andi@...stfloor.org>,
        Dave Chinner <dchinner@...hat.com>,
        Boqun Feng <boqun.feng@...il.com>,
        Davidlohr Bueso <dave@...olabs.net>,
        Waiman Long <longman@...hat.com>
Subject: [PATCH v6 6/6] lib/dlock-list: Provide IRQ-safe APIs

To enable the use of dlock-list in an interrupt handler, the following
new APIs are provided for an irqsafe dlock-list (a brief usage sketch
follows each list below):

 - void dlock_list_unlock_irqsafe(struct dlock_list_iter *)
 - void dlock_list_relock_irqsafe(struct dlock_list_iter *)
 - void dlock_list_add_irqsafe(struct dlock_list_node *,
			       struct dlock_list_head *);
 - void dlock_lists_add_irqsafe(struct dlock_list_node *,
				struct dlock_list_heads *)
 - void dlock_lists_del_irqsafe(struct dlock_list_node *)

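As an illustration only, here is a rough sketch of how the add/delete
APIs might be used from code that can run in interrupt context. The
struct foo_entry and foo_dlist names are made up for this example, and
the allocation/init helpers (alloc_dlock_list_heads(),
init_dlock_list_node()) are the ones introduced earlier in this series:

    #include <linux/dlock-list.h>

    struct foo_entry {
        struct dlock_list_node node;
        int data;
    };

    static struct dlock_list_heads foo_dlist;

    /* One-time setup, done in process context */
    static int foo_init(void)
    {
        return alloc_dlock_list_heads(&foo_dlist);
    }

    /* May be called from an interrupt handler */
    static void foo_insert(struct foo_entry *e)
    {
        init_dlock_list_node(&e->node);
        dlock_lists_add_irqsafe(&e->node, &foo_dlist);
    }

    /* May also be called from an interrupt handler */
    static void foo_remove(struct foo_entry *e)
    {
        dlock_lists_del_irqsafe(&e->node);
    }
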
New macros for irqsafe dlock-list:

 - dlist_for_each_entry_irqsafe(pos, iter, member)
 - dlist_for_each_entry_safe_irqsafe(pos, n, iter, member)

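Iteration works the same way as with the existing macros, except that
each per-list spinlock is taken with spin_lock_irqsave() and the saved
flags are kept in the iterator, so the loop body runs with local
interrupts disabled and must not sleep. A minimal sketch, reusing the
hypothetical foo_entry/foo_dlist names above together with the iterator
init helper from the earlier patches in this series:

    struct dlock_list_iter iter;
    struct foo_entry *e;

    init_dlock_list_iter(&iter, &foo_dlist);
    dlist_for_each_entry_irqsafe(e, &iter, node)
        pr_info("foo data = %d\n", e->data);
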
Signed-off-by: Waiman Long <longman@...hat.com>
---
 include/linux/dlock-list.h | 105 ++++++++++++++++++++++++++++++++++++---------
 lib/dlock-list.c           |  89 +++++++++++++++++++++++++++++++++-----
 2 files changed, 164 insertions(+), 30 deletions(-)

diff --git a/include/linux/dlock-list.h b/include/linux/dlock-list.h
index 7afea8f..00f0a29 100644
--- a/include/linux/dlock-list.h
+++ b/include/linux/dlock-list.h
@@ -54,6 +54,7 @@ struct dlock_list_node {
  */
 struct dlock_list_iter {
 	int index;
+	unsigned long flags;
 	struct dlock_list_head *head, *entry;
 };
 
@@ -100,6 +101,24 @@ static inline void dlock_list_relock(struct dlock_list_iter *iter)
 	spin_lock(&iter->entry->lock);
 }
 
+/**
+ * dlock_list_unlock_irqsafe - unlock spinlock that protects the current list
+ * @iter: Pointer to the dlock list iterator structure
+ */
+static inline void dlock_list_unlock_irqsafe(struct dlock_list_iter *iter)
+{
+	spin_unlock_irqrestore(&iter->entry->lock, iter->flags);
+}
+
+/**
+ * dlock_list_relock_irqsafe - lock spinlock that protects the current list
+ * @iter: Pointer to the dlock list iterator structure
+ */
+static inline void dlock_list_relock_irqsafe(struct dlock_list_iter *iter)
+{
+	spin_lock_irqsave(&iter->entry->lock, iter->flags);
+}
+
 /*
  * Allocation and freeing of dlock list
  */
@@ -113,12 +132,15 @@ static inline void dlock_list_relock(struct dlock_list_iter *iter)
 
 /*
  * The dlock list addition and deletion functions here are not irq-safe.
- * Special irq-safe variants will have to be added if we need them.
  */
 extern void dlock_lists_add(struct dlock_list_node *node,
 			    struct dlock_list_heads *dlist);
 extern void dlock_lists_del(struct dlock_list_node *node);
 
+extern void dlock_lists_add_irqsafe(struct dlock_list_node *node,
+				    struct dlock_list_heads *dlist);
+extern void dlock_lists_del_irqsafe(struct dlock_list_node *node);
+
 /*
  * Instead of individual list mapping by CPU number, it can be based on
  * a given context to speed up loockup performance.
@@ -127,24 +149,28 @@ extern struct dlock_list_head *dlock_list_hash(struct dlock_list_heads *dlist,
 					       void *context);
 extern void dlock_list_add(struct dlock_list_node *node,
 			   struct dlock_list_head *head);
+extern void dlock_list_add_irqsafe(struct dlock_list_node *node,
+				   struct dlock_list_head *head);
 
 /*
  * Find the first entry of the next available list.
  */
 extern struct dlock_list_node *
-__dlock_list_next_list(struct dlock_list_iter *iter);
+__dlock_list_next_list(struct dlock_list_iter *iter, bool irqsafe);
 
 /**
  * __dlock_list_next_entry - Iterate to the next entry of the dlock list
- * @curr : Pointer to the current dlock_list_node structure
- * @iter : Pointer to the dlock list iterator structure
+ * @curr   : Pointer to the current dlock_list_node structure
+ * @iter   : Pointer to the dlock list iterator structure
+ * @irqsafe: IRQ safe flag
  * Return: Pointer to the next entry or NULL if all the entries are iterated
  *
  * The iterator has to be properly initialized before calling this function.
  */
 static inline struct dlock_list_node *
 __dlock_list_next_entry(struct dlock_list_node *curr,
-			struct dlock_list_iter *iter)
+			struct dlock_list_iter *iter,
+			bool irqsafe)
 {
 	/*
 	 * Find next entry
@@ -157,7 +183,7 @@ extern void dlock_list_add(struct dlock_list_node *node,
 		 * The current list has been exhausted, try the next available
 		 * list.
 		 */
-		curr = __dlock_list_next_list(iter);
+		curr = __dlock_list_next_list(iter, irqsafe);
 	}
 
 	return curr;	/* Continue the iteration */
@@ -165,31 +191,33 @@ extern void dlock_list_add(struct dlock_list_node *node,
 
 /**
  * dlock_list_first_entry - get the first element from a list
- * @iter  : The dlock list iterator.
- * @type  : The type of the struct this is embedded in.
- * @member: The name of the dlock_list_node within the struct.
+ * @iter   : The dlock list iterator.
+ * @type   : The type of the struct this is embedded in.
+ * @member : The name of the dlock_list_node within the struct.
+ * @irqsafe: IRQ safe flag
  * Return : Pointer to the next entry or NULL if all the entries are iterated.
  */
-#define dlock_list_first_entry(iter, type, member)			\
+#define dlock_list_first_entry(iter, type, member, irqsafe)		\
 	({								\
 		struct dlock_list_node *_n;				\
-		_n = __dlock_list_next_entry(NULL, iter);		\
+		_n = __dlock_list_next_entry(NULL, iter, irqsafe);	\
 		_n ? list_entry(_n, type, member) : NULL;		\
 	})
 
 /**
  * dlock_list_next_entry - iterate to the next entry of the list
- * @pos   : The type * to cursor
- * @iter  : The dlock list iterator.
- * @member: The name of the dlock_list_node within the struct.
+ * @pos    : The type * to cursor
+ * @iter   : The dlock list iterator.
+ * @member : The name of the dlock_list_node within the struct.
+ * @irqsafe: IRQ safe flag
  * Return : Pointer to the next entry or NULL if all the entries are iterated.
  *
  * Note that pos can't be NULL.
  */
-#define dlock_list_next_entry(pos, iter, member)			\
+#define dlock_list_next_entry(pos, iter, member, irqsafe)		\
 	({								\
 		struct dlock_list_node *_n;				\
-		_n = __dlock_list_next_entry(&(pos)->member, iter);	\
+		_n = __dlock_list_next_entry(&(pos)->member, iter, irqsafe);\
 		_n ? list_entry(_n, typeof(*(pos)), member) : NULL;	\
 	})
 
@@ -204,9 +232,9 @@ extern void dlock_list_add(struct dlock_list_node *node,
  * This iteration function is designed to be used in a while loop.
  */
 #define dlist_for_each_entry(pos, iter, member)				\
-	for (pos = dlock_list_first_entry(iter, typeof(*(pos)), member);\
+	for (pos = dlock_list_first_entry(iter, typeof(*(pos)), member, 0);\
 	     pos != NULL;						\
-	     pos = dlock_list_next_entry(pos, iter, member))
+	     pos = dlock_list_next_entry(pos, iter, member, 0))
 
 /**
  * dlist_for_each_entry_safe - iterate over the dlock list & safe over removal
@@ -220,11 +248,48 @@ extern void dlock_list_add(struct dlock_list_node *node,
  * current one.
  */
 #define dlist_for_each_entry_safe(pos, n, iter, member)			\
-	for (pos = dlock_list_first_entry(iter, typeof(*(pos)), member);\
+	for (pos = dlock_list_first_entry(iter, typeof(*(pos)), member, 0);\
+	    ({								\
+		bool _b = (pos != NULL);				\
+		if (_b)							\
+			n = dlock_list_next_entry(pos, iter, member, 0);\
+		_b;							\
+	    });								\
+	    pos = n)
+
+/**
+ * dlist_for_each_entry_irqsafe - iterate over an irqsafe dlock list
+ * @pos   : Type * to use as a loop cursor
+ * @iter  : The dlock list iterator
+ * @member: The name of the dlock_list_node within the struct
+ *
+ * This iteration macro isn't safe with respect to list entry removal, but
+ * it can correctly iterate newly added entries right after the current one.
+ * This iteration function is designed to be used in a while loop.
+ */
+#define dlist_for_each_entry_irqsafe(pos, iter, member)			\
+	for (pos = dlock_list_first_entry(iter, typeof(*(pos)), member, 1);\
+	     pos != NULL;						\
+	     pos = dlock_list_next_entry(pos, iter, member, 1))
+
+/**
+ * dlist_for_each_entry_safe_irqsafe - iterate over an irqsafe dlock list &
+ *			       safe over removal
+ * @pos   : Type * to use as a loop cursor
+ * @n	  : Another type * to use as temporary storage
+ * @iter  : The dlock list iterator
+ * @member: The name of the dlock_list_node within the struct
+ *
+ * This iteration macro is safe with respect to list entry removal.
+ * However, it cannot correctly iterate newly added entries right after the
+ * current one.
+ */
+#define dlist_for_each_entry_safe_irqsafe(pos, n, iter, member)		\
+	for (pos = dlock_list_first_entry(iter, typeof(*(pos)), member, 1);\
 	    ({								\
 		bool _b = (pos != NULL);				\
 		if (_b)							\
-			n = dlock_list_next_entry(pos, iter, member);	\
+			n = dlock_list_next_entry(pos, iter, member, 1);\
 		_b;							\
 	    });								\
 	    pos = n)
diff --git a/lib/dlock-list.c b/lib/dlock-list.c
index e72579f..03d4b98 100644
--- a/lib/dlock-list.c
+++ b/lib/dlock-list.c
@@ -197,6 +197,22 @@ void dlock_list_add(struct dlock_list_node *node,
 }
 
 /**
+ * dlock_list_add_irqsafe - Add node to a particular head of irqsafe dlock list
+ * @node: The node to be added
+ * @head: The dlock list head where the node is to be added
+ */
+void dlock_list_add_irqsafe(struct dlock_list_node *node,
+			    struct dlock_list_head *head)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&head->lock, flags);
+	node->head = head;
+	list_add(&node->list, &head->list);
+	spin_unlock_irqrestore(&head->lock, flags);
+}
+
+/**
  * dlock_lists_add - Adds a node to the given dlock list
  * @node : The node to be added
  * @dlist: The dlock list where the node is to be added
@@ -213,8 +229,24 @@ void dlock_lists_add(struct dlock_list_node *node,
 }
 
 /**
- * dlock_lists_del - Delete a node from a dlock list
- * @node : The node to be deleted
+ * dlock_lists_add_irqsafe - Adds a node to the given irqsafe dlock list
+ * @node : The node to be added
+ * @dlist: The dlock list where the node is to be added
+ *
+ * List selection is based on the CPU being used when the
+ * dlock_list_add_irqsafe() function is called. However, deletion may be
+ * done by a different CPU.
+ */
+void dlock_lists_add_irqsafe(struct dlock_list_node *node,
+			     struct dlock_list_heads *dlist)
+{
+	struct dlock_list_head *head = &dlist->heads[this_cpu_read(cpu2idx)];
+
+	dlock_list_add_irqsafe(node, head);
+}
+
+/*
+ * Delete a node from a dlock list
  *
  * We need to check the lock pointer again after taking the lock to guard
  * against concurrent deletion of the same node. If the lock pointer changes
@@ -222,9 +254,11 @@ void dlock_lists_add(struct dlock_list_node *node,
  * elsewhere. A warning will be printed if this happens as it is likely to be
  * a bug.
  */
-void dlock_lists_del(struct dlock_list_node *node)
+static __always_inline void __dlock_lists_del(struct dlock_list_node *node,
+					      bool irqsafe)
 {
 	struct dlock_list_head *head;
+	unsigned long flags;
 	bool retry;
 
 	do {
@@ -233,7 +267,11 @@ void dlock_lists_del(struct dlock_list_node *node)
 			      __func__, (unsigned long)node))
 			return;
 
-		spin_lock(&head->lock);
+		if (irqsafe)
+			spin_lock_irqsave(&head->lock, flags);
+		else
+			spin_lock(&head->lock);
+
 		if (likely(head == node->head)) {
 			list_del_init(&node->list);
 			node->head = NULL;
@@ -246,26 +284,53 @@ void dlock_lists_del(struct dlock_list_node *node)
 			 */
 			retry = (node->head != NULL);
 		}
-		spin_unlock(&head->lock);
+
+		if (irqsafe)
+			spin_unlock_irqrestore(&head->lock, flags);
+		else
+			spin_unlock(&head->lock);
 	} while (retry);
 }
 
 /**
+ * dlock_lists_del - Delete a node from a dlock list
+ * @node : The node to be deleted
+ */
+void dlock_lists_del(struct dlock_list_node *node)
+{
+	__dlock_lists_del(node, false);
+}
+
+/**
+ * dlock_lists_del_irqsafe - Delete a node from an irqsafe dlock list
+ * @node : The node to be deleted
+ */
+void dlock_lists_del_irqsafe(struct dlock_list_node *node)
+{
+	__dlock_lists_del(node, true);
+}
+
+/**
  * __dlock_list_next_list: Find the first entry of the next available list
- * @dlist: Pointer to the dlock_list_heads structure
- * @iter : Pointer to the dlock list iterator structure
+ * @dlist  : Pointer to the dlock_list_heads structure
+ * @iter   : Pointer to the dlock list iterator structure
+ * @irqsafe: IRQ safe flag
  * Return: true if the entry is found, false if all the lists exhausted
  *
  * The information about the next available list will be put into the iterator.
  */
-struct dlock_list_node *__dlock_list_next_list(struct dlock_list_iter *iter)
+struct dlock_list_node *__dlock_list_next_list(struct dlock_list_iter *iter,
+					       bool irqsafe)
 {
 	struct dlock_list_node *next;
 	struct dlock_list_head *head;
 
 restart:
 	if (iter->entry) {
-		spin_unlock(&iter->entry->lock);
+		if (irqsafe)
+			dlock_list_unlock_irqsafe(iter);
+		else
+			dlock_list_unlock(iter);
 		iter->entry = NULL;
 	}
 
@@ -280,7 +345,11 @@ struct dlock_list_node *__dlock_list_next_list(struct dlock_list_iter *iter)
 		goto next_list;
 
 	head = iter->entry = &iter->head[iter->index];
-	spin_lock(&head->lock);
+	if (irqsafe)
+		dlock_list_relock_irqsafe(iter);
+	else
+		dlock_list_relock(iter);
+
 	/*
 	 * There is a slight chance that the list may become empty just
 	 * before the lock is acquired. So an additional check is
-- 
1.8.3.1
