Message-Id: <1581740709-9013-1-git-send-email-qiwuchen55@gmail.com>
Date: Sat, 15 Feb 2020 12:25:09 +0800
From: qiwuchen55@...il.com
To: peterz@...radead.org, mingo@...hat.com, will@...nel.org
Cc: linux-kernel@...r.kernel.org, chenqiwu <chenqiwu@...omi.com>
Subject: [PATCH] locking/osq: factor out osq_wait_node_unqueue()
From: chenqiwu <chenqiwu@...omi.com>
Factor the unqueue slow path of osq_lock() out into a separate
helper, osq_wait_node_unqueue(), so that all the code for removing
a node from its queue lives in one place.
Signed-off-by: chenqiwu <chenqiwu@...omi.com>
---
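A note for reviewers, since the unqueue path only matters once queueing
has happened: osq_lock()'s fast path publishes the current CPU (encoded
as cpu + 1, so that 0 can mean "unlocked") into the lock tail with an
xchg(); only a non-zero old tail sends the CPU down the slow path that
links @node behind @prev. Below is a minimal user-space sketch of that
tail handoff, using C11 atomics in place of the kernel's atomic_xchg();
the names mirror the kernel's, but this is illustrative, not kernel code:

#include <stdatomic.h>
#include <stdio.h>

#define OSQ_UNLOCKED_VAL 0		/* tail == 0: queue is empty */

static int encode_cpu(int cpu_nr)
{
	return cpu_nr + 1;		/* 0 is reserved for "unlocked" */
}

int main(void)
{
	atomic_int tail = OSQ_UNLOCKED_VAL;
	int curr = encode_cpu(3);	/* pretend we run on CPU 3 */

	/* osq_lock() fast path: publish ourselves as the new tail. */
	int old = atomic_exchange(&tail, curr);

	if (old == OSQ_UNLOCKED_VAL)
		printf("queue was empty: lock acquired immediately\n");
	else
		printf("must queue behind encoded CPU %d\n", old);

	return 0;
}

Only when that xchg() returns a non-zero old tail does osq_lock() build
the @prev/@node links that the unqueue code touched by this patch has
to undo.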
kernel/locking/osq_lock.c | 118 +++++++++++++++++++++++++---------------------
1 file changed, 64 insertions(+), 54 deletions(-)
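The new helper's three steps (A: stabilize @prev, B: stabilize @next,
C: unlink) are easiest to see on a quiescent queue. Here is a
single-threaded user-space model, with C11 compare-exchange standing in
for the kernel's cmpxchg() and plain atomic stores for WRITE_ONCE();
since nothing races, the step-A retry loop and the step-B wait collapse
away. Hypothetical code, meant only to illustrate the pointer
manipulation in the diff below:

#include <stdatomic.h>
#include <stdio.h>

struct node {
	_Atomic(struct node *) next;
	_Atomic(struct node *) prev;
};

static void unqueue(struct node *node)
{
	struct node *prev = atomic_load(&node->prev);
	struct node *expected = node;

	/* Step A: undo prev->next = node; a racing unlock() could beat us. */
	while (!atomic_compare_exchange_strong(&prev->next, &expected, NULL)) {
		/* kernel: check node->locked, cpu_relax(), re-read node->prev */
		expected = node;
		prev = atomic_load(&node->prev);
	}

	/* Step B: the kernel waits in osq_wait_next(); here @next is stable. */
	struct node *next = atomic_load(&node->next);

	/* Step C: unlink @node by pointing @prev and @next at each other. */
	atomic_store(&next->prev, prev);
	atomic_store(&prev->next, next);
}

int main(void)
{
	struct node a = {0}, b = {0}, c = {0};

	/* Build the queue a <-> b <-> c, then unqueue the middle node. */
	atomic_store(&a.next, &b);
	atomic_store(&b.prev, &a);
	atomic_store(&b.next, &c);
	atomic_store(&c.prev, &b);

	unqueue(&b);

	printf("a.next == &c: %d, c.prev == &a: %d\n",
	       atomic_load(&a.next) == &c,
	       atomic_load(&c.prev) == &a);
	return 0;
}

The interesting part in the kernel is precisely what this model elides:
the cmpxchg() can fail against a concurrent unlock() (then @node->locked
becomes true and we got the lock after all), or race with a neighbour's
unqueue (whose step C rewrites our @node->prev), which is why the helper
keeps its retry loop.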
diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index 1f77349..6b1c6d2 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -87,10 +87,72 @@ static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
return next;
}
+
+/*
+ * Three steps to safely remove @node from its queue.
+ */
+static inline bool osq_wait_node_unqueue(struct optimistic_spin_queue *lock,
+ struct optimistic_spin_node *node,
+ struct optimistic_spin_node *prev)
+{
+ struct optimistic_spin_node *next;
+
+ /*
+ * Step - A -- stabilize @prev
+ *
+ * Undo our @prev->next assignment; this will make @prev's
+ * unlock()/unqueue() wait for a next pointer since @lock points to us
+ * (or later).
+ */
+ for (;;) {
+ if (prev->next == node &&
+ cmpxchg(&prev->next, node, NULL) == node)
+ break;
+
+ /*
+ * We can only fail the cmpxchg() racing against an unlock(),
+ * in which case we should observe @node->locked becoming
+ * true.
+ */
+ if (smp_load_acquire(&node->locked))
+ return true;
+
+ cpu_relax();
+
+ /*
+ * Or we race against a concurrent unqueue()'s step-B, in which
+ * case its step-C will write us a new @node->prev pointer.
+ */
+ prev = READ_ONCE(node->prev);
+ }
+
+ /*
+ * Step - B -- stabilize @next
+ *
+ * Similar to unlock(), wait for @node->next or move @lock from @node
+ * back to @prev.
+ */
+ next = osq_wait_next(lock, node, prev);
+ if (!next)
+ return false;
+
+ /*
+ * Step - C -- unlink
+ *
+ * @prev is stable because it's still waiting for a new @prev->next
+ * pointer, @next is stable because our @node->next pointer is NULL and
+ * it will wait in Step-A.
+ */
+ WRITE_ONCE(next->prev, prev);
+ WRITE_ONCE(prev->next, next);
+
+ return false;
+}
+
bool osq_lock(struct optimistic_spin_queue *lock)
{
struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
- struct optimistic_spin_node *prev, *next;
+ struct optimistic_spin_node *prev;
int curr = encode_cpu(smp_processor_id());
int old;
@@ -145,59 +207,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
return true;
/* unqueue */
- /*
- * Step - A -- stabilize @prev
- *
- * Undo our @prev->next assignment; this will make @prev's
- * unlock()/unqueue() wait for a next pointer since @lock points to us
- * (or later).
- */
-
- for (;;) {
- if (prev->next == node &&
- cmpxchg(&prev->next, node, NULL) == node)
- break;
-
- /*
- * We can only fail the cmpxchg() racing against an unlock(),
- * in which case we should observe @node->locked becomming
- * true.
- */
- if (smp_load_acquire(&node->locked))
- return true;
-
- cpu_relax();
-
- /*
- * Or we race against a concurrent unqueue()'s step-B, in which
- * case its step-C will write us a new @node->prev pointer.
- */
- prev = READ_ONCE(node->prev);
- }
-
- /*
- * Step - B -- stabilize @next
- *
- * Similar to unlock(), wait for @node->next or move @lock from @node
- * back to @prev.
- */
-
- next = osq_wait_next(lock, node, prev);
- if (!next)
- return false;
-
- /*
- * Step - C -- unlink
- *
- * @prev is stable because its still waiting for a new @prev->next
- * pointer, @next is stable because our @node->next pointer is NULL and
- * it will wait in Step-A.
- */
-
- WRITE_ONCE(next->prev, prev);
- WRITE_ONCE(prev->next, next);
-
- return false;
+ return osq_wait_node_unqueue(lock, node, prev);
}
void osq_unlock(struct optimistic_spin_queue *lock)
--
1.9.1