Message-Id: <20180108145518.1f39ea095da6dd4fbbf660cb@gmail.com>
Date: Mon, 8 Jan 2018 14:55:18 +0100
From: Vitaly Wool <vitalywool@...il.com>
To: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
Arve Hjønnevåg <arve@...roid.com>, Todd Kjos <tkjos@...roid.com>,
Martijn Coenen <maco@...roid.com>
Cc: Oleksiy.Avramchenko@...y.com, linux-kernel@...r.kernel.org
Subject: [PATCH] binder: use lockless list for deferred_work

Binder uses an hlist for its deferred work list, which isn't a good
match: it is slow and requires a mutual exclusion mechanism to protect
its operations. Moreover, calling schedule_work() under a mutex may
cause significant delays and has a noticeable adverse effect on Binder
performance.

The deferred list in Binder is used in a very simple way: entries are
either added to it or removed from its head. llist (lock-less list) is
a good match for such a usage pattern; it is quite a bit faster and
requires no locking.

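For reference, this pattern maps onto the stock <linux/llist.h> API
roughly as follows (a minimal sketch with made-up names, not part of
this patch):

	struct item {
		struct llist_node node;
	};

	static LLIST_HEAD(pending);

	/* producers: lockless, usable from any context */
	static void queue_item(struct item *it)
	{
		llist_add(&it->node, &pending);
	}

	/* single consumer: pops the most recently added entry.
	 * llist_del_first() only needs a lock when there are
	 * multiple consumers; here the sole consumer is the
	 * deferred work function.
	 */
	static struct item *pop_item(void)
	{
		struct llist_node *n = llist_del_first(&pending);

		return n ? llist_entry(n, struct item, node) : NULL;
	}
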
To be able to add an entry to an llist only if it's not already on a
list, this patch adds two small helper functions:
llist_add_exclusive() adds a node only if it's not already on any
list, and llist_del_first_exclusive() deletes the first node off the
list and marks it as not being on any list.

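The intended pairing of the two helpers then looks roughly like this
(a sketch of the usage below, not a separate API):

	/* producer: publish the node at most once, then kick the worker */
	if (llist_add_exclusive(&proc->deferred_work_node,
				&binder_deferred_list))
		schedule_work(&binder_deferred_work);

	/* consumer: detach one node and make it re-addable */
	n = llist_del_first_exclusive(&binder_deferred_list);
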
Signed-off-by: Vitaly Vul <vitaly.vul@...y.com>
---
drivers/android/binder.c | 87 ++++++++++++++++++++++++++++++++++++------------
1 file changed, 66 insertions(+), 21 deletions(-)
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index a7ecfde..acbce1f 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -57,6 +57,7 @@
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
+#include <linux/llist.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -80,8 +81,7 @@
#include "binder_alloc.h"
#include "binder_trace.h"
-static HLIST_HEAD(binder_deferred_list);
-static DEFINE_MUTEX(binder_deferred_lock);
+static LLIST_HEAD(binder_deferred_list);
static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
@@ -122,6 +122,57 @@ BINDER_DEBUG_ENTRY(proc);
#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
+/*
+ * llist extension to allow lockless addition of an entry only if it's
+ * not on any other list
+ */
+#define LLIST_NODE_UNLISTED ((void *)(-1L))
+#define LLIST_NODE_INIT(name) { LLIST_NODE_UNLISTED }
+#define LLIST_NODE(name) struct llist_node name = LLIST_NODE_INIT(name)
+
+static inline void INIT_LLIST_NODE(struct llist_node *node)
+{
+ WRITE_ONCE(node->next, LLIST_NODE_UNLISTED);
+}
+
+/**
+ * llist_del_first_exclusive - delete the first entry of lock-less list
+ * and make sure it's marked as UNLISTED
+ * @head: the head for your lock-less list
+ *
+ * If the list is empty, return NULL; otherwise return the first
+ * entry deleted, which is the most recently added one, now marked
+ * as UNLISTED.
+ */
+static inline struct llist_node *llist_del_first_exclusive(
+ struct llist_head *head)
+{
+ struct llist_node *node = llist_del_first(head);
+
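+ /*
+ * Mark the detached node UNLISTED so a later llist_add_exclusive()
+ * can claim it again; this release store pairs with the cmpxchg()
+ * in llist_add_exclusive().
+ */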
+ if (node)
+ smp_store_release(&node->next, LLIST_NODE_UNLISTED);
+ return node;
+}
+
+/**
+ * llist_add_exclusive - add a node only if it's not on any list
+ * (i.e. marked as UNLISTED)
+ * @node: the node to be added
+ * @head: the head for your lock-less list
+ *
+ * Return true if the node was added, or false otherwise.
+ */
+static inline bool llist_add_exclusive(struct llist_node *node,
+ struct llist_head *head)
+{
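+ /*
+ * Claim the node: only the caller that flips ->next from UNLISTED
+ * to NULL may enqueue it; concurrent callers see a non-UNLISTED
+ * value and back off.
+ */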
+ if (cmpxchg(&node->next, LLIST_NODE_UNLISTED, NULL) !=
+ LLIST_NODE_UNLISTED)
+ return false;
+
+ llist_add(node, head);
+ return true;
+}
+
enum {
BINDER_DEBUG_USER_ERROR = 1U << 0,
BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
@@ -485,9 +536,7 @@ enum binder_deferred_state {
* (protected by @files_lock)
* @files_lock mutex to protect @files
* @deferred_work_node: element for binder_deferred_list
- * (protected by binder_deferred_lock)
* @deferred_work: bitmap of deferred work to perform
- * (protected by binder_deferred_lock)
* @is_dead: process is dead and awaiting free
* when outstanding transactions are cleaned up
* (protected by @inner_lock)
@@ -532,8 +581,8 @@ struct binder_proc {
struct task_struct *tsk;
struct files_struct *files;
struct mutex files_lock;
- struct hlist_node deferred_work_node;
- int deferred_work;
+ struct llist_node deferred_work_node;
+ atomic_t deferred_work;
bool is_dead;
struct list_head todo;
@@ -4680,6 +4729,8 @@ static int binder_open(struct inode *nodp, struct file *filp)
INIT_LIST_HEAD(&proc->waiting_threads);
filp->private_data = proc;
+ INIT_LLIST_NODE(&proc->deferred_work_node);
+
mutex_lock(&binder_procs_lock);
hlist_add_head(&proc->proc_node, &binder_procs);
mutex_unlock(&binder_procs_lock);
@@ -4900,22 +4951,20 @@ static void binder_deferred_func(struct work_struct *work)
{
struct binder_proc *proc;
struct files_struct *files;
+ struct llist_node *n;
int defer;
do {
- mutex_lock(&binder_deferred_lock);
- if (!hlist_empty(&binder_deferred_list)) {
- proc = hlist_entry(binder_deferred_list.first,
- struct binder_proc, deferred_work_node);
- hlist_del_init(&proc->deferred_work_node);
- defer = proc->deferred_work;
- proc->deferred_work = 0;
+ n = llist_del_first_exclusive(&binder_deferred_list);
+ if (n) {
+ proc = llist_entry(n, struct binder_proc,
+ deferred_work_node);
+ defer = atomic_xchg(&proc->deferred_work, 0);
} else {
proc = NULL;
defer = 0;
}
- mutex_unlock(&binder_deferred_lock);
files = NULL;
if (defer & BINDER_DEFERRED_PUT_FILES) {
@@ -4941,14 +4990,10 @@ static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
- mutex_lock(&binder_deferred_lock);
- proc->deferred_work |= defer;
- if (hlist_unhashed(&proc->deferred_work_node)) {
- hlist_add_head(&proc->deferred_work_node,
- &binder_deferred_list);
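+ /*
+ * Set the work bits before publishing the node so that the
+ * consumer's atomic_xchg() in binder_deferred_func() observes
+ * them once it sees the node on the list.
+ */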
+ atomic_fetch_or(defer, &proc->deferred_work);
+ if (llist_add_exclusive(&proc->deferred_work_node,
+ &binder_deferred_list))
schedule_work(&binder_deferred_work);
- }
- mutex_unlock(&binder_deferred_lock);
}
static void print_binder_transaction_ilocked(struct seq_file *m,
--
2.7.4