Message-Id: <20170629190211.16927-30-tkjos@google.com>
Date: Thu, 29 Jun 2017 12:02:03 -0700
From: Todd Kjos <tkjos@...roid.com>
To: gregkh@...uxfoundation.org, arve@...roid.com,
devel@...verdev.osuosl.org, linux-kernel@...r.kernel.org,
maco@...gle.com, tkjos@...gle.com
Subject: [PATCH 29/37] binder: add spinlock to protect binder_node
node->lock is used to protect elements of node. No
need to acquire for fields that are invariant: debug_id,
ptr, cookie.
Signed-off-by: Todd Kjos <tkjos@...gle.com>
---
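Note (not part of the commit): a minimal userspace model of the nesting
discipline the new helpers enforce. node->lock is always taken before the
owning proc's inner_lock, and dead nodes (node->proc == NULL) have no inner
lock to take. pthread spinlocks and the model_* names here are illustrative
stand-ins for the kernel types; this is a sketch of the pattern, not kernel
code.

#include <pthread.h>
#include <stddef.h>

struct model_proc {
	pthread_spinlock_t inner_lock;
};

struct model_node {
	pthread_spinlock_t lock;
	struct model_proc *proc;	/* NULL once the owning proc is gone */
};

/* Mirrors binder_node_inner_lock(): node lock first, then inner lock. */
static void model_node_inner_lock(struct model_node *node)
{
	pthread_spin_lock(&node->lock);
	if (node->proc)
		pthread_spin_lock(&node->proc->inner_lock);
}

/* Mirrors binder_node_inner_unlock(): release in reverse order. */
static void model_node_inner_unlock(struct model_node *node)
{
	struct model_proc *proc = node->proc;

	if (proc)
		pthread_spin_unlock(&proc->inner_lock);
	pthread_spin_unlock(&node->lock);
}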
drivers/android/binder.c | 220 +++++++++++++++++++++++++++++++++++------------
1 file changed, 165 insertions(+), 55 deletions(-)
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 5a0389767843..5654187555be 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -318,6 +318,7 @@ struct binder_error {
* @proc: binder_proc that owns this node
* (invariant after initialized)
* @refs: list of references on this node
+ * (protected by @lock)
* @internal_strong_refs: used to take strong references when
* initiating a transaction
* (protected by @proc->inner_lock if @proc
@@ -351,6 +352,7 @@ struct binder_error {
* (protected by @proc->inner_lock if @proc
* and by @lock)
* @has_async_transaction: async transaction to node in progress
+ * (protected by @lock)
* @accept_fds: file descriptor operations supported for node
* (invariant after initialized)
* @min_priority: minimum scheduling priority
@@ -432,6 +434,7 @@ struct binder_ref_data {
* @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
* @rb_node_node: node for lookup by @node in proc's rb_tree
* @node_entry: list entry for node->refs list in target node
+ * (protected by @node->lock)
* @proc: binder_proc containing ref
* @node: binder_node of target node. When cleaning up a
* ref for deletion in binder_cleanup_ref, a non-NULL
@@ -707,6 +710,43 @@ _binder_node_unlock(struct binder_node *node, int line)
spin_unlock(&node->lock);
}
+/**
+ * binder_node_inner_lock() - Acquire node and inner locks
+ * @node: struct binder_node to acquire
+ *
+ * Acquires node->lock. If node->proc, also acquires
+ * proc->inner_lock. Used to protect binder_node fields.
+ */
+#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
+static void
+_binder_node_inner_lock(struct binder_node *node, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_lock(&node->lock);
+ if (node->proc)
+ binder_inner_proc_lock(node->proc);
+}
+
+/**
+ * binder_node_inner_unlock() - Release node and inner locks
+ * @node: struct binder_node to release
+ *
+ * Releases locks acquired via binder_node_inner_lock()
+ */
+#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
+static void
+_binder_node_inner_unlock(struct binder_node *node, int line)
+{
+ struct binder_proc *proc = node->proc;
+
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ if (proc)
+ binder_inner_proc_unlock(proc);
+ spin_unlock(&node->lock);
+}
+
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
return list_empty(list);
@@ -925,12 +965,14 @@ static struct binder_node *binder_get_node(struct binder_proc *proc,
}
static struct binder_node *binder_new_node(struct binder_proc *proc,
- binder_uintptr_t ptr,
- binder_uintptr_t cookie)
+ struct flat_binder_object *fp)
{
struct rb_node **p = &proc->nodes.rb_node;
struct rb_node *parent = NULL;
struct binder_node *node;
+ binder_uintptr_t ptr = fp ? fp->binder : 0;
+ binder_uintptr_t cookie = fp ? fp->cookie : 0;
+ __u32 flags = fp ? fp->flags : 0;
while (*p) {
parent = *p;
@@ -956,6 +998,8 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
node->ptr = ptr;
node->cookie = cookie;
node->work.type = BINDER_WORK_NODE;
+ node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+ node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
spin_lock_init(&node->lock);
INIT_LIST_HEAD(&node->work.entry);
INIT_LIST_HEAD(&node->async_todo);
@@ -972,12 +1016,15 @@ static void binder_free_node(struct binder_node *node)
binder_stats_deleted(BINDER_STAT_NODE);
}
-static int binder_inc_node_ilocked(struct binder_node *node, int strong,
- int internal,
- struct list_head *target_list)
+static int binder_inc_node_nilocked(struct binder_node *node, int strong,
+ int internal,
+ struct list_head *target_list)
{
- if (node->proc)
- BUG_ON(!spin_is_locked(&node->proc->inner_lock));
+ struct binder_proc *proc = node->proc;
+
+ BUG_ON(!spin_is_locked(&node->lock));
+ if (proc)
+ BUG_ON(!spin_is_locked(&proc->inner_lock));
if (strong) {
if (internal) {
if (target_list == NULL &&
@@ -1016,20 +1063,19 @@ static int binder_inc_node(struct binder_node *node, int strong, int internal,
{
int ret;
- if (node->proc)
- binder_inner_proc_lock(node->proc);
- ret = binder_inc_node_ilocked(node, strong, internal, target_list);
- if (node->proc)
- binder_inner_proc_unlock(node->proc);
+ binder_node_inner_lock(node);
+ ret = binder_inc_node_nilocked(node, strong, internal, target_list);
+ binder_node_inner_unlock(node);
return ret;
}
-static bool binder_dec_node_ilocked(struct binder_node *node,
- int strong, int internal)
+static bool binder_dec_node_nilocked(struct binder_node *node,
+ int strong, int internal)
{
struct binder_proc *proc = node->proc;
+ BUG_ON(!spin_is_locked(&node->lock));
if (proc)
BUG_ON(!spin_is_locked(&proc->inner_lock));
if (strong) {
@@ -1088,12 +1134,9 @@ static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
bool free_node;
- if (node->proc)
- binder_inner_proc_lock(node->proc);
- free_node = binder_dec_node_ilocked(node, strong, internal);
- if (node->proc)
- binder_inner_proc_unlock(node->proc);
-
+ binder_node_inner_lock(node);
+ free_node = binder_dec_node_nilocked(node, strong, internal);
+ binder_node_inner_unlock(node);
if (free_node)
binder_free_node(node);
}
@@ -1123,6 +1166,7 @@ static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
*/
static void binder_inc_node_tmpref(struct binder_node *node)
{
+ binder_node_lock(node);
if (node->proc)
binder_inner_proc_lock(node->proc);
else
@@ -1132,6 +1176,7 @@ static void binder_inc_node_tmpref(struct binder_node *node)
binder_inner_proc_unlock(node->proc);
else
spin_unlock(&binder_dead_nodes_lock);
+ binder_node_unlock(node);
}
/**
@@ -1144,9 +1189,8 @@ static void binder_dec_node_tmpref(struct binder_node *node)
{
bool free_node;
- if (node->proc)
- binder_inner_proc_lock(node->proc);
- else
+ binder_node_inner_lock(node);
+ if (!node->proc)
spin_lock(&binder_dead_nodes_lock);
node->tmp_refs--;
BUG_ON(node->tmp_refs < 0);
@@ -1158,9 +1202,8 @@ static void binder_dec_node_tmpref(struct binder_node *node)
* causes no actual reference to be released in binder_dec_node().
* If that changes, a change is needed here too.
*/
- free_node = binder_dec_node_ilocked(node, 0, 1);
- if (node->proc)
- binder_inner_proc_unlock(node->proc);
+ free_node = binder_dec_node_nilocked(node, 0, 1);
+ binder_node_inner_unlock(node);
if (free_node)
binder_free_node(node);
}
@@ -1264,19 +1307,21 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
}
rb_link_node(&new_ref->rb_node_desc, parent, p);
rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
+
+ binder_node_lock(node);
hlist_add_head(&new_ref->node_entry, &node->refs);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"%d new ref %d desc %d for node %d\n",
proc->pid, new_ref->data.debug_id, new_ref->data.desc,
node->debug_id);
+ binder_node_unlock(node);
return new_ref;
}
static void binder_cleanup_ref(struct binder_ref *ref)
{
bool delete_node = false;
- struct binder_proc *node_proc = ref->node->proc;
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"%d delete ref %d desc %d for node %d\n",
@@ -1286,15 +1331,13 @@ static void binder_cleanup_ref(struct binder_ref *ref)
rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
- if (node_proc)
- binder_inner_proc_lock(node_proc);
+ binder_node_inner_lock(ref->node);
if (ref->data.strong)
- binder_dec_node_ilocked(ref->node, 1, 1);
+ binder_dec_node_nilocked(ref->node, 1, 1);
hlist_del(&ref->node_entry);
- delete_node = binder_dec_node_ilocked(ref->node, 0, 1);
- if (node_proc)
- binder_inner_proc_unlock(node_proc);
+ delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
+ binder_node_inner_unlock(ref->node);
/*
* Clear ref->node unless we want the caller to free the node
*/
@@ -1989,12 +2032,9 @@ static int binder_translate_binder(struct flat_binder_object *fp,
node = binder_get_node(proc, fp->binder);
if (!node) {
- node = binder_new_node(proc, fp->binder, fp->cookie);
+ node = binder_new_node(proc, fp);
if (!node)
return -ENOMEM;
-
- node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
- node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
}
if (fp->cookie != node->cookie) {
binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
@@ -2055,6 +2095,7 @@ static int binder_translate_handle(struct flat_binder_object *fp,
goto done;
}
+ binder_node_lock(node);
if (node->proc == target_proc) {
if (fp->hdr.type == BINDER_TYPE_HANDLE)
fp->hdr.type = BINDER_TYPE_BINDER;
@@ -2062,18 +2103,24 @@ static int binder_translate_handle(struct flat_binder_object *fp,
fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
fp->binder = node->ptr;
fp->cookie = node->cookie;
- binder_inc_node(node,
- fp->hdr.type == BINDER_TYPE_BINDER,
- 0, NULL);
+ if (node->proc)
+ binder_inner_proc_lock(node->proc);
+ binder_inc_node_nilocked(node,
+ fp->hdr.type == BINDER_TYPE_BINDER,
+ 0, NULL);
+ if (node->proc)
+ binder_inner_proc_unlock(node->proc);
trace_binder_transaction_ref_to_node(t, node, &src_rdata);
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d -> node %d u%016llx\n",
src_rdata.debug_id, src_rdata.desc, node->debug_id,
(u64)node->ptr);
+ binder_node_unlock(node);
} else {
int ret;
struct binder_ref_data dest_rdata;
+ binder_node_unlock(node);
ret = binder_inc_ref_for_node(target_proc, node,
fp->hdr.type == BINDER_TYPE_HANDLE,
NULL, &dest_rdata);
@@ -2381,13 +2428,16 @@ static void binder_transaction(struct binder_proc *proc,
mutex_unlock(&context->context_mgr_node_lock);
}
e->to_node = target_node->debug_id;
+ binder_node_lock(target_node);
target_proc = target_node->proc;
if (target_proc == NULL) {
+ binder_node_unlock(target_node);
return_error = BR_DEAD_REPLY;
return_error_line = __LINE__;
goto err_dead_binder;
}
target_proc->tmp_ref++;
+ binder_node_unlock(target_node);
if (security_binder_transaction(proc->tsk,
target_proc->tsk) < 0) {
return_error = BR_FAILED_REPLY;
@@ -2704,6 +2754,7 @@ static void binder_transaction(struct binder_proc *proc,
}
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
binder_enqueue_work(proc, tcomplete, &thread->todo);
+ t->work.type = BINDER_WORK_TRANSACTION;
if (reply) {
if (target_thread->is_dead)
@@ -2711,6 +2762,7 @@ static void binder_transaction(struct binder_proc *proc,
BUG_ON(t->buffer->async_transaction != 0);
binder_pop_transaction(target_thread, in_reply_to);
binder_free_transaction(in_reply_to);
+ binder_enqueue_work(target_proc, &t->work, target_list);
} else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
t->need_reply = 1;
@@ -2721,20 +2773,29 @@ static void binder_transaction(struct binder_proc *proc,
binder_pop_transaction(thread, t);
goto err_dead_proc_or_thread;
}
+ binder_enqueue_work(target_proc, &t->work, target_list);
} else {
BUG_ON(target_node == NULL);
BUG_ON(t->buffer->async_transaction != 1);
+ binder_node_lock(target_node);
if (target_node->has_async_transaction) {
target_list = &target_node->async_todo;
target_wait = NULL;
} else
target_node->has_async_transaction = 1;
+ /*
+ * Test/set of has_async_transaction
+ * must be atomic with enqueue on
+ * async_todo
+ */
if (target_proc->is_dead ||
- (target_thread && target_thread->is_dead))
+ (target_thread && target_thread->is_dead)) {
+ binder_node_unlock(target_node);
goto err_dead_proc_or_thread;
+ }
+ binder_enqueue_work(target_proc, &t->work, target_list);
+ binder_node_unlock(target_node);
}
- t->work.type = BINDER_WORK_TRANSACTION;
- binder_enqueue_work(target_proc, &t->work, target_list);
if (target_wait) {
if (reply || !(tr->flags & TF_ONE_WAY))
wake_up_interruptible_sync(target_wait);
@@ -2913,6 +2974,7 @@ static int binder_thread_write(struct binder_proc *proc,
binder_uintptr_t node_ptr;
binder_uintptr_t cookie;
struct binder_node *node;
+ bool free_node;
if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
return -EFAULT;
@@ -2940,13 +3002,13 @@ static int binder_thread_write(struct binder_proc *proc,
binder_put_node(node);
break;
}
- binder_inner_proc_lock(proc);
+ binder_node_inner_lock(node);
if (cmd == BC_ACQUIRE_DONE) {
if (node->pending_strong_ref == 0) {
binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
proc->pid, thread->pid,
node->debug_id);
- binder_inner_proc_unlock(proc);
+ binder_node_inner_unlock(node);
binder_put_node(node);
break;
}
@@ -2956,20 +3018,22 @@ static int binder_thread_write(struct binder_proc *proc,
binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
proc->pid, thread->pid,
node->debug_id);
- binder_inner_proc_unlock(proc);
+ binder_node_inner_unlock(node);
binder_put_node(node);
break;
}
node->pending_weak_ref = 0;
}
- binder_inner_proc_unlock(proc);
- binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
+ free_node = binder_dec_node_nilocked(node,
+ cmd == BC_ACQUIRE_DONE, 0);
+ WARN_ON(free_node);
binder_debug(BINDER_DEBUG_USER_REFS,
"%d:%d %s node %d ls %d lw %d tr %d\n",
proc->pid, thread->pid,
cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
node->debug_id, node->local_strong_refs,
node->local_weak_refs, node->tmp_refs);
+ binder_node_inner_unlock(node);
binder_put_node(node);
break;
}
@@ -3015,9 +3079,9 @@ static int binder_thread_write(struct binder_proc *proc,
struct binder_work *w;
buf_node = buffer->target_node;
+ binder_node_inner_lock(buf_node);
BUG_ON(!buf_node->has_async_transaction);
BUG_ON(buf_node->proc != proc);
- binder_inner_proc_lock(proc);
w = binder_dequeue_work_head_ilocked(
&buf_node->async_todo);
if (!w)
@@ -3025,7 +3089,7 @@ static int binder_thread_write(struct binder_proc *proc,
else
binder_enqueue_work_ilocked(
w, &thread->todo);
- binder_inner_proc_unlock(proc);
+ binder_node_inner_unlock(buf_node);
}
trace_binder_transaction_buffer_release(buffer);
binder_transaction_buffer_release(proc, buffer, NULL);
@@ -3150,6 +3214,7 @@ static int binder_thread_write(struct binder_proc *proc,
INIT_LIST_HEAD(&death->work.entry);
death->cookie = cookie;
ref->death = death;
+ binder_node_lock(ref->node);
if (ref->node->proc == NULL) {
ref->death->work.type = BINDER_WORK_DEAD_BINDER;
if (thread->looper &
@@ -3168,10 +3233,13 @@ static int binder_thread_write(struct binder_proc *proc,
&proc->wait);
}
}
+ binder_node_unlock(ref->node);
} else {
+ binder_node_lock(ref->node);
if (ref->death == NULL) {
binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
proc->pid, thread->pid);
+ binder_node_unlock(ref->node);
break;
}
death = ref->death;
@@ -3180,6 +3248,7 @@ static int binder_thread_write(struct binder_proc *proc,
proc->pid, thread->pid,
(u64)death->cookie,
(u64)cookie);
+ binder_node_unlock(ref->node);
break;
}
ref->death = NULL;
@@ -3204,6 +3273,7 @@ static int binder_thread_write(struct binder_proc *proc,
death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
}
binder_inner_proc_unlock(proc);
+ binder_node_unlock(ref->node);
}
} break;
case BC_DEAD_BINDER_DONE: {
@@ -3486,6 +3556,17 @@ static int binder_thread_read(struct binder_proc *proc,
(u64)node_cookie);
rb_erase(&node->rb_node, &proc->nodes);
binder_inner_proc_unlock(proc);
+ binder_node_lock(node);
+ /*
+ * Acquire the node lock before freeing the
+ * node to serialize with other threads that
+ * may have been holding the node lock while
+ * decrementing this node (avoids race where
+ * this thread frees while the other thread
+ * is unlocking the node after the final
+ * decrement)
+ */
+ binder_node_unlock(node);
binder_free_node(node);
} else
binder_inner_proc_unlock(proc);
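Note (not part of the commit): the comment in the hunk above describes a
free-vs-unlock race. A minimal userspace model of why passing through the
lock once before freeing is sufficient; the model_* names are illustrative
and the refcounting that triggers the free is omitted.

#include <pthread.h>
#include <stdlib.h>

struct model_node {
	pthread_spinlock_t lock;
	int refs;
};

/* Thread B: final decrement; the node must stay valid until the
 * unlock's releasing store completes. */
static void model_put(struct model_node *node)
{
	pthread_spin_lock(&node->lock);
	node->refs--;
	pthread_spin_unlock(&node->lock);	/* last touch of node */
}

/* Thread A: acquiring the lock can only succeed after any concurrent
 * model_put() has finished its unlock, so freeing afterwards is safe. */
static void model_free(struct model_node *node)
{
	pthread_spin_lock(&node->lock);
	pthread_spin_unlock(&node->lock);
	free(node);
}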
@@ -3973,16 +4054,18 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
} else {
context->binder_context_mgr_uid = curr_euid;
}
- new_node = binder_new_node(proc, 0, 0);
+ new_node = binder_new_node(proc, NULL);
if (!new_node) {
ret = -ENOMEM;
goto out;
}
+ binder_node_lock(new_node);
new_node->local_weak_refs++;
new_node->local_strong_refs++;
new_node->has_strong_ref = 1;
new_node->has_weak_ref = 1;
context->binder_context_mgr_node = new_node;
+ binder_node_unlock(new_node);
binder_put_node(new_node);
out:
mutex_unlock(&context->context_mgr_node_lock);
@@ -4245,6 +4328,7 @@ static int binder_node_release(struct binder_node *node, int refs)
binder_release_work(proc, &node->async_todo);
+ binder_node_lock(node);
binder_inner_proc_lock(proc);
binder_dequeue_work_ilocked(&node->work);
/*
@@ -4253,6 +4337,7 @@ static int binder_node_release(struct binder_node *node, int refs)
BUG_ON(!node->tmp_refs);
if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
binder_inner_proc_unlock(proc);
+ binder_node_unlock(node);
binder_free_node(node);
return refs;
@@ -4289,6 +4374,7 @@ static int binder_node_release(struct binder_node *node, int refs)
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"node %d now dead, refs %d, death %d\n",
node->debug_id, refs, death);
+ binder_node_unlock(node);
binder_put_node(node);
return refs;
@@ -4532,12 +4618,15 @@ static void print_binder_thread_ilocked(struct seq_file *m,
m->count = start_pos;
}
-static void print_binder_node(struct seq_file *m, struct binder_node *node)
+static void print_binder_node_nlocked(struct seq_file *m,
+ struct binder_node *node)
{
struct binder_ref *ref;
struct binder_work *w;
int count;
+ WARN_ON(!spin_is_locked(&node->lock));
+
count = 0;
hlist_for_each_entry(ref, &node->refs, node_entry)
count++;
@@ -4564,11 +4653,13 @@ static void print_binder_node(struct seq_file *m, struct binder_node *node)
static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
{
+ binder_node_lock(ref->node);
seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
ref->data.debug_id, ref->data.desc,
ref->node->proc ? "" : "dead ",
ref->node->debug_id, ref->data.strong,
ref->data.weak, ref->death);
+ binder_node_unlock(ref->node);
}
static void print_binder_proc(struct seq_file *m,
@@ -4591,8 +4682,10 @@ static void print_binder_proc(struct seq_file *m,
for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
struct binder_node *node = rb_entry(n, struct binder_node,
rb_node);
+ binder_node_lock(node);
if (print_all || node->has_async_transaction)
- print_binder_node(m, node);
+ print_binder_node_nlocked(m, node);
+ binder_node_unlock(node);
}
if (print_all) {
for (n = rb_first(&proc->refs_by_desc);
@@ -4764,6 +4857,7 @@ static int binder_state_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
struct binder_node *node;
+ struct binder_node *last_node = NULL;
binder_lock(__func__);
@@ -4772,9 +4866,25 @@ static int binder_state_show(struct seq_file *m, void *unused)
spin_lock(&binder_dead_nodes_lock);
if (!hlist_empty(&binder_dead_nodes))
seq_puts(m, "dead nodes:\n");
- hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
- print_binder_node(m, node);
+ hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
+ /*
+ * take a temporary reference on the node so it
+ * survives and isn't removed from the list
+ * while we print it.
+ */
+ node->tmp_refs++;
+ spin_unlock(&binder_dead_nodes_lock);
+ if (last_node)
+ binder_put_node(last_node);
+ binder_node_lock(node);
+ print_binder_node_nlocked(m, node);
+ binder_node_unlock(node);
+ last_node = node;
+ spin_lock(&binder_dead_nodes_lock);
+ }
spin_unlock(&binder_dead_nodes_lock);
+ if (last_node)
+ binder_put_node(last_node);
mutex_lock(&binder_procs_lock);
hlist_for_each_entry(proc, &binder_procs, proc_node)
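Note (not part of the commit): the dead-nodes loop in binder_state_show()
above pins each element with a temporary reference so the list lock can be
dropped while printing, then retaken to advance. A runnable userspace sketch
of that pattern; model_node, dead_list and put_node are illustrative names,
lock initialization is assumed, and freeing at refcount zero is omitted.

#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

struct model_node {
	struct model_node *next;
	int tmp_refs;		/* protected by list_lock in this model */
	int debug_id;
};

static pthread_spinlock_t list_lock;	/* assumed pthread_spin_init()'d */
static struct model_node *dead_list;

static void put_node(struct model_node *node)
{
	pthread_spin_lock(&list_lock);
	node->tmp_refs--;	/* real code frees at zero; omitted here */
	pthread_spin_unlock(&list_lock);
}

static void print_dead_nodes(void)
{
	struct model_node *node, *last_node = NULL;

	pthread_spin_lock(&list_lock);
	for (node = dead_list; node; node = node->next) {
		node->tmp_refs++;	/* keeps node (and node->next) valid */
		pthread_spin_unlock(&list_lock);
		if (last_node)
			put_node(last_node);
		printf("dead node %d\n", node->debug_id);	/* slow work off-lock */
		last_node = node;
		pthread_spin_lock(&list_lock);	/* node->next read under lock */
	}
	pthread_spin_unlock(&list_lock);
	if (last_node)
		put_node(last_node);
}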
--
2.13.2.725.g09c95d1e9-goog