Date:   Thu, 29 Jun 2017 12:02:10 -0700
From:   Todd Kjos <tkjos@...roid.com>
To:     gregkh@...uxfoundation.org, arve@...roid.com,
        devel@...verdev.osuosl.org, linux-kernel@...r.kernel.org,
        maco@...gle.com, tkjos@...gle.com
Subject: [PATCH 36/37] binder: fix death race conditions

From: Martijn Coenen <maco@...gle.com>

A race existed where one thread could register a death notification
for a node while another thread was cleaning up that node and sending
out death notifications for its references. Because the two paths held
different locks, they could access ref->death simultaneously.

Signed-off-by: Martijn Coenen <maco@...gle.com>
---
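For illustration only (not part of the patch): a minimal user-space
sketch of the locking rule this change enforces, namely that both the
notification-registration path and the node-release path take the same
lock before touching ref->death. All fake_* types and the
request_death()/release_node() helpers below are made up for this
sketch, with a plain pthread mutex standing in for binder_node_lock().

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_death {
	unsigned long cookie;
};

struct fake_node {
	pthread_mutex_t lock;		/* stands in for binder_node_lock() */
};

struct fake_ref {
	struct fake_node *node;
	struct fake_death *death;	/* protected by node->lock, as in the patch */
};

/* BC_REQUEST_DEATH_NOTIFICATION-like path: set ref->death under the node lock. */
static void *request_death(void *arg)
{
	struct fake_ref *ref = arg;

	pthread_mutex_lock(&ref->node->lock);
	if (!ref->death) {
		struct fake_death *d = malloc(sizeof(*d));

		d->cookie = 0xdead;
		ref->death = d;
	}
	pthread_mutex_unlock(&ref->node->lock);
	return NULL;
}

/* binder_node_release()-like path: consume ref->death under the same lock. */
static void *release_node(void *arg)
{
	struct fake_ref *ref = arg;

	pthread_mutex_lock(&ref->node->lock);
	if (ref->death) {
		printf("delivering death notification, cookie %lx\n",
		       ref->death->cookie);
		free(ref->death);
		ref->death = NULL;
	}
	pthread_mutex_unlock(&ref->node->lock);
	return NULL;
}

int main(void)
{
	struct fake_node node = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct fake_ref ref = { .node = &node, .death = NULL };
	pthread_t a, b;

	pthread_create(&a, NULL, request_death, &ref);
	pthread_create(&b, NULL, release_node, &ref);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	free(ref.death);	/* in case release_node() ran before request_death() */
	return 0;
}
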
 drivers/android/binder.c | 64 ++++++++++++++++++++++++------------------------
 1 file changed, 32 insertions(+), 32 deletions(-)

diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 36ef88d10631..1e50b034d49a 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -442,6 +442,7 @@ struct binder_ref_data {
  *               ref for deletion in binder_cleanup_ref, a non-NULL
  *               @node indicates the node must be freed
  * @death:       pointer to death notification (ref_death) if requested
+ *               (protected by @node->lock)
  *
  * Structure to track references from procA to target node (on procB). This
  * structure is unsafe to access without holding @proc->outer_lock.
@@ -3337,10 +3338,12 @@ static int binder_thread_write(struct binder_proc *proc,
 				     ref->data.desc, ref->data.strong,
 				     ref->data.weak, ref->node->debug_id);
 
+			binder_node_lock(ref->node);
 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
 				if (ref->death) {
 					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
 						proc->pid, thread->pid);
+					binder_node_unlock(ref->node);
 					binder_proc_unlock(proc);
 					kfree(death);
 					break;
@@ -3349,7 +3352,6 @@ static int binder_thread_write(struct binder_proc *proc,
 				INIT_LIST_HEAD(&death->work.entry);
 				death->cookie = cookie;
 				ref->death = death;
-				binder_node_lock(ref->node);
 				if (ref->node->proc == NULL) {
 					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
 					if (thread->looper &
@@ -3368,9 +3370,7 @@ static int binder_thread_write(struct binder_proc *proc,
 								&proc->wait);
 					}
 				}
-				binder_node_unlock(ref->node);
 			} else {
-				binder_node_lock(ref->node);
 				if (ref->death == NULL) {
 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
 						proc->pid, thread->pid);
@@ -3410,8 +3410,8 @@ static int binder_thread_write(struct binder_proc *proc,
 					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
 				}
 				binder_inner_proc_unlock(proc);
-				binder_node_unlock(ref->node);
 			}
+			binder_node_unlock(ref->node);
 			binder_proc_unlock(proc);
 		} break;
 		case BC_DEAD_BINDER_DONE: {
@@ -3748,44 +3748,39 @@ static int binder_thread_read(struct binder_proc *proc,
 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
 			struct binder_ref_death *death;
 			uint32_t cmd;
+			binder_uintptr_t cookie;
 
 			death = container_of(w, struct binder_ref_death, work);
 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
 				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
 			else
 				cmd = BR_DEAD_BINDER;
-			/*
-			 * TODO: there is a race condition between
-			 * death notification requests and delivery
-			 * of the notifications. This will be handled
-			 * in a later patch.
-			 */
-			binder_inner_proc_unlock(proc);
-			if (put_user(cmd, (uint32_t __user *)ptr))
-				return -EFAULT;
-			ptr += sizeof(uint32_t);
-			if (put_user(death->cookie,
-				     (binder_uintptr_t __user *)ptr))
-				return -EFAULT;
-			ptr += sizeof(binder_uintptr_t);
-			binder_stat_br(proc, thread, cmd);
+			cookie = death->cookie;
+
 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
 				     "%d:%d %s %016llx\n",
 				      proc->pid, thread->pid,
 				      cmd == BR_DEAD_BINDER ?
 				      "BR_DEAD_BINDER" :
 				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
-				      (u64)death->cookie);
-
+				      (u64)cookie);
 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
+				binder_inner_proc_unlock(proc);
 				kfree(death);
 				binder_stats_deleted(BINDER_STAT_DEATH);
 			} else {
-				binder_inner_proc_lock(proc);
 				binder_enqueue_work_ilocked(
 						w, &proc->delivered_death);
 				binder_inner_proc_unlock(proc);
 			}
+			if (put_user(cmd, (uint32_t __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(uint32_t);
+			if (put_user(cookie,
+				     (binder_uintptr_t __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(binder_uintptr_t);
+			binder_stat_br(proc, thread, cmd);
 			if (cmd == BR_DEAD_BINDER)
 				goto done; /* DEAD_BINDER notifications can cause transactions */
 		} break;
@@ -4535,20 +4530,25 @@ static int binder_node_release(struct binder_node *node, int refs)
 
 	hlist_for_each_entry(ref, &node->refs, node_entry) {
 		refs++;
-
-		if (!ref->death)
+		/*
+		 * Need the node lock to synchronize
+		 * with new notification requests and the
+		 * inner lock to synchronize with queued
+		 * death notifications.
+		 */
+		binder_inner_proc_lock(ref->proc);
+		if (!ref->death) {
+			binder_inner_proc_unlock(ref->proc);
 			continue;
+		}
 
 		death++;
 
-		binder_inner_proc_lock(ref->proc);
-		if (list_empty(&ref->death->work.entry)) {
-			ref->death->work.type = BINDER_WORK_DEAD_BINDER;
-			binder_enqueue_work_ilocked(&ref->death->work,
-						    &ref->proc->todo);
-			wake_up_interruptible(&ref->proc->wait);
-		} else
-			BUG();
+		BUG_ON(!list_empty(&ref->death->work.entry));
+		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
+		binder_enqueue_work_ilocked(&ref->death->work,
+					    &ref->proc->todo);
+		wake_up_interruptible(&ref->proc->wait);
 		binder_inner_proc_unlock(ref->proc);
 	}
 
-- 
2.13.2.725.g09c95d1e9-goog
