Message-ID: <20251215194541.19425-1-pl.schoeman@pocketlaw.net>
Date: Mon, 15 Dec 2025 21:45:41 +0200
From: Pieter-Louis Schoeman <pl.schoeman@...ketlaw.net>
To: aliceryhl@...gle.com,
	arve@...roid.com,
	brauner@...nel.org,
	cmllamas@...gle.com,
	gregkh@...uxfoundation.org,
	tkjos@...roid.com
Cc: linux-kernel@...r.kernel.org,
	ojeda@...nel.org,
	paul@...l-moore.com,
	pl.schoeman@...ketlaw.net,
	tamird@...il.com,
	vitaly.wool@...sulko.se,
	wedsonaf@...il.com,
	yury.norov@...il.com
Subject: [PATCH] binder: rust: pin zero-to-one ref increments to initiating thread

The refcount update path must use the zero-to-one aware API for the
first increment in order to preserve the correct delivery semantics.

Pass the initiating thread through Process::update_ref() and NodeRef so
that zero-to-one increments are pinned to that thread when it is
available, while keeping decrement behavior unchanged.

Also harden update_refcount_locked() to reject increments; increments
must go through the incr_refcount_allow_zero2one*() helpers instead.

Signed-off-by: Pieter-Louis Schoeman <pl.schoeman@...ketlaw.net>
---
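Reviewer note, not part of the commit message: the core of the change is
that only the first, zero-to-one increment generates a work item, and
only that item is pinned to the initiating thread. A rough userspace
sketch of that intent follows; ToyCount, PinTarget and apply_increment
are illustrative names only and do not exist in the driver.

    // Toy model of the increment side: the zero-to-one transition is the
    // only point that produces a work item, and that item is pinned to
    // the initiating thread when one is available. Later increments are
    // plain additions with no delivery side effects.
    #[derive(Default)]
    struct ToyCount {
        count: usize,
    }

    enum PinTarget {
        InitiatingThread,
        ProcessQueue,
    }

    fn apply_increment(state: &mut ToyCount, delta: usize, have_thread: bool) -> Option<PinTarget> {
        if delta == 0 {
            return None;
        }
        // Work is generated only when the count is currently zero.
        let push = if state.count == 0 {
            Some(if have_thread {
                PinTarget::InitiatingThread
            } else {
                PinTarget::ProcessQueue
            })
        } else {
            None
        };
        state.count = state.count.checked_add(delta).expect("refcount overflow");
        push
    }

    fn main() {
        let mut state = ToyCount::default();
        // Zero-to-one with an initiating thread: the work item is pinned to it.
        assert!(matches!(
            apply_increment(&mut state, 3, true),
            Some(PinTarget::InitiatingThread)
        ));
        // Subsequent increments never generate another push.
        assert!(apply_increment(&mut state, 2, true).is_none());
        assert_eq!(state.count, 5);
    }
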
 drivers/android/binder/allocation.rs |   4 +-
 drivers/android/binder/node.rs       | 200 ++++++++++++++++++---------
 drivers/android/binder/process.rs    |  49 ++++++-
 drivers/android/binder/thread.rs     |  44 +++---
 4 files changed, 202 insertions(+), 95 deletions(-)
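
The decrement side keeps its existing semantics: no pinning to a thread,
a decrement of zero is a no-op (hence NonZeroUsize), and underflow is
rejected rather than allowed to wrap. Again only a sketch under the same
simplifications; dec_toy is an illustrative name, not driver code.

    use std::num::NonZeroUsize;

    /// Returns Ok(true) when the count reached zero (the caller may then
    /// need to queue "last reference dropped" work), Ok(false) when there
    /// is nothing further to do, and Err(..) on underflow.
    fn dec_toy(state: &mut usize, count: usize) -> Result<bool, &'static str> {
        let Some(nz) = NonZeroUsize::new(count) else {
            return Ok(false); // decrement by zero: nothing to do
        };
        let dec = nz.get();
        if *state < dec {
            return Err("refcount underflow");
        }
        *state -= dec;
        Ok(*state == 0)
    }

    fn main() {
        let mut refs = 2usize;
        assert_eq!(dec_toy(&mut refs, 0), Ok(false)); // no-op
        assert_eq!(dec_toy(&mut refs, 2), Ok(true));  // reached zero
        assert_eq!(dec_toy(&mut refs, 1), Err("refcount underflow"));
    }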

diff --git a/drivers/android/binder/allocation.rs b/drivers/android/binder/allocation.rs
index 7f65a9c3a0e5..8e8465163069 100644
--- a/drivers/android/binder/allocation.rs
+++ b/drivers/android/binder/allocation.rs
@@ -402,7 +402,7 @@ pub(crate) fn transfer_binder_object(
                     .alloc
                     .process
                     .as_arc_borrow()
-                    .update_ref(handle, false, strong);
+                    .update_ref(handle, false, strong, None);
                 return Err(EINVAL);
             }
         }
@@ -433,7 +433,7 @@ fn cleanup_object(&self, index_offset: usize) -> Result {
                 self.alloc
                     .process
                     .as_arc_borrow()
-                    .update_ref(handle, false, strong)
+                    .update_ref(handle, false, strong, None)
             }
             _ => Ok(()),
         }
diff --git a/drivers/android/binder/node.rs b/drivers/android/binder/node.rs
index c26d113ede96..0f52e3ade821 100644
--- a/drivers/android/binder/node.rs
+++ b/drivers/android/binder/node.rs
@@ -21,6 +21,7 @@
 };
 
 use core::mem;
+use core::num::NonZeroUsize;
 
 mod wrapper;
 pub(crate) use self::wrapper::CritIncrWrapper;
@@ -204,6 +205,73 @@ impl ListItem<0> for DTRWrap<Transaction> {
 }
 
 impl Node {
+    pub(crate) fn dec_refcount_locked(
+        self: &DArc<Node>,
+        strong: bool,
+        count: NonZeroUsize,
+        owner_inner: &mut ProcessInner,
+    ) -> Option<DLArc<Node>> {
+        let is_dead = owner_inner.is_dead;
+        let inner = self.inner.access_mut(owner_inner);
+
+        let state = if strong {
+            &mut inner.strong
+        } else {
+            &mut inner.weak
+        };
+
+        let dec = count.get();
+        if state.count < dec {
+            pr_err!("Failure: refcount underflow!");
+            return None;
+        }
+
+        state.count -= dec;
+        let need_push = !is_dead && state.count == 0 && state.has_count;
+
+        if need_push && inner.delivery_state.should_normal_push() {
+            let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
+            inner.delivery_state.did_normal_push();
+            Some(list_arc)
+        } else {
+            None
+        }
+    }
+
+    pub(crate) fn add_refcount_locked(
+        self: &DArc<Self>,
+        strong: bool,
+        delta: usize,
+        owner_inner: &mut ProcessInner,
+    ) {
+        if delta == 0 {
+            return;
+        }
+
+        let inner = self.inner.access_mut(owner_inner);
+        let state = if strong {
+            &mut inner.strong
+        } else {
+            &mut inner.weak
+        };
+
+        match state.count.checked_add(delta) {
+            Some(new_count) => state.count = new_count,
+            None => pr_err!("Failure: refcount overflow!"),
+        }
+    }
+
+    pub(crate) fn update_refcount_with_thread(
+        self: &DArc<Self>,
+        inc: bool,
+        count: usize,
+        strong: bool,
+        thread: Option<&Thread>,
+    ) {
+        let mut owner_inner = self.owner.inner.lock();
+        owner_inner.update_node_refcount(self, inc, strong, count, thread);
+    }
+
     pub(crate) fn new(
         ptr: u64,
         cookie: u64,
@@ -374,37 +442,21 @@ pub(crate) fn update_refcount_locked(
         count: usize,
         owner_inner: &mut ProcessInner,
     ) -> Option<DLArc<Node>> {
-        let is_dead = owner_inner.is_dead;
-        let inner = self.inner.access_mut(owner_inner);
-
-        // Get a reference to the state we'll update.
-        let state = if strong {
-            &mut inner.strong
-        } else {
-            &mut inner.weak
-        };
-
-        // Update the count and determine whether we need to push work.
-        let need_push = if inc {
-            state.count += count;
-            // TODO: This method shouldn't be used for zero-to-one increments.
-            !is_dead && !state.has_count
-        } else {
-            if state.count < count {
-                pr_err!("Failure: refcount underflow!");
-                return None;
-            }
-            state.count -= count;
-            !is_dead && state.count == 0 && state.has_count
-        };
-
-        if need_push && inner.delivery_state.should_normal_push() {
-            let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
-            inner.delivery_state.did_normal_push();
-            Some(list_arc)
-        } else {
-            None
+        debug_assert!(
+            !inc,
+            "update_refcount_locked: increments forbidden; use incr_refcount_allow_zero2one*"
+        );
+        if inc {
+            pr_err!(
+                "update_refcount_locked called for increment; use incr_refcount_allow_zero2one*"
+            );
+            return None;
         }
+        let nz = match NonZeroUsize::new(count) {
+            Some(nz) => nz,
+            None => return None,
+        };
+        self.dec_refcount_locked(strong, nz, owner_inner)
     }
 
     pub(crate) fn incr_refcount_allow_zero2one(
@@ -773,6 +825,55 @@ pub(crate) fn new(node: DArc<Node>, strong_count: usize, weak_count: usize) -> S
         }
     }
 
+    pub(crate) fn update_with_thread(
+        &mut self,
+        inc: bool,
+        strong: bool,
+        thread: Option<&Thread>,
+    ) -> bool {
+        if strong && self.strong_count == 0 {
+            return false;
+        }
+        let (count, node_count, other_count) = if strong {
+            (
+                &mut self.strong_count,
+                &mut self.strong_node_count,
+                self.weak_count,
+            )
+        } else {
+            (
+                &mut self.weak_count,
+                &mut self.weak_node_count,
+                self.strong_count,
+            )
+        };
+        if inc {
+            if *count == 0 {
+                *node_count = 1;
+                // Critical: a zero-to-one increment must be pinned to the initiating thread when available.
+                self.node
+                    .update_refcount_with_thread(true, 1, strong, thread);
+            }
+            *count += 1;
+        } else {
+            if *count == 0 {
+                pr_warn!(
+                    "pid {} performed invalid decrement on ref\n",
+                    kernel::current!().pid()
+                );
+                return false;
+            }
+            *count -= 1;
+            if *count == 0 {
+                // Decrements are not "critical"; preserve existing semantics (no thread pinning).
+                self.node.update_refcount(false, *node_count, strong);
+                *node_count = 0;
+                return other_count == 0;
+            }
+        }
+        false
+    }
+
     pub(crate) fn absorb(&mut self, mut other: Self) {
         assert!(
             Arc::ptr_eq(&self.node, &other.node),
@@ -826,44 +927,7 @@ pub(crate) fn clone(&self, strong: bool) -> Result<NodeRef> {
     ///
     /// Returns whether `self` should be removed (when both counts are zero).
     pub(crate) fn update(&mut self, inc: bool, strong: bool) -> bool {
-        if strong && self.strong_count == 0 {
-            return false;
-        }
-        let (count, node_count, other_count) = if strong {
-            (
-                &mut self.strong_count,
-                &mut self.strong_node_count,
-                self.weak_count,
-            )
-        } else {
-            (
-                &mut self.weak_count,
-                &mut self.weak_node_count,
-                self.strong_count,
-            )
-        };
-        if inc {
-            if *count == 0 {
-                *node_count = 1;
-                self.node.update_refcount(true, 1, strong);
-            }
-            *count += 1;
-        } else {
-            if *count == 0 {
-                pr_warn!(
-                    "pid {} performed invalid decrement on ref\n",
-                    kernel::current!().pid()
-                );
-                return false;
-            }
-            *count -= 1;
-            if *count == 0 {
-                self.node.update_refcount(false, *node_count, strong);
-                *node_count = 0;
-                return other_count == 0;
-            }
-        }
-        false
+        self.update_with_thread(inc, strong, None)
     }
 }
 
diff --git a/drivers/android/binder/process.rs b/drivers/android/binder/process.rs
index 132055b4790f..1205320770a0 100644
--- a/drivers/android/binder/process.rs
+++ b/drivers/android/binder/process.rs
@@ -222,15 +222,53 @@ pub(crate) fn update_node_refcount(
         count: usize,
         othread: Option<&Thread>,
     ) {
-        let push = node.update_refcount_locked(inc, strong, count, self);
+        let push: Option<DLArc<dyn DeliverToRead>> = if inc {
+            if count == 0 {
+                None
+            } else {
+                // First increment must use the zero2one-aware API so delivery_state is correct.
+                let first_push = match node.incr_refcount_allow_zero2one(strong, self) {
+                    Ok(Some(work)) => Some(work as _),
+                    Ok(None) => None,
+                    Err(CouldNotDeliverCriticalIncrement) => {
+                        // Strong-only: we need the wrapper to avoid misdelivery.
+                        match CritIncrWrapper::new() {
+                            Ok(wrapper) => node
+                                .incr_refcount_allow_zero2one_with_wrapper(strong, wrapper, self),
+                            Err(_) => {
+                                // OOM fallback: keep the refcount, but rely
+                                // on the already scheduled node to eventually
+                                // deliver (this degrades the "critical"
+                                // delivery guarantee).
+                                node.add_refcount_locked(strong, 1, self);
+                                None
+                            }
+                        }
+                    }
+                };
+
+                if count > 1 {
+                    node.add_refcount_locked(strong, count - 1, self);
+                }
+
+                first_push
+            }
+        } else {
+            match core::num::NonZeroUsize::new(count) {
+                Some(nz) => node
+                    .dec_refcount_locked(strong, nz, self)
+                    .map(|work| work as _),
+                None => None,
+            }
+        };
 
         // If we decided that we need to push work, push either to the process or to a thread if
         // one is specified.
-        if let Some(node) = push {
+        if let Some(work) = push {
             if let Some(thread) = othread {
-                thread.push_work_deferred(node);
+                thread.push_work_deferred(work);
             } else {
-                let _ = self.push_work(node);
+                let _ = self.push_work(work);
                 // Nothing to do: `push_work` may fail if the process is dead, but that's ok as in
                 // that case, it doesn't care about the notification.
             }
@@ -922,6 +960,7 @@ pub(crate) fn update_ref(
         handle: u32,
         inc: bool,
         strong: bool,
+        thread: Option<&Thread>,
     ) -> Result {
         if inc && handle == 0 {
             if let Ok(node_ref) = self.ctx.get_manager_node(strong) {
@@ -937,7 +976,7 @@ pub(crate) fn update_ref(
         // increment references on itself.
         let mut refs = self.node_refs.lock();
         if let Some(info) = refs.by_handle.get_mut(&handle) {
-            if info.node_ref().update(inc, strong) {
+            if info.node_ref().update_with_thread(inc, strong, thread) {
                 // Clean up death if there is one attached to this node reference.
                 if let Some(death) = info.death().take() {
                     death.set_cleared(true);
diff --git a/drivers/android/binder/thread.rs b/drivers/android/binder/thread.rs
index 1a8e6fdc0dc4..ea6de38ceef8 100644
--- a/drivers/android/binder/thread.rs
+++ b/drivers/android/binder/thread.rs
@@ -1330,26 +1330,30 @@ fn write(self: &Arc<Self>, req: &mut BinderWriteRead) -> Result {
                         drop(buffer);
                     }
                 }
-                BC_INCREFS => {
-                    self.process
-                        .as_arc_borrow()
-                        .update_ref(reader.read()?, true, false)?
-                }
-                BC_ACQUIRE => {
-                    self.process
-                        .as_arc_borrow()
-                        .update_ref(reader.read()?, true, true)?
-                }
-                BC_RELEASE => {
-                    self.process
-                        .as_arc_borrow()
-                        .update_ref(reader.read()?, false, true)?
-                }
-                BC_DECREFS => {
-                    self.process
-                        .as_arc_borrow()
-                        .update_ref(reader.read()?, false, false)?
-                }
+                BC_INCREFS => self.process.as_arc_borrow().update_ref(
+                    reader.read()?,
+                    true,
+                    false,
+                    Some(self),
+                )?,
+                BC_ACQUIRE => self.process.as_arc_borrow().update_ref(
+                    reader.read()?,
+                    true,
+                    true,
+                    Some(self),
+                )?,
+                BC_RELEASE => self.process.as_arc_borrow().update_ref(
+                    reader.read()?,
+                    false,
+                    true,
+                    Some(self),
+                )?,
+                BC_DECREFS => self.process.as_arc_borrow().update_ref(
+                    reader.read()?,
+                    false,
+                    false,
+                    Some(self),
+                )?,
                 BC_INCREFS_DONE => self.process.inc_ref_done(&mut reader, false)?,
                 BC_ACQUIRE_DONE => self.process.inc_ref_done(&mut reader, true)?,
                 BC_REQUEST_DEATH_NOTIFICATION => self.process.request_death(&mut reader, self)?,
-- 
2.51.0

