Message-Id: <DG80KWRTCM3Z.3PYV9P6Y5414F@garyguo.net>
Date: Fri, 06 Feb 2026 16:20:03 +0000
From: "Gary Guo" <gary@...yguo.net>
To: <jongan.kim@....com>, <aliceryhl@...gle.com>, <a.hindborg@...nel.org>,
<arve@...roid.com>, <bjorn3_gh@...tonmail.com>, <boqun.feng@...il.com>,
<brauner@...nel.org>, <cmllamas@...gle.com>, <dakr@...nel.org>,
<daniel.almeida@...labora.com>, <gary@...yguo.net>,
<gregkh@...uxfoundation.org>, <tamird@...il.com>, <tkjos@...roid.com>,
<tmgross@...ch.edu>, <viresh.kumar@...aro.org>, <vitaly.wool@...sulko.se>,
<yury.norov@...il.com>, <ojeda@...nel.org>, <lossin@...nel.org>
Cc: <heesu0025.kim@....com>, <ht.hong@....com>, <jungsu.hwang@....com>,
<kernel-team@...roid.com>, <linux-kernel@...r.kernel.org>,
<rust-for-linux@...r.kernel.org>, <sanghun.lee@....com>,
<seulgi.lee@....com>, <sunghoon.kim@....com>
Subject: Re: [PATCH v4 3/3] rust_binder: fix PID namespace collision for
freeze operation

On Fri Feb 6, 2026 at 8:53 AM GMT, jongan.kim wrote:
> From: HeeSu Kim <heesu0025.kim@....com>
>
> Port PID namespace conversion logic from C binder to the Rust
> implementation.
>
> Without namespace conversion, freeze operations from non-init namespaces
> can match the wrong process due to PID collisions. Add proper
> conversion to ensure freeze operations target the correct process.
>
> Fix this by:
> - Adding get_task_from_vpid() to translate a VPID into a Task reference
> - Adding Context::get_procs_with_task() for Task-based process lookup,
>   using a &mut KVec parameter to avoid intermediate allocations
> - Updating get_frozen_status() and ioctl_freeze() to use Task comparison
>
> Suggested-by: Gary Guo <gary@...yguo.net>
> Link: https://lore.kernel.org/rust-for-linux/DG5CFX3ML5YL.2FE913F20LNPT@garyguo.net/
> Suggested-by: Alice Ryhl <aliceryhl@...gle.com>
> Link: https://lore.kernel.org/lkml/aXs5Y3xAFKyZr6nd@google.com/
> Signed-off-by: Heesu Kim <heesu0025.kim@....com>
> ---
> v3 -> v4
> - Clarify the subject line
> - Use Task pointer comparison instead of PID number comparison
> - Remove PidNamespace dependency entirely
> - Use &mut KVec parameter to avoid intermediate allocation
> - Merge context.rs and process.rs changes into single patch
>
> v2 -> v3:
> - Use task::Pid typedef instead of u32/i32
> - Use PidNamespace::init_ns() instead of init_pid_ns()
> - Compare PidNamespace directly with == instead of raw pointers
> - Use Pid::find_vpid() and pid.pid_task() (dropped _with_guard suffix)
> - Fix rustfmt import ordering (rcu before Arc)
> - Rename TaskPid alias to PidT for clearer pid_t type indication
> - Use task.group_leader().pid() instead of tgid_nr_ns() for consistency with C
>
> drivers/android/binder/context.rs | 16 +++++++++++++++-
> drivers/android/binder/process.rs | 25 +++++++++++++++++++------
> 2 files changed, 34 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/android/binder/context.rs b/drivers/android/binder/context.rs
> index 3d135ec03ca7..1fc779e4d9ce 100644
> --- a/drivers/android/binder/context.rs
> +++ b/drivers/android/binder/context.rs
> @@ -9,7 +9,7 @@
> security,
> str::{CStr, CString},
> sync::{Arc, Mutex},
> - task::Kuid,
> + task::{Kuid, Task},
> };
>
> use crate::{error::BinderError, node::NodeRef, process::Process};
> @@ -177,4 +177,18 @@ pub(crate) fn get_procs_with_pid(&self, pid: i32) -> Result<KVec<Arc<Process>>>
> }
> Ok(backing)
> }
> +
> + pub(crate) fn get_procs_with_task(
> + &self,
> + target: &Task,
> + out: &mut KVec<Arc<Process>>,
> + ) -> Result {
> + let lock = self.manager.lock();
> + for proc in &lock.all_procs {
> + if core::ptr::eq(&*proc.task, target) {

I think we should just add `PartialEq` and `Eq` impls to `Task`.
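Something like this, as a rough sketch (untested):

// Equality as task identity: `Task` is a transparent wrapper around
// the C task_struct, so comparing the addresses of the two references
// compares the underlying tasks.
impl PartialEq for Task {
    fn eq(&self, other: &Self) -> bool {
        core::ptr::eq(self, other)
    }
}

impl Eq for Task {}

With that in place, the check above becomes `&*proc.task == target`.
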
> + out.push(Arc::from(proc), GFP_KERNEL)?;
> + }
> + }
> + Ok(())
> + }
> }
> diff --git a/drivers/android/binder/process.rs b/drivers/android/binder/process.rs
> index 132055b4790f..58e816f8873f 100644
> --- a/drivers/android/binder/process.rs
> +++ b/drivers/android/binder/process.rs
> @@ -22,6 +22,7 @@
> id_pool::IdPool,
> list::{List, ListArc, ListArcField, ListLinks},
> mm,
> + pid::Pid,
> prelude::*,
> rbtree::{self, RBTree, RBTreeNode, RBTreeNodeReservation},
> seq_file::SeqFile,
> @@ -29,9 +30,9 @@
> sync::poll::PollTable,
> sync::{
> lock::{spinlock::SpinLockBackend, Guard},
> - Arc, ArcBorrow, CondVar, CondVarTimeoutResult, Mutex, SpinLock, UniqueArc,
> + rcu, Arc, ArcBorrow, CondVar, CondVarTimeoutResult, Mutex, SpinLock, UniqueArc,
> },
> - task::Task,
> + task::{Pid as PidT, Task},
> types::ARef,
> uaccess::{UserSlice, UserSliceReader},
> uapi,
> @@ -1498,17 +1499,29 @@ pub(crate) fn ioctl_freeze(&self, info: &BinderFreezeInfo) -> Result {
> }
> }
>
> +/// Get Task reference from VPID with refcount increment.
> +fn get_task_from_vpid(pid: PidT) -> Result<ARef<Task>> {
> + let rcu_guard = rcu::read_lock();
> + let pid_struct = Pid::find_vpid(pid, &rcu_guard).ok_or(ESRCH)?;
> + let task = pid_struct.pid_task(&rcu_guard).ok_or(ESRCH)?;
> +
> + Ok(ARef::from(task))
> +}
> +
> fn get_frozen_status(data: UserSlice) -> Result {
> let (mut reader, mut writer) = data.reader_writer();
>
> let mut info = reader.read::<BinderFrozenStatusInfo>()?;
> +
> + let target_task = get_task_from_vpid(info.pid as PidT)?;
> +
> info.sync_recv = 0;
> info.async_recv = 0;
> let mut found = false;
>
> for ctx in crate::context::get_all_contexts()? {
> ctx.for_each_proc(|proc| {
> - if proc.task.pid() == info.pid as _ {
> + if core::ptr::eq(&*proc.task, &*target_task) {

Same here, this could use `==` if you have a `PartialEq` impl on `Task`.
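With the impl sketched above, this would read roughly:

ctx.for_each_proc(|proc| {
    if *proc.task == *target_task {
        // ... body unchanged ...
    }
});

(untested, same caveat as above)
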
Best,
Gary
> found = true;
> let inner = proc.inner.lock();
> let txns_pending = inner.txns_pending_locked();
> @@ -1530,15 +1543,15 @@ fn get_frozen_status(data: UserSlice) -> Result {
> fn ioctl_freeze(reader: &mut UserSliceReader) -> Result {
> let info = reader.read::<BinderFreezeInfo>()?;
>
> + let target_task = get_task_from_vpid(info.pid as PidT)?;
> +
> // Very unlikely for there to be more than 3, since a process normally uses at most binder and
> // hwbinder.
> let mut procs = KVec::with_capacity(3, GFP_KERNEL)?;
>
> let ctxs = crate::context::get_all_contexts()?;
> for ctx in ctxs {
> - for proc in ctx.get_procs_with_pid(info.pid as i32)? {
> - procs.push(proc, GFP_KERNEL)?;
> - }
> + ctx.get_procs_with_task(&target_task, &mut procs)?;
> }
>
> for proc in procs {