Message-ID: <20251020-binder-bitmap-v1-2-879bec9cddc1@google.com>
Date: Mon, 20 Oct 2025 13:33:42 +0000
From: Alice Ryhl <aliceryhl@...gle.com>
To: Greg Kroah-Hartman <gregkh@...uxfoundation.org>, Yury Norov <yury.norov@...il.com>
Cc: "Arve Hjønnevåg" <arve@...roid.com>, Todd Kjos <tkjos@...roid.com>,
Martijn Coenen <maco@...roid.com>, Joel Fernandes <joelagnelf@...dia.com>,
Christian Brauner <brauner@...nel.org>, Carlos Llamas <cmllamas@...gle.com>,
Suren Baghdasaryan <surenb@...gle.com>, Burak Emir <bqe@...gle.com>, Miguel Ojeda <ojeda@...nel.org>,
Boqun Feng <boqun.feng@...il.com>, Gary Guo <gary@...yguo.net>,
"Björn Roy Baron" <bjorn3_gh@...tonmail.com>, Benno Lossin <lossin@...nel.org>,
Andreas Hindborg <a.hindborg@...nel.org>, Trevor Gross <tmgross@...ch.edu>,
Danilo Krummrich <dakr@...nel.org>, rust-for-linux@...r.kernel.org,
linux-kernel@...r.kernel.org, Alice Ryhl <aliceryhl@...gle.com>
Subject: [PATCH 2/2] rust_binder: use bitmap for allocation of handles

To find an unused Binder handle, Rust Binder currently iterates the
red/black tree from the beginning until it finds a gap in the keys.
This linear scan is O(n) in the number of existing references, which is
extremely slow for processes that hold many of them.

To improve the performance, add a bitmap that keeps track of which
handles are actually in use. This allows us to quickly find an unused
key in the red/black tree.

This logic matches the approach used by C Binder. It was chosen in part
because it is the most memory-efficient solution.

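As a rough illustration of the idea (plain Rust, with a Vec<u64>
standing in for the kernel's BitmapVec; names are illustrative, not
the kernel API): tracking used handles in a bitmap turns "find the
smallest unused handle" into "find the first zero bit", with no tree
walk at all.

    struct IdBitmap {
        words: Vec<u64>,
    }

    impl IdBitmap {
        fn new(nbits: usize) -> Self {
            Self { words: vec![0; nbits.div_ceil(64)] }
        }

        /// Smallest clear bit at or after `start`, i.e. the smallest
        /// free handle.
        fn next_zero_bit(&self, start: usize) -> Option<usize> {
            (start..self.words.len() * 64)
                .find(|&i| (self.words[i / 64] & (1 << (i % 64))) == 0)
        }

        fn set_bit(&mut self, bit: usize) {
            self.words[bit / 64] |= 1 << (bit % 64);
        }

        fn clear_bit(&mut self, bit: usize) {
            self.words[bit / 64] &= !(1 << (bit % 64));
        }
    }

    fn main() {
        let mut map = IdBitmap::new(64);
        for h in [1, 2, 3] {
            map.set_bit(h);
        }
        map.clear_bit(2);
        // Handle 0 is reserved for the context manager, so search
        // from 1.
        assert_eq!(map.next_zero_bit(1), Some(2));
    }
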
Signed-off-by: Alice Ryhl <aliceryhl@...gle.com>
---
drivers/android/binder/process.rs | 110 +++++++++++++++++++++++++++++++-------
1 file changed, 90 insertions(+), 20 deletions(-)

diff --git a/drivers/android/binder/process.rs b/drivers/android/binder/process.rs
index f13a747e784c84a0fb09cbf47442712106eba07c..357ba1b577c73ad3f2b525a8573424420577e92d 100644
--- a/drivers/android/binder/process.rs
+++ b/drivers/android/binder/process.rs
@@ -16,6 +16,7 @@
use kernel::{
bindings,
+ bitmap::BitmapVec,
cred::Credential,
error::Error,
fs::file::{self, File},
@@ -367,6 +368,8 @@ impl ListItem<{Self::LIST_NODE}> for NodeRefInfo {
struct ProcessNodeRefs {
/// Used to look up nodes using the 32-bit id that this process knows it by.
by_handle: RBTree<u32, ListArc<NodeRefInfo, { NodeRefInfo::LIST_PROC }>>,
+ /// Used to quickly find unused ids in `by_handle`.
+ handle_present: BitmapVec,
/// Used to look up nodes without knowing their local 32-bit id. The usize is the address of
/// the underlying `Node` struct as returned by `Node::global_id`.
by_node: RBTree<usize, u32>,
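
The new field pairs the tree with a bitmap under a simple invariant: a
bit is set in handle_present exactly when the same index is a key in
by_handle. A standalone sketch of that pairing (BTreeMap and Vec<bool>
standing in for RBTree and BitmapVec; not the kernel API):

    use std::collections::BTreeMap;

    struct Refs {
        by_handle: BTreeMap<u32, &'static str>,
        // Invariant: handle_present[h as usize] == by_handle.contains_key(&h).
        handle_present: Vec<bool>,
    }

    impl Refs {
        fn insert(&mut self, handle: u32, info: &'static str) {
            self.handle_present[handle as usize] = true;
            self.by_handle.insert(handle, info);
        }

        fn remove(&mut self, handle: u32) {
            self.by_handle.remove(&handle);
            self.handle_present[handle as usize] = false;
        }

        /// With the invariant held, the smallest unused key is the
        /// first zero bit; no tree walk is needed.
        fn first_free(&self) -> Option<usize> {
            self.handle_present.iter().position(|&b| !b)
        }
    }
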
@@ -378,13 +381,56 @@ struct ProcessNodeRefs {
}

impl ProcessNodeRefs {
- fn new() -> Self {
- Self {
+ fn new() -> Result<Self> {
+ Ok(Self {
by_handle: RBTree::new(),
+ handle_present: BitmapVec::new(BitmapVec::NO_ALLOC_MAX_LEN, GFP_KERNEL)?,
by_node: RBTree::new(),
freeze_listeners: RBTree::new(),
+ })
+ }
+
+ fn bitmap_shrink_len(&self) -> Option<usize> {
+ let nbits = self.handle_present.len();
+
+ if nbits <= BitmapVec::NO_ALLOC_MAX_LEN {
+ return None;
+ }
+
+ match self.handle_present.last_bit() {
+ Some(bit) if bit < nbits >> 2 => Some(nbits >> 1),
+ None => Some(BitmapVec::NO_ALLOC_MAX_LEN),
+ _ => None,
+ }
+ }
+
+ fn bitmap_grow_len(&self) -> Option<usize> {
+ let new_len = self
+ .handle_present
+ .len()
+ .checked_mul(2)
+ .unwrap_or(BitmapVec::MAX_LEN)
+ .min(BitmapVec::MAX_LEN);
+
+ if new_len > self.handle_present.len() {
+ Some(new_len)
+ } else {
+ None
+ }
+ }
+
+ fn try_shrink_bitmap(&mut self, new_map: BitmapVec) {
+ if let Some(target_len) = self.bitmap_shrink_len() {
+ if new_map.len() == target_len {
+ self.replace_bitmap(new_map);
+ }
}
}
+
+ fn replace_bitmap(&mut self, mut new_map: BitmapVec) {
+ new_map.copy_and_extend(&self.handle_present);
+ self.handle_present = new_map;
+ }
}

/// A process using binder.
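
The two helpers above implement doubling with hysteresis: grow by
doubling (saturating at MAX_LEN), and shrink by halving only once the
highest set bit falls below a quarter of the capacity, so a map whose
population oscillates around a size boundary does not reallocate on
every insert/remove. A standalone sketch of the same policy; the
constants are assumed stand-ins for BitmapVec::NO_ALLOC_MAX_LEN and
BitmapVec::MAX_LEN:

    const NO_ALLOC_MAX_LEN: usize = 64; // assumed inline capacity
    const MAX_LEN: usize = 1 << 20; // assumed cap for the sketch

    /// Shrink only when the highest set bit is below a quarter of
    /// the capacity, and only down to half.
    fn shrink_len(nbits: usize, last_bit: Option<usize>) -> Option<usize> {
        if nbits <= NO_ALLOC_MAX_LEN {
            return None;
        }
        match last_bit {
            Some(bit) if bit < nbits >> 2 => Some(nbits >> 1),
            None => Some(NO_ALLOC_MAX_LEN),
            _ => None,
        }
    }

    /// Grow by doubling, saturating at MAX_LEN.
    fn grow_len(nbits: usize) -> Option<usize> {
        let new_len = nbits.checked_mul(2).unwrap_or(MAX_LEN).min(MAX_LEN);
        (new_len > nbits).then_some(new_len)
    }

    fn main() {
        // Highest used handle 100 in a 1024-bit map: 100 < 256, halve.
        assert_eq!(shrink_len(1024, Some(100)), Some(512));
        // Highest used handle 300: 300 >= 256, keep the current size.
        assert_eq!(shrink_len(1024, Some(300)), None);
        // Full or near-full map: double.
        assert_eq!(grow_len(1024), Some(2048));
    }
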
@@ -471,7 +517,7 @@ fn new(ctx: Arc<Context>, cred: ARef<Credential>) -> Result<Arc<Self>> {
cred,
inner <- kernel::new_spinlock!(ProcessInner::new(), "Process::inner"),
pages <- ShrinkablePageRange::new(&super::BINDER_SHRINKER),
- node_refs <- kernel::new_mutex!(ProcessNodeRefs::new(), "Process::node_refs"),
+ node_refs <- kernel::new_mutex!(ProcessNodeRefs::new()?, "Process::node_refs"),
freeze_wait <- kernel::new_condvar!("Process::freeze_wait"),
task: current.group_leader().into(),
defer_work <- kernel::new_work!("Process::defer_work"),
@@ -775,7 +821,7 @@ pub(crate) fn get_node(
pub(crate) fn insert_or_update_handle(
self: ArcBorrow<'_, Process>,
node_ref: NodeRef,
- is_mananger: bool,
+ is_manager: bool,
) -> Result<u32> {
{
let mut refs = self.node_refs.lock();
@@ -794,7 +840,34 @@ pub(crate) fn insert_or_update_handle(
let reserve2 = RBTreeNodeReservation::new(GFP_KERNEL)?;
let info = UniqueArc::new_uninit(GFP_KERNEL)?;
- let mut refs = self.node_refs.lock();
+ let mut refs_lock = self.node_refs.lock();
+ let mut refs = &mut *refs_lock;
+
+ let (handle, by_handle_slot) = loop {
+ // ID 0 may only be used by the manager.
+ let start = if is_manager { 0 } else { 1 };
+
+ if let Some(handle) = refs.handle_present.next_zero_bit(start) {
+ let handle_u32 = u32::try_from(handle).map_err(|_| ENOMEM)?;
+ match refs.by_handle.entry(handle_u32) {
+ rbtree::Entry::Vacant(entry) => break (handle_u32, entry),
+ rbtree::Entry::Occupied(_) => {
+ pr_err!("Detected mismatch between handle_present and by_handle");
+ refs.handle_present.set_bit(handle);
+ continue;
+ }
+ }
+ }
+
+ let new_len = refs.bitmap_grow_len().ok_or(ENOMEM)?;
+ drop(refs_lock);
+ let new_bitmap = BitmapVec::new(new_len, GFP_KERNEL)?;
+ refs_lock = self.node_refs.lock();
+ refs = &mut *refs_lock;
+ if refs.handle_present.len() < new_len {
+ refs.replace_bitmap(new_bitmap);
+ }
+ };
// Do a lookup again as node may have been inserted before the lock was reacquired.
if let Some(handle_ref) = refs.by_node.get(&node_ref.node.global_id()) {
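
Two details of the loop above are worth noting. First, the Occupied
arm is purely defensive: if the bitmap claims a handle is free but the
tree disagrees, the bit is repaired and the search continues rather
than corrupting either structure. Second, the bitmap for growing is
allocated with the mutex dropped, so the state must be re-checked
after relocking; another thread may have grown the map (or freed a
handle) in the meantime. A sketch of that drop-lock/allocate/re-check
pattern, with std::sync::Mutex and Vec<bool> standing in for the
kernel types:

    use std::sync::Mutex;

    fn allocate_id(table: &Mutex<Vec<bool>>) -> usize {
        loop {
            let mut guard = table.lock().unwrap();
            // Fast path: a free slot already exists.
            if let Some(id) = guard.iter().position(|&used| !used) {
                guard[id] = true;
                return id;
            }
            let new_len = guard.len().max(1) * 2;
            // Drop the lock before the potentially slow allocation.
            drop(guard);
            let mut bigger = vec![false; new_len];
            // Reacquire and re-check: another thread may have grown
            // the table, or freed a slot, while the lock was dropped.
            let mut guard = table.lock().unwrap();
            if guard.len() < new_len {
                bigger[..guard.len()].copy_from_slice(&guard);
                *guard = bigger;
            }
            // Loop: the free-slot search runs again under the lock.
        }
    }
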
@@ -804,20 +877,9 @@ pub(crate) fn insert_or_update_handle(
return Ok(handle);
}
- // Find id.
- let mut target: u32 = if is_mananger { 0 } else { 1 };
- for handle in refs.by_handle.keys() {
- if *handle > target {
- break;
- }
- if *handle == target {
- target = target.checked_add(1).ok_or(ENOMEM)?;
- }
- }
-
let gid = node_ref.node.global_id();
let (info_proc, info_node) = {
- let info_init = NodeRefInfo::new(node_ref, target, self.into());
+ let info_init = NodeRefInfo::new(node_ref, handle, self.into());
match info.pin_init_with(info_init) {
Ok(info) => ListArc::pair_from_pin_unique(info),
// error is infallible
@@ -838,9 +900,9 @@ pub(crate) fn insert_or_update_handle(
// `info_node` into the right node's `refs` list.
unsafe { info_proc.node_ref2().node.insert_node_info(info_node) };
- refs.by_node.insert(reserve1.into_node(gid, target));
- refs.by_handle.insert(reserve2.into_node(target, info_proc));
- Ok(target)
+ refs.by_node.insert(reserve1.into_node(gid, handle));
+ refs.handle_present.set_bit(handle as usize);
+ by_handle_slot.insert(info_proc, reserve2);
+ Ok(handle)
}
pub(crate) fn get_transaction_node(&self, handle: u32) -> BinderResult<NodeRef> {
@@ -905,6 +967,14 @@ pub(crate) fn update_ref(
let id = info.node_ref().node.global_id();
refs.by_handle.remove(&handle);
refs.by_node.remove(&id);
+ refs.handle_present.clear_bit(handle as usize);
+
+ if let Some(shrink_len) = refs.bitmap_shrink_len() {
+ drop(refs);
+ let new_bitmap = BitmapVec::new(shrink_len, GFP_KERNEL)?;
+ refs = self.node_refs.lock();
+ refs.try_shrink_bitmap(new_bitmap);
+ }
}
} else {
// All refs are cleared in process exit, so this warning is expected in that case.
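
The removal path mirrors the grow path: clear the bit under the lock,
compute a shrink target, allocate the smaller bitmap with the lock
dropped, and let try_shrink_bitmap re-validate the target after
relocking; if a concurrent insertion invalidated it, the fresh
allocation is simply dropped. A self-contained sketch of that flow
(std types standing in for the kernel ones; the shrink policy repeats
the bitmap_shrink_len logic above):

    use std::sync::Mutex;

    const MIN_LEN: usize = 64; // stand-in for NO_ALLOC_MAX_LEN

    fn shrink_target(len: usize, last_set: Option<usize>) -> Option<usize> {
        if len <= MIN_LEN {
            return None;
        }
        match last_set {
            Some(bit) if bit < len / 4 => Some(len / 2),
            None => Some(MIN_LEN),
            _ => None,
        }
    }

    fn release_id(table: &Mutex<Vec<bool>>, id: usize) {
        let mut guard = table.lock().unwrap();
        guard[id] = false;
        let last = guard.iter().rposition(|&b| b);
        if let Some(target) = shrink_target(guard.len(), last) {
            // Don't hold the lock across the allocation.
            drop(guard);
            let mut smaller = vec![false; target];
            let mut guard = table.lock().unwrap();
            let last = guard.iter().rposition(|&b| b);
            // Install only if the target is still valid; all set bits
            // fit because they are below a quarter of the old size.
            if shrink_target(guard.len(), last) == Some(smaller.len()) {
                for (i, bit) in smaller.iter_mut().enumerate() {
                    *bit = guard[i];
                }
                *guard = smaller;
            }
        }
    }
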
--
2.51.0.915.g61a8936c21-goog