Message-Id: <20251105-rust-percpu-v4-7-984b1470adcb@gmail.com>
Date: Wed, 05 Nov 2025 15:01:19 -0800
From: Mitchell Levy <levymitchell0@...il.com>
To: Miguel Ojeda <ojeda@...nel.org>, Alex Gaynor <alex.gaynor@...il.com>,
Boqun Feng <boqun.feng@...il.com>, Gary Guo <gary@...yguo.net>,
Björn Roy Baron <bjorn3_gh@...tonmail.com>,
Andreas Hindborg <a.hindborg@...nel.org>, Alice Ryhl <aliceryhl@...gle.com>,
Trevor Gross <tmgross@...ch.edu>, Andrew Morton <akpm@...ux-foundation.org>,
Dennis Zhou <dennis@...nel.org>, Tejun Heo <tj@...nel.org>,
Christoph Lameter <cl@...ux.com>, Danilo Krummrich <dakr@...nel.org>,
Benno Lossin <lossin@...nel.org>, Yury Norov <yury.norov@...il.com>,
Viresh Kumar <viresh.kumar@...aro.org>
Cc: Tyler Hicks <code@...icks.com>, Allen Pais <apais@...ux.microsoft.com>,
linux-kernel@...r.kernel.org, rust-for-linux@...r.kernel.org,
linux-mm@...ck.org, Mitchell Levy <levymitchell0@...il.com>
Subject: [PATCH v4 7/9] rust: percpu: Support non-zeroable types for
DynamicPerCpu

Add functionality to `PerCpuPtr` to compute pointers to per-CPU variable
slots on other CPUs. Use this facility to initialize per-CPU variables
on all possible CPUs when a dynamic per-CPU variable is created with a
non-zeroable type. Since `RefCell` and other `Cell`-like types fall into
this category and can now be used, implement `CheckedPerCpu` on
`DynamicPerCpu` for these `InteriorMutable` types. Add examples of these
uses to `samples/rust/rust_percpu.rs`. Add a test to ensure dynamic
per-CPU variables properly drop their contents; it is added in this
patch because non-trivially dropped types often aren't `Zeroable`.

Signed-off-by: Mitchell Levy <levymitchell0@...il.com>
---
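Not part of the patch: a minimal usage sketch of the new API, distilled
from the updated sample below. Names and flags mirror
`samples/rust/rust_percpu.rs`; `RefCell<u64>` stands in for any
non-`Zeroable`, `InteriorMutable` payload.

    // `RefCell<u64>` is not `Zeroable`, so `new_with` clones the initial
    // value into the slot of every possible CPU.
    let mut checked: DynamicPerCpu<RefCell<u64>> =
        DynamicPerCpu::new_with(&RefCell::new(100), GFP_KERNEL).unwrap();

    // `RefCell` is `InteriorMutable`, so the checked accessor is available
    // and no `unsafe` is needed at the call site.
    checked.get(CpuGuard::new()).with(|val: &RefCell<u64>| {
        *val.borrow_mut() += 1;
    });
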
rust/kernel/percpu/dynamic.rs | 44 +++++++++++++++++
samples/rust/rust_percpu.rs | 109 +++++++++++++++++++++++++++++++++++++++---
2 files changed, 146 insertions(+), 7 deletions(-)

diff --git a/rust/kernel/percpu/dynamic.rs b/rust/kernel/percpu/dynamic.rs
index 1863f31a2817..a74c8841aeb2 100644
--- a/rust/kernel/percpu/dynamic.rs
+++ b/rust/kernel/percpu/dynamic.rs
@@ -89,6 +89,36 @@ pub fn new_zero(flags: Flags) -> Option<Self> {
}
}
+impl<T: Clone> DynamicPerCpu<T> {
+ /// Allocates a new per-CPU variable
+ ///
+ /// # Arguments
+ /// * `val` - The initial value of the per-CPU variable on all CPUs.
+ /// * `flags` - Flags used to allocate an [`Arc`] that keeps track of the underlying
+ /// [`PerCpuAllocation`].
+ pub fn new_with(val: &T, flags: Flags) -> Option<Self> {
+ let alloc: PerCpuAllocation<T> = PerCpuAllocation::new_uninit()?;
+ let ptr = alloc.0;
+
+ for cpu in Cpumask::possible_cpus().iter() {
+ let remote_ptr = ptr.get_remote_ptr(cpu);
+ // SAFETY: `remote_ptr` is valid because `ptr` points to a live allocation and `cpu`
+ // appears in `Cpumask::possible_cpus()`.
+ //
+ // Each CPU's slot corresponding to `ptr` is currently uninitialized, and no one else
+ // has a reference to it. Therefore, we can freely write to it without worrying about
+ // the need to drop what was there or whether we're racing with someone else.
+ unsafe {
+ (*remote_ptr).write(val.clone());
+ }
+ }
+
+ let arc = Arc::new(alloc, flags).ok()?;
+
+ Some(Self { alloc: Some(arc) })
+ }
+}
+
impl<T> PerCpu<T> for DynamicPerCpu<T> {
unsafe fn get_mut(&mut self, guard: CpuGuard) -> PerCpuToken<'_, T> {
// SAFETY:
@@ -105,6 +135,20 @@ unsafe fn get_mut(&mut self, guard: CpuGuard) -> PerCpuToken<'_, T> {
}
}
+impl<T: InteriorMutable> CheckedPerCpu<T> for DynamicPerCpu<T> {
+ fn get(&mut self, guard: CpuGuard) -> CheckedPerCpuToken<'_, T> {
+ // SAFETY:
+ // 1. Invariants of this type assure that `alloc` is `Some`.
+ // 2. The invariants of `DynamicPerCpu` ensure that the contents of the allocation are
+ // initialized on each CPU.
+ // 3. The existence of a reference to the `PerCpuAllocation` ensures that the allocation is
+ // live.
+ // 4. The invariants of `DynamicPerCpu` ensure that the allocation is sized and aligned for
+ // a `T`.
+ unsafe { CheckedPerCpuToken::new(guard, &self.alloc.as_ref().unwrap_unchecked().0) }
+ }
+}
+
impl<T> Drop for DynamicPerCpu<T> {
fn drop(&mut self) {
// SAFETY: This type's invariant ensures that `self.alloc` is `Some`.
diff --git a/samples/rust/rust_percpu.rs b/samples/rust/rust_percpu.rs
index 98ca1c781b6b..be70ee2e513f 100644
--- a/samples/rust/rust_percpu.rs
+++ b/samples/rust/rust_percpu.rs
@@ -11,6 +11,7 @@
percpu::{cpu_guard::*, *},
pr_info,
prelude::*,
+ sync::Arc,
};
module! {
@@ -130,13 +131,81 @@ fn init(_module: &'static ThisModule) -> Result<Self, Error> {
// SAFETY: No prerequisites for on_each_cpu.
unsafe {
- on_each_cpu(Some(inc_percpu), (&raw mut test).cast(), 0);
- on_each_cpu(Some(inc_percpu), (&raw mut test).cast(), 0);
- on_each_cpu(Some(inc_percpu), (&raw mut test).cast(), 0);
- on_each_cpu(Some(inc_percpu), (&raw mut test).cast(), 1);
- on_each_cpu(Some(check_percpu), (&raw mut test).cast(), 1);
+ on_each_cpu(Some(inc_percpu_u64), (&raw mut test).cast(), 0);
+ on_each_cpu(Some(inc_percpu_u64), (&raw mut test).cast(), 0);
+ on_each_cpu(Some(inc_percpu_u64), (&raw mut test).cast(), 0);
+ on_each_cpu(Some(inc_percpu_u64), (&raw mut test).cast(), 1);
+ on_each_cpu(Some(check_percpu_u64), (&raw mut test).cast(), 1);
}
+ let mut checked: DynamicPerCpu<RefCell<u64>> =
+ DynamicPerCpu::new_with(&RefCell::new(100), GFP_KERNEL).unwrap();
+
+ // SAFETY: No prerequisites for on_each_cpu.
+ unsafe {
+ on_each_cpu(Some(inc_percpu_refcell_u64), (&raw mut checked).cast(), 0);
+ on_each_cpu(Some(inc_percpu_refcell_u64), (&raw mut checked).cast(), 0);
+ on_each_cpu(Some(inc_percpu_refcell_u64), (&raw mut checked).cast(), 0);
+ on_each_cpu(Some(inc_percpu_refcell_u64), (&raw mut checked).cast(), 1);
+ on_each_cpu(Some(check_percpu_refcell_u64), (&raw mut checked).cast(), 1);
+ }
+
+ checked.get(CpuGuard::new()).with(|val: &RefCell<u64>| {
+ assert!(*val.borrow() == 104);
+
+ let mut checked_native = 0;
+ *val.borrow_mut() = 0;
+
+ checked_native += 1;
+ *val.borrow_mut() += 1;
+ pr_info!(
+ "Checked native: {}, *checked: {}\n",
+ checked_native,
+ val.borrow()
+ );
+ assert!(checked_native == *val.borrow() && checked_native == 1);
+
+ checked_native = checked_native.wrapping_add((-1i64) as u64);
+ val.replace_with(|old: &mut u64| old.wrapping_add((-1i64) as u64));
+ pr_info!(
+ "Checked native: {}, *checked: {}\n",
+ checked_native,
+ val.borrow()
+ );
+ assert!(checked_native == *val.borrow() && checked_native == 0);
+
+ checked_native = checked_native.wrapping_add((-1i64) as u64);
+ val.replace_with(|old: &mut u64| old.wrapping_add((-1i64) as u64));
+ pr_info!(
+ "Checked native: {}, *checked: {}\n",
+ checked_native,
+ val.borrow()
+ );
+ assert!(checked_native == *val.borrow() && checked_native == (-1i64) as u64);
+
+ checked_native = 0;
+ *val.borrow_mut() = 0;
+
+ checked_native = checked_native.wrapping_sub(1);
+ val.replace_with(|old: &mut u64| old.wrapping_sub(1));
+ pr_info!(
+ "Checked native: {}, *checked: {}\n",
+ checked_native,
+ val.borrow()
+ );
+ assert!(checked_native == *val.borrow() && checked_native == (-1i64) as u64);
+ assert!(checked_native == *val.borrow() && checked_native == u64::MAX);
+ });
+
+ let arc = Arc::new(0, GFP_KERNEL).unwrap();
+ {
+ let _arc_pcpu: DynamicPerCpu<Arc<u64>> =
+ DynamicPerCpu::new_with(&arc, GFP_KERNEL).unwrap();
+ }
+ // `arc` should be unique, since all the clones on each CPU should be dropped when
+ // `_arc_pcpu` is dropped
+ assert!(arc.into_unique_or_drop().is_some());
+
pr_info!("rust dynamic percpu test done\n");
// Return Err to unload the module
@@ -144,7 +213,7 @@ fn init(_module: &'static ThisModule) -> Result<Self, Error> {
}
}
-extern "C" fn inc_percpu(info: *mut c_void) {
+extern "C" fn inc_percpu_u64(info: *mut c_void) {
// SAFETY: We know that info is a void *const DynamicPerCpu<u64> and DynamicPerCpu<u64> is Send.
let mut pcpu = unsafe { (*(info as *const DynamicPerCpu<u64>)).clone() };
pr_info!("Incrementing on {}\n", CpuId::current().as_u32());
@@ -153,7 +222,7 @@ extern "C" fn inc_percpu(info: *mut c_void) {
unsafe { pcpu.get_mut(CpuGuard::new()) }.with(|val: &mut u64| *val += 1);
}
-extern "C" fn check_percpu(info: *mut c_void) {
+extern "C" fn check_percpu_u64(info: *mut c_void) {
// SAFETY: We know that info is a void *const DynamicPerCpu<u64> and DynamicPerCpu<u64> is Send.
let mut pcpu = unsafe { (*(info as *const DynamicPerCpu<u64>)).clone() };
pr_info!("Asserting on {}\n", CpuId::current().as_u32());
@@ -161,3 +230,29 @@ extern "C" fn check_percpu(info: *mut c_void) {
// SAFETY: We don't have multiple clones of pcpu in scope
unsafe { pcpu.get_mut(CpuGuard::new()) }.with(|val: &mut u64| assert!(*val == 4));
}
+
+extern "C" fn inc_percpu_refcell_u64(info: *mut c_void) {
+ // SAFETY: We know that info is a void *const DynamicPerCpu<RefCell<u64>> and
+ // DynamicPerCpu<RefCell<u64>> is Send.
+ let mut pcpu = unsafe { (*(info as *const DynamicPerCpu<RefCell<u64>>)).clone() };
+ // `CpuId::current()` is safe to call; the CPU number is only logged.
+ pr_info!("Incrementing on {}\n", CpuId::current().as_u32());
+
+ pcpu.get(CpuGuard::new()).with(|val: &RefCell<u64>| {
+ let mut val = val.borrow_mut();
+ *val += 1;
+ });
+}
+
+extern "C" fn check_percpu_refcell_u64(info: *mut c_void) {
+ // SAFETY: We know that info is a void *const DynamicPerCpu<RefCell<u64>> and
+ // DynamicPerCpu<RefCell<u64>> is Send.
+ let mut pcpu = unsafe { (*(info as *const DynamicPerCpu<RefCell<u64>>)).clone() };
+ // `CpuId::current()` is safe to call; the CPU number is only logged.
+ pr_info!("Asserting on {}\n", CpuId::current().as_u32());
+
+ pcpu.get(CpuGuard::new()).with(|val: &RefCell<u64>| {
+ let val = val.borrow();
+ assert!(*val == 104);
+ });
+}
--
2.34.1