Message-Id: <20251105-rust-percpu-v4-8-984b1470adcb@gmail.com>
Date: Wed, 05 Nov 2025 15:01:20 -0800
From: Mitchell Levy <levymitchell0@...il.com>
To: Miguel Ojeda <ojeda@...nel.org>, Alex Gaynor <alex.gaynor@...il.com>,
Boqun Feng <boqun.feng@...il.com>, Gary Guo <gary@...yguo.net>,
Björn Roy Baron <bjorn3_gh@...tonmail.com>,
Andreas Hindborg <a.hindborg@...nel.org>, Alice Ryhl <aliceryhl@...gle.com>,
Trevor Gross <tmgross@...ch.edu>, Andrew Morton <akpm@...ux-foundation.org>,
Dennis Zhou <dennis@...nel.org>, Tejun Heo <tj@...nel.org>,
Christoph Lameter <cl@...ux.com>, Danilo Krummrich <dakr@...nel.org>,
Benno Lossin <lossin@...nel.org>, Yury Norov <yury.norov@...il.com>,
Viresh Kumar <viresh.kumar@...aro.org>
Cc: Tyler Hicks <code@...icks.com>, Allen Pais <apais@...ux.microsoft.com>,
linux-kernel@...r.kernel.org, rust-for-linux@...r.kernel.org,
linux-mm@...ck.org, Mitchell Levy <levymitchell0@...il.com>
Subject: [PATCH v4 8/9] rust: percpu: Add peephole optimizations for
 numerics

The C implementations of `this_cpu_add`, `this_cpu_sub`, etc., are
optimized to save an instruction by avoiding the separate computation of
`this_cpu_ptr(&x)` for a per-CPU variable `x`. For example, rather than

    u64 *x_ptr = this_cpu_ptr(&x);
    *x_ptr += 5;

the implementation of `this_cpu_add` exploits the fact that per-CPU
variables are implemented on x86 via segment registers, so the update
takes only a single instruction (assuming `&x` is already in `rax`):

    add gs:[rax], 5

Add these optimizations via a `PerCpuNumeric` type to enable code reuse
between `DynamicPerCpu` and `StaticPerCpu`.
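
For example, with this patch a counter-style per-CPU variable can be
bumped without first materializing a pointer to it. A rough usage
sketch, modeled on the sample module updated below (`counter` is just a
stand-in name for some `DynamicPerCpu<u64>`):

    let mut counter: DynamicPerCpu<u64> =
        DynamicPerCpu::new_zero(GFP_KERNEL).unwrap();

    // Token-based access: compute the per-CPU pointer, then store through it.
    // SAFETY: no other concurrent access to `counter`.
    unsafe { counter.get_mut(CpuGuard::new()) }.with(|val: &mut u64| *val += 5);

    // PerCpuNumeric: compiles down to a single gs-relative `add`.
    counter.num().add(5);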
Signed-off-by: Mitchell Levy <levymitchell0@...il.com>
---
rust/kernel/percpu.rs | 1 +
rust/kernel/percpu/dynamic.rs | 10 ++-
rust/kernel/percpu/numeric.rs | 138 ++++++++++++++++++++++++++++++++++++++++++
samples/rust/rust_percpu.rs | 36 +++++++++++
4 files changed, 184 insertions(+), 1 deletion(-)
diff --git a/rust/kernel/percpu.rs b/rust/kernel/percpu.rs
index 2db670c87fae..c1148cb36eff 100644
--- a/rust/kernel/percpu.rs
+++ b/rust/kernel/percpu.rs
@@ -6,6 +6,7 @@
pub mod cpu_guard;
mod dynamic;
+pub mod numeric;
mod static_;
#[doc(inline)]
diff --git a/rust/kernel/percpu/dynamic.rs b/rust/kernel/percpu/dynamic.rs
index a74c8841aeb2..99acbf6363f5 100644
--- a/rust/kernel/percpu/dynamic.rs
+++ b/rust/kernel/percpu/dynamic.rs
@@ -18,7 +18,7 @@
/// the memory location on any particular CPU has been initialized. This means that it cannot tell
/// whether it should drop the *contents* of the allocation when it is dropped. It is up to the
/// user to do this via something like [`core::ptr::drop_in_place`].
-pub struct PerCpuAllocation<T>(PerCpuPtr<T>);
+pub struct PerCpuAllocation<T>(pub(super) PerCpuPtr<T>);
impl<T: Zeroable> PerCpuAllocation<T> {
/// Dynamically allocates a space in the per-CPU area suitably sized and aligned to hold a `T`,
@@ -119,6 +119,14 @@ pub fn new_with(val: &T, flags: Flags) -> Option<Self> {
}
}
+impl<T> DynamicPerCpu<T> {
+ /// Gets the allocation backing this per-CPU variable.
+ pub(crate) fn alloc(&self) -> &Arc<PerCpuAllocation<T>> {
+ // SAFETY: This type's invariant ensures that `self.alloc` is `Some`.
+ unsafe { self.alloc.as_ref().unwrap_unchecked() }
+ }
+}
+
impl<T> PerCpu<T> for DynamicPerCpu<T> {
unsafe fn get_mut(&mut self, guard: CpuGuard) -> PerCpuToken<'_, T> {
// SAFETY:
diff --git a/rust/kernel/percpu/numeric.rs b/rust/kernel/percpu/numeric.rs
new file mode 100644
index 000000000000..e76461f05c66
--- /dev/null
+++ b/rust/kernel/percpu/numeric.rs
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+//! Peephole optimizations for [`PerCpu<T>`] where `T` is a numeric type.
+
+use super::*;
+use core::arch::asm;
+
+/// Represents a per-CPU variable that can be manipulated with machine-intrinsic numeric
+/// operations.
+pub struct PerCpuNumeric<'a, T> {
+ // INVARIANT: `ptr.0` is a valid offset into the per-CPU area, and the slot it refers to is
+ // initialized on every CPU (without a CPU guard we must be pessimistic and assume we could be
+ // running on any CPU).
+ ptr: &'a PerCpuPtr<T>,
+}
+
+macro_rules! impl_ops {
+ ($ty:ty, $reg:tt) => {
+ impl DynamicPerCpu<$ty> {
+ /// Returns a [`PerCpuNumeric`] that can be used to manipulate the underlying per-CPU
+ /// variable.
+ #[inline]
+ pub fn num(&mut self) -> PerCpuNumeric<'_, $ty> {
+ // The invariant is satisfied because `DynamicPerCpu`'s invariant guarantees that
+ // this pointer is valid and initialized on all CPUs.
+ PerCpuNumeric { ptr: &self.alloc().0 }
+ }
+ }
+ impl StaticPerCpu<$ty> {
+ /// Returns a [`PerCpuNumeric`] that can be used to manipulate the underlying per-CPU
+ /// variable.
+ #[inline]
+ pub fn num(&mut self) -> PerCpuNumeric<'_, $ty> {
+ // The invariant is satisfied because `StaticPerCpu`'s invariant guarantees that
+ // this pointer is valid and initialized on all CPUs.
+ PerCpuNumeric { ptr: &self.0 }
+ }
+ }
+
+ impl PerCpuNumeric<'_, $ty> {
+ /// Adds `rhs` to the per-CPU variable.
+ #[inline]
+ pub fn add(&mut self, rhs: $ty) {
+ // SAFETY: `self.ptr.0` is a valid offset into the per-CPU area (i.e., valid as a
+ // pointer relative to the `gs` segment register) by the invariants of this type.
+ unsafe {
+ asm!(
+ concat!("add gs:[{off}], {val:", $reg, "}"),
+ off = in(reg) self.ptr.0.cast::<*mut $ty>(),
+ val = in(reg) rhs,
+ );
+ }
+ }
+ }
+ impl PerCpuNumeric<'_, $ty> {
+ /// Subtracts `rhs` from the per-CPU variable.
+ #[inline]
+ pub fn sub(&mut self, rhs: $ty) {
+ // SAFETY: `self.ptr.0` is a valid offset into the per-CPU area (i.e., valid as a
+ // pointer relative to the `gs` segment register) by the invariants of this type.
+ unsafe {
+ asm!(
+ concat!("sub gs:[{off}], {val:", $reg, "}"),
+ off = in(reg) self.ptr.0.cast::<*mut $ty>(),
+ val = in(reg) rhs,
+ );
+ }
+ }
+ }
+ };
+}
+
+macro_rules! impl_ops_byte {
+ ($ty:ty) => {
+ impl DynamicPerCpu<$ty> {
+ /// Returns a [`PerCpuNumeric`] that can be used to manipulate the underlying per-CPU
+ /// variable.
+ #[inline]
+ pub fn num(&mut self) -> PerCpuNumeric<'_, $ty> {
+ // The invariant is satisfied because `DynamicPerCpu`'s invariant guarantees that
+ // this pointer is valid and initialized on all CPUs.
+ PerCpuNumeric { ptr: &self.alloc().0 }
+ }
+ }
+ impl StaticPerCpu<$ty> {
+ /// Returns a [`PerCpuNumeric`] that can be used to manipulate the underlying per-CPU
+ /// variable.
+ #[inline]
+ pub fn num(&mut self) -> PerCpuNumeric<'_, $ty> {
+ // The invariant is satisfied because `StaticPerCpu`'s invariant guarantees that
+ // this pointer is valid and initialized on all CPUs.
+ PerCpuNumeric { ptr: &self.0 }
+ }
+ }
+
+ impl PerCpuNumeric<'_, $ty> {
+ /// Adds `rhs` to the per-CPU variable.
+ #[inline]
+ pub fn add(&mut self, rhs: $ty) {
+ // SAFETY: `self.ptr.0` is a valid offset into the per-CPU area (i.e., valid as a
+ // pointer relative to the `gs` segment register) by the invariants of this type.
+ unsafe {
+ asm!(
+ concat!("add gs:[{off}], {val}"),
+ off = in(reg) self.ptr.0.cast::<*mut $ty>(),
+ val = in(reg_byte) rhs,
+ );
+ }
+ }
+ }
+ impl PerCpuNumeric<'_, $ty> {
+ /// Subtracts `rhs` from the per-CPU variable.
+ #[inline]
+ pub fn sub(&mut self, rhs: $ty) {
+ // SAFETY: `self.ptr.0` is a valid offset into the per-CPU area (i.e., valid as a
+ // pointer relative to the `gs` segment register) by the invariants of this type.
+ unsafe {
+ asm!(
+ concat!("sub gs:[{off}], {val}"),
+ off = in(reg) self.ptr.0.cast::<*mut $ty>(),
+ val = in(reg_byte) rhs,
+ );
+ }
+ }
+ }
+ };
+}
+
+impl_ops_byte!(i8);
+impl_ops!(i16, "x");
+impl_ops!(i32, "e");
+impl_ops!(i64, "r");
+impl_ops!(isize, "r");
+
+impl_ops_byte!(u8);
+impl_ops!(u16, "x");
+impl_ops!(u32, "e");
+impl_ops!(u64, "r");
+impl_ops!(usize, "r");
diff --git a/samples/rust/rust_percpu.rs b/samples/rust/rust_percpu.rs
index be70ee2e513f..31ab3fcf5c6c 100644
--- a/samples/rust/rust_percpu.rs
+++ b/samples/rust/rust_percpu.rs
@@ -28,6 +28,26 @@
define_per_cpu!(UPERCPU: u64 = 0);
define_per_cpu!(CHECKED: RefCell<u64> = RefCell::new(0));
+macro_rules! make_optimization_test {
+ ($ty:ty) => {
+ let mut test: DynamicPerCpu<$ty> = DynamicPerCpu::new_zero(GFP_KERNEL).unwrap();
+ {
+ let _ = CpuGuard::new();
+ // SAFETY: No other usage of `test`
+ unsafe { test.get_mut(CpuGuard::new()) }.with(|val: &mut $ty| *val = 10);
+ test.num().add(1);
+ // SAFETY: No other usage of `test`
+ unsafe { test.get_mut(CpuGuard::new()) }.with(|val: &mut $ty| assert_eq!(*val, 11));
+ test.num().add(10);
+ // SAFETY: No other usage of `test`
+ unsafe { test.get_mut(CpuGuard::new()) }.with(|val: &mut $ty| assert_eq!(*val, 21));
+ test.num().sub(5);
+ // SAFETY: No other usage of `test`
+ unsafe { test.get_mut(CpuGuard::new()) }.with(|val: &mut $ty| assert_eq!(*val, 16));
+ }
+ };
+}
+
impl kernel::Module for PerCpuMod {
fn init(_module: &'static ThisModule) -> Result<Self, Error> {
pr_info!("rust percpu test start\n");
@@ -208,6 +228,22 @@ fn init(_module: &'static ThisModule) -> Result<Self, Error> {
pr_info!("rust dynamic percpu test done\n");
+ pr_info!("rust numeric optimizations test start\n");
+
+ make_optimization_test!(u8);
+ make_optimization_test!(u16);
+ make_optimization_test!(u32);
+ make_optimization_test!(u64);
+ make_optimization_test!(usize);
+
+ make_optimization_test!(i8);
+ make_optimization_test!(i16);
+ make_optimization_test!(i32);
+ make_optimization_test!(i64);
+ make_optimization_test!(isize);
+
+ pr_info!("rust numeric optimizations test done\n");
+
// Return Err to unload the module
Result::Err(EINVAL)
}
--
2.34.1