Message-ID: <81b26efe-5099-4238-a58f-65bf4c50e021@nvidia.com>
Date: Wed, 24 Sep 2025 19:17:57 -0700
From: John Hubbard <jhubbard@...dia.com>
To: Elijah Wright <git@...jahs.space>, Miguel Ojeda <ojeda@...nel.org>,
Alex Gaynor <alex.gaynor@...il.com>, Boqun Feng <boqun.feng@...il.com>,
Gary Guo <gary@...yguo.net>, Björn Roy Baron
<bjorn3_gh@...tonmail.com>, Benno Lossin <lossin@...nel.org>,
Andreas Hindborg <a.hindborg@...nel.org>, Alice Ryhl <aliceryhl@...gle.com>,
Trevor Gross <tmgross@...ch.edu>, Danilo Krummrich <dakr@...nel.org>,
rust-for-linux@...r.kernel.org, linux-kernel@...r.kernel.org,
"linux-mm@...ck.org" <linux-mm@...ck.org>
Subject: Re: [PATCH] rust: slab: add basic slab module
On 9/24/25 12:36 PM, Elijah Wright wrote:
> this patch adds a basic slab module for kmem_cache, primarily wrapping
> kmem_cache_create, kmem_cache_alloc, kmem_cache_free, and kmem_cache_destroy.
+Cc linux-mm
Maybe that's not required, but if so I'll be quite surprised. :)
thanks,
John Hubbard
>
> Signed-off-by: Elijah Wright <git@...jahs.space>
> ---
> rust/helpers/slab.c | 10 ++++++
> rust/kernel/lib.rs | 1 +
> rust/kernel/slab.rs | 85 +++++++++++++++++++++++++++++++++++++++++++++
> 3 files changed, 96 insertions(+)
> create mode 100644 rust/kernel/slab.rs
>
> diff --git a/rust/helpers/slab.c b/rust/helpers/slab.c
> index a842bfbddcba..799de7bc1405 100644
> --- a/rust/helpers/slab.c
> +++ b/rust/helpers/slab.c
> @@ -13,3 +13,13 @@ rust_helper_kvrealloc(const void *p, size_t size, gfp_t flags)
> {
> return kvrealloc(p, size, flags);
> }
> +
> +struct kmem_cache *rust_helper_kmem_cache_create(const char *name, unsigned int size, unsigned int align, slab_flags_t flags, void (*ctor)(void *))
> +{
> + return kmem_cache_create(name, size, align, flags, ctor);
> +}
> +
> +void *rust_helper_kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
> +{
> + return kmem_cache_alloc(cachep, flags);
> +}
> \ No newline at end of file
> diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
> index fef97f2a5098..bd76eadbe297 100644
> --- a/rust/kernel/lib.rs
> +++ b/rust/kernel/lib.rs
> @@ -116,6 +116,7 @@
> pub mod security;
> pub mod seq_file;
> pub mod sizes;
> +pub mod slab;
> mod static_assert;
> #[doc(hidden)]
> pub mod std_vendor;
> diff --git a/rust/kernel/slab.rs b/rust/kernel/slab.rs
> new file mode 100644
> index 000000000000..8b418f9db7cb
> --- /dev/null
> +++ b/rust/kernel/slab.rs
> @@ -0,0 +1,85 @@
> +// SPDX-License-Identifier: GPL-2.0
> +
> +//! Slab bindings.
> +//!
> +//! C header: [`include/linux/slab.h`](srctree/include/linux/slab.h)
> +
> +use core::{marker::PhantomData, mem, ptr::NonNull};
> +
> +use crate::{
> + alloc::Flags,
> + bindings,
> + error::{code::ENOMEM, Result},
> + str::CStr,
> +};
> +
> +/// A wrapper around a `kmem_cache` that allocates objects of type `T`.
> +#[repr(transparent)]
> +pub struct Slab<T> {
> + cache: NonNull<bindings::kmem_cache>,
> + _p: PhantomData<T>,
> +}
> +
> +impl<T> Slab<T> {
> + /// Creates a cache for objects of type `T`.
> + pub fn try_new(name: &CStr, flags: Flags) -> Result<Self> {
> + let size = mem::size_of::<T>();
> + let align = mem::align_of::<T>();
> + debug_assert!(size <= u32::MAX as usize);
> + debug_assert!(align <= u32::MAX as usize);
> +
> + // SAFETY: `name` is a valid, NUL-terminated C string, `flags` contains
> + // valid flag bits, and the other arguments are plain values.
> + let cache = unsafe {
> + bindings::kmem_cache_create(
> + name.as_char_ptr(),
> + size as u32,
> + align as u32,
> + flags.as_raw(),
> + None,
> + )
> + };
> +
> + NonNull::new(cache)
> + .map(|c| Slab {
> + cache: c,
> + _p: PhantomData,
> + })
> + .ok_or(ENOMEM)
> + }
> +
> + /// Allocates one object from the cache with the given gfp flags.
> + #[inline]
> + pub fn alloc(&self, flags: Flags) -> Result<NonNull<T>> {
> + // SAFETY: `self.cache` is a valid pointer obtained from
> + // `kmem_cache_create` and still alive because `self` is borrowed.
> + let ptr = unsafe { bindings::kmem_cache_alloc(self.cache.as_ptr(), flags.as_raw()) };
> + NonNull::new(ptr.cast()).ok_or(ENOMEM)
> + }
> +
> + /// Frees an object previously returned by `alloc()`.
> + ///
> + /// # Safety
> + /// The caller must guarantee that `obj` was allocated from this cache and
> + /// is no longer accessed afterwards.
> + #[inline]
> + pub unsafe fn free(&self, obj: NonNull<T>) {
> + // SAFETY: By the safety contract the pointer is valid and unique at
> + // this point.
> + unsafe { bindings::kmem_cache_free(self.cache.as_ptr(), obj.cast().as_ptr()) };
> + }
> +
> + /// Returns a raw pointer to the underlying `kmem_cache`.
> + #[inline]
> + pub fn as_ptr(&self) -> *mut bindings::kmem_cache {
> + self.cache.as_ptr()
> + }
> +}
> +
> +impl<T> Drop for Slab<T> {
> + fn drop(&mut self) {
> + // SAFETY: `self.cache` was returned by a successful `kmem_cache_create`
> + // call and, as `self` is being dropped, it will not be used again.
> + unsafe { bindings::kmem_cache_destroy(self.cache.as_ptr()) };
> + }
> +}
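
For reference, here is a minimal sketch of how I'd expect a caller to
exercise this API (illustrative only, not part of the patch: the `MyData`
type, the cache name, and the calling function are made up, and the flag
arguments simply follow the `alloc::Flags` parameters in the signatures
as posted):

use kernel::alloc::flags::GFP_KERNEL;
use kernel::c_str;
use kernel::prelude::*;
use kernel::slab::Slab;

/// Hypothetical payload type; any `Sized` type would do.
struct MyData {
    a: u64,
    b: u32,
}

fn slab_example() -> Result {
    // Create a cache sized and aligned for `MyData`.
    let cache = Slab::<MyData>::try_new(c_str!("my_data_cache"), GFP_KERNEL)?;

    // Allocate one (uninitialized) object from the cache.
    let obj = cache.alloc(GFP_KERNEL)?;

    // Initialize the object before any use.
    // SAFETY: `obj` is a valid, properly aligned allocation for `MyData`.
    unsafe { obj.as_ptr().write(MyData { a: 0, b: 0 }) };

    // SAFETY: `obj` came from `cache` and is not used after this call.
    unsafe { cache.free(obj) };

    Ok(())
    // Dropping `cache` here ends up in kmem_cache_destroy().
}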