Message-ID: <20260119142222.4958-1-sef1548@gmail.com>
Date: Mon, 19 Jan 2026 14:22:22 +0000
From: Nick Huang <sef1548@...il.com>
To: linux-kernel@...r.kernel.org
Cc: Ilya Dryomov <idryomov@...il.com>,
	Geert Uytterhoeven <geert+renesas@...der.be>,
	David Gow <davidgow@...gle.com>,
	Nick Huang <sef1548@...il.com>,
	David Sterba <dsterba@...e.com>,
	Chuck Lever <chuck.lever@...cle.com>,
	Chaohai Chen <wdhh6@...yun.com>,
	paladin@...b.edu.tw,
	kusogame68@...il.com,
	n1136402@...b.edu.tw
Subject: [PATCH] kunit: add cross-CPU alloc/free test for kmem_cache

Add a KUnit test to verify that kmem_cache objects can be allocated
on one CPU and freed on another. This exercises SLUB's remote-free
path: a cross-CPU free cannot take the freeing CPU's per-CPU fast
path and must instead return the object to the slab it came from.

The test:
- allocates an object on the first online CPU
- frees it on the second online CPU
- allocates and frees once more to verify the cache is still usable
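
One way to run this under kunit_tool on a two-CPU QEMU guest (assuming
the suite keeps this file's existing "slub" kunit_suite name and a
kunitconfig with CONFIG_SLUB_KUNIT_TEST=y and CONFIG_SMP=y):

  ./tools/testing/kunit/kunit.py run --arch=x86_64 \
          --qemu_args='-smp 2' 'slub*'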

Signed-off-by: Nick Huang <sef1548@...il.com>
---
 lib/tests/slub_kunit.c | 69 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 69 insertions(+)

diff --git a/lib/tests/slub_kunit.c b/lib/tests/slub_kunit.c
index 848b682a2..c556723d4 100644
--- a/lib/tests/slub_kunit.c
+++ b/lib/tests/slub_kunit.c
@@ -7,6 +7,8 @@
 #include <linux/kernel.h>
 #include <linux/rcupdate.h>
 #include <linux/delay.h>
+#include <linux/cpumask.h>
+#include <linux/smp.h>
 #include "../mm/slab.h"
 
 static struct kunit_resource resource;
@@ -290,7 +292,73 @@ static void test_krealloc_redzone_zeroing(struct kunit *test)
 	kasan_enable_current();
 	kmem_cache_destroy(s);
 }
+
+struct cache_obj {
+	struct kmem_cache *cache;
+	void *obj;
+};
+
+/* Runs on the target CPU, in IPI context: allocate an object */
+static void alloc_obj_fn(void *arg)
+{
+	struct cache_obj *co = arg;
+
+	/* IPI handlers cannot sleep, so the allocation must be atomic */
+	co->obj = kmem_cache_alloc(co->cache, GFP_ATOMIC);
+}
 
+/* Runs on the target CPU, in IPI context: free the object */
+static void free_obj_fn(void *arg)
+{
+	struct cache_obj *co = arg;
+
+	kmem_cache_free(co->cache, co->obj);
+}
+
+static void test_cross_cpu_alloc_free(struct kunit *test)
+{
+	struct kmem_cache *cache;
+	struct cache_obj co;
+	int cpu0, cpu1;
+
+	if (num_online_cpus() < 2)
+		kunit_skip(test, "need >= 2 online CPUs");
+
+	/* Create a test cache */
+	cache = kmem_cache_create("cross_cpu_test", 64, 0,
+				  SLAB_DEBUG_FLAGS, NULL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
+
+	/* Pick the first two online CPUs */
+	cpu0 = cpumask_first(cpu_online_mask);
+	cpu1 = cpumask_next(cpu0, cpu_online_mask);
+
+	co.cache = cache;
+	co.obj = NULL;
+
+	/* Allocate on cpu0; wait=1 so the IPI completes before we check */
+	KUNIT_ASSERT_EQ(test,
+			smp_call_function_single(cpu0, alloc_obj_fn, &co, 1),
+			0);
+	/*
+	 * Check the result here rather than in the IPI handler, where a
+	 * failing KUnit assertion could not safely abort the test.
+	 */
+	KUNIT_ASSERT_NOT_NULL(test, co.obj);
+
+	/* Free on a different CPU */
+	KUNIT_ASSERT_EQ(test,
+			smp_call_function_single(cpu1, free_obj_fn, &co, 1),
+			0);
+
+	/* Allocate again (on any CPU) to verify the cache still works */
+	co.obj = kmem_cache_alloc(cache, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_NULL(test, co.obj);
+
+	kmem_cache_free(cache, co.obj);
+	kmem_cache_destroy(cache);
+}
+
 static int test_init(struct kunit *test)
 {
 	slab_errors = 0;
@@ -315,6 +383,7 @@ static struct kunit_case test_cases[] = {
 	KUNIT_CASE(test_kfree_rcu_wq_destroy),
 	KUNIT_CASE(test_leak_destroy),
 	KUNIT_CASE(test_krealloc_redzone_zeroing),
+	KUNIT_CASE(test_cross_cpu_alloc_free),
 	{}
 };
 
-- 
2.43.0

