Message-Id: <20250901-maple-sheaves-v1-9-d6a1166b53f2@suse.cz>
Date: Mon, 01 Sep 2025 13:08:59 +0200
From: Vlastimil Babka <vbabka@...e.cz>
To: "Liam R. Howlett" <Liam.Howlett@...cle.com>,
Matthew Wilcox <willy@...radead.org>,
Lorenzo Stoakes <lorenzo.stoakes@...cle.com>, Jann Horn <jannh@...gle.com>,
Pedro Falcato <pfalcato@...e.de>, Suren Baghdasaryan <surenb@...gle.com>
Cc: Harry Yoo <harry.yoo@...cle.com>,
Andrew Morton <akpm@...ux-foundation.org>, maple-tree@...ts.infradead.org,
linux-mm@...ck.org, linux-fsdevel@...r.kernel.org,
linux-kernel@...r.kernel.org, Vlastimil Babka <vbabka@...e.cz>
Subject: [PATCH 09/12] tools: Add sheaf to slab testing
From: "Liam R. Howlett" <Liam.Howlett@...cle.com>
Add the sheaf structs to the slab header and the functions to the
testing/shared/linux.c file.
Signed-off-by: Liam R. Howlett <Liam.Howlett@...cle.com>
Signed-off-by: Vlastimil Babka <vbabka@...e.cz>
---
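An illustrative usage sketch, not part of the diff below: roughly how a
test might exercise these stubs, assuming a kmem_cache set up elsewhere
by the harness. The helper name and the requested size of 8 are
arbitrary; GFP_KERNEL, kmem_cache_free() and pr_debug() come from the
existing shared test headers.

static void sheaf_smoke_test(struct kmem_cache *s)
{
        struct slab_sheaf *sheaf;
        void *obj;

        /* Get a sheaf prefilled with 8 objects; its capacity is at least 8. */
        sheaf = kmem_cache_prefill_sheaf(s, GFP_KERNEL, 8);
        if (!sheaf)
                return;

        /* Take one object out; the sheaf size drops by one. */
        obj = kmem_cache_alloc_from_sheaf(s, GFP_KERNEL, sheaf);
        if (obj)
                kmem_cache_free(s, obj);

        /* Top the sheaf back up to 8 objects (within its capacity here). */
        if (kmem_cache_refill_sheaf(s, GFP_KERNEL, &sheaf, 8))
                pr_debug("refill failed\n");

        pr_debug("sheaf holds %u objects\n", kmem_cache_sheaf_size(sheaf));

        /* Return the sheaf; any remaining objects are bulk-freed. */
        kmem_cache_return_sheaf(s, GFP_KERNEL, sheaf);
}
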
tools/include/linux/slab.h | 28 ++++++++++++++
tools/testing/shared/linux.c | 89 ++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 117 insertions(+)

diff --git a/tools/include/linux/slab.h b/tools/include/linux/slab.h
index c5c5cc6db5668be2cc94c29065ccfa7ca7b4bb08..94937a699402bd1f31887dfb52b6fd0a3c986f43 100644
--- a/tools/include/linux/slab.h
+++ b/tools/include/linux/slab.h
@@ -123,6 +123,18 @@ struct kmem_cache_args {
void (*ctor)(void *);
};
+struct slab_sheaf {
+ union {
+ struct list_head barn_list;
+ /* only used for prefilled sheafs */
+ unsigned int capacity;
+ };
+ struct kmem_cache *cache;
+ unsigned int size;
+ int node; /* only used for rcu_sheaf */
+ void *objects[];
+};
+
static inline void *kzalloc(size_t size, gfp_t gfp)
{
return kmalloc(size, gfp | __GFP_ZERO);
@@ -173,5 +185,21 @@ __kmem_cache_create(const char *name, unsigned int size, unsigned int align,
void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list);
int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
void **list);
+struct slab_sheaf *
+kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size);
+
+void *
+kmem_cache_alloc_from_sheaf(struct kmem_cache *s, gfp_t gfp,
+ struct slab_sheaf *sheaf);
+
+void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
+ struct slab_sheaf *sheaf);
+int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
+ struct slab_sheaf **sheafp, unsigned int size);
+
+static inline unsigned int kmem_cache_sheaf_size(struct slab_sheaf *sheaf)
+{
+ return sheaf->size;
+}
#endif /* _TOOLS_SLAB_H */
diff --git a/tools/testing/shared/linux.c b/tools/testing/shared/linux.c
index 97b8412ccbb6d222604c7b397c53c65618d8d51b..4ceff7969b78cf8e33cd1e021c68bc9f8a02a7a1 100644
--- a/tools/testing/shared/linux.c
+++ b/tools/testing/shared/linux.c
@@ -137,6 +137,12 @@ void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list)
if (kmalloc_verbose)
pr_debug("Bulk free %p[0-%zu]\n", list, size - 1);
+ if (cachep->exec_callback) {
+ if (cachep->callback)
+ cachep->callback(cachep->private);
+ cachep->exec_callback = false;
+ }
+
pthread_mutex_lock(&cachep->lock);
for (int i = 0; i < size; i++)
kmem_cache_free_locked(cachep, list[i]);
@@ -242,6 +248,89 @@ __kmem_cache_create_args(const char *name, unsigned int size,
return ret;
}
+struct slab_sheaf *
+kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size)
+{
+ struct slab_sheaf *sheaf;
+ unsigned int capacity;
+
+ if (s->exec_callback) {
+ if (s->callback)
+ s->callback(s->private);
+ s->exec_callback = false;
+ }
+
+ capacity = max(size, s->sheaf_capacity);
+
+ sheaf = calloc(1, sizeof(*sheaf) + sizeof(void *) * capacity);
+ if (!sheaf)
+ return NULL;
+
+ sheaf->cache = s;
+ sheaf->capacity = capacity;
+ sheaf->size = kmem_cache_alloc_bulk(s, gfp, size, sheaf->objects);
+ if (!sheaf->size) {
+ free(sheaf);
+ return NULL;
+ }
+
+ return sheaf;
+}
+
+int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
+ struct slab_sheaf **sheafp, unsigned int size)
+{
+ struct slab_sheaf *sheaf = *sheafp;
+ int refill;
+
+ if (sheaf->size >= size)
+ return 0;
+
+ if (size > sheaf->capacity) {
+ sheaf = kmem_cache_prefill_sheaf(s, gfp, size);
+ if (!sheaf)
+ return -ENOMEM;
+
+ kmem_cache_return_sheaf(s, gfp, *sheafp);
+ *sheafp = sheaf;
+ return 0;
+ }
+
+ refill = kmem_cache_alloc_bulk(s, gfp, size - sheaf->size,
+ &sheaf->objects[sheaf->size]);
+ if (!refill)
+ return -ENOMEM;
+
+ sheaf->size += refill;
+ return 0;
+}
+
+void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
+ struct slab_sheaf *sheaf)
+{
+ if (sheaf->size)
+ kmem_cache_free_bulk(s, sheaf->size, &sheaf->objects[0]);
+
+ free(sheaf);
+}
+
+void *
+kmem_cache_alloc_from_sheaf(struct kmem_cache *s, gfp_t gfp,
+ struct slab_sheaf *sheaf)
+{
+ void *obj;
+
+ if (sheaf->size == 0) {
+ printf("Nothing left in sheaf!\n");
+ return NULL;
+ }
+
+ obj = sheaf->objects[--sheaf->size];
+ sheaf->objects[sheaf->size] = NULL;
+
+ return obj;
+}
+
/*
* Test the test infrastructure for kem_cache_alloc/free and bulk counterparts.
*/
--
2.51.0