Message-Id: <20250723-slub-percpu-caches-v5-14-b792cd830f5d@suse.cz>
Date: Wed, 23 Jul 2025 15:34:47 +0200
From: Vlastimil Babka <vbabka@...e.cz>
To: Suren Baghdasaryan <surenb@...gle.com>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>,
Christoph Lameter <cl@...two.org>, David Rientjes <rientjes@...gle.com>
Cc: Roman Gushchin <roman.gushchin@...ux.dev>,
Harry Yoo <harry.yoo@...cle.com>, Uladzislau Rezki <urezki@...il.com>,
linux-mm@...ck.org, linux-kernel@...r.kernel.org, rcu@...r.kernel.org,
maple-tree@...ts.infradead.org, vbabka@...e.cz,
"Liam R. Howlett" <Liam.Howlett@...cle.com>
Subject: [PATCH v5 14/14] maple_tree: Convert forking to use the sheaf
interface
From: "Liam R. Howlett" <Liam.Howlett@...cle.com>
Use the generic sheaf interface, which should result in fewer bulk
allocations during a fork.

Part of this is to abstract the freeing of the sheaf or maple state
allocations into its own function, so that mas_destroy() and the tree
duplication code can use the same functionality to return any unused
resources.
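
The pattern this converts to is: request a sheaf of nodes once up
front, pop them one at a time while copying the tree, and hand any
leftovers back in a single call. Below is a minimal userspace sketch
of that pattern; the names (sheaf_prefill() etc.) are hypothetical
stand-ins for illustration, not the kernel API. The real calls are
mas_alloc_nodes(), mas_pop_node() and mas_empty_nodes() in the diff.

#include <stdlib.h>

#define SHEAF_CAPACITY 32

struct sheaf {
	unsigned int size;		/* nodes still available */
	void *nodes[SHEAF_CAPACITY];
};

/* Return all unused nodes at once; mirrors mt_return_sheaf(). */
static void sheaf_return(struct sheaf *s)
{
	while (s->size)
		free(s->nodes[--s->size]);
	free(s);
}

/* Prefill a sheaf with 'count' nodes; NULL on failure. */
static struct sheaf *sheaf_prefill(unsigned int count)
{
	struct sheaf *s;

	if (count > SHEAF_CAPACITY)
		return NULL;
	s = malloc(sizeof(*s));
	if (!s)
		return NULL;
	for (s->size = 0; s->size < count; s->size++) {
		s->nodes[s->size] = malloc(256); /* stand-in for a node */
		if (!s->nodes[s->size]) {
			sheaf_return(s);
			return NULL;
		}
	}
	return s;
}

/* Pop one prefilled node; mirrors mas_pop_node(). */
static void *sheaf_pop(struct sheaf *s)
{
	return s->size ? s->nodes[--s->size] : NULL;
}

int main(void)
{
	struct sheaf *s = sheaf_prefill(8);	/* one batched request... */

	if (!s)
		return 1;
	for (int i = 0; i < 5; i++)		/* ...consumed node by node */
		free(sheaf_pop(s));
	sheaf_return(s);			/* leftovers returned in bulk */
	return 0;
}
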
Signed-off-by: Liam R. Howlett <Liam.Howlett@...cle.com>
Signed-off-by: Vlastimil Babka <vbabka@...e.cz>
---
lib/maple_tree.c | 42 +++++++++++++++++++++++-------------------
1 file changed, 23 insertions(+), 19 deletions(-)
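
The mas_dup_alloc() hunk below pops a fresh node for each child slot
and ORs in the node type taken from the matching source slot. This
relies on maple nodes being 256-byte aligned, leaving the low bits
that MAPLE_NODE_MASK covers free to carry the type. A minimal
stand-alone sketch of that encoding, assuming that alignment and
using illustrative names rather than the kernel API:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define NODE_ALIGN	256
#define TYPE_MASK	((uintptr_t)NODE_ALIGN - 1)	/* like MAPLE_NODE_MASK */

/* Tag the (alignment-guaranteed zero) low bits with the node type. */
static void *encode_slot(void *node, unsigned int type)
{
	assert(((uintptr_t)node & TYPE_MASK) == 0);
	return (void *)((uintptr_t)node | type);
}

/* Strip the type bits to recover the plain node pointer. */
static void *slot_to_node(void *slot)
{
	return (void *)((uintptr_t)slot & ~TYPE_MASK);
}

int main(void)
{
	void *node = aligned_alloc(NODE_ALIGN, NODE_ALIGN);
	void *slot;

	if (!node)
		return 1;
	slot = encode_slot(node, 3);	/* 3 is an arbitrary example type */
	assert(slot_to_node(slot) == node);
	assert(((uintptr_t)slot & TYPE_MASK) == 3);
	free(node);
	return 0;
}
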
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 9aa782b1497f224e7366ebbd65f997523ee0c8ab..180d5e2ea49440248aaae04a066276406b2537ed 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -1178,6 +1178,19 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
mas_set_err(mas, -ENOMEM);
}
+static inline void mas_empty_nodes(struct ma_state *mas)
+{
+ mas->node_request = 0;
+ if (mas->sheaf) {
+ mt_return_sheaf(mas->sheaf);
+ mas->sheaf = NULL;
+ }
+
+ if (mas->alloc) {
+ mt_free_one(mas->alloc);
+ mas->alloc = NULL;
+ }
+}
/*
* mas_free() - Free an encoded maple node
@@ -5414,15 +5427,7 @@ void mas_destroy(struct ma_state *mas)
mas->mas_flags &= ~MA_STATE_REBALANCE;
}
mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);
-
- mas->node_request = 0;
- if (mas->sheaf)
- mt_return_sheaf(mas->sheaf);
- mas->sheaf = NULL;
-
- if (mas->alloc)
- mt_free_one(mas->alloc);
- mas->alloc = NULL;
+ mas_empty_nodes(mas);
}
EXPORT_SYMBOL_GPL(mas_destroy);
@@ -6499,7 +6504,7 @@ static inline void mas_dup_alloc(struct ma_state *mas, struct ma_state *new_mas,
struct maple_node *node = mte_to_node(mas->node);
struct maple_node *new_node = mte_to_node(new_mas->node);
enum maple_type type;
- unsigned char request, count, i;
+ unsigned char count, i;
void __rcu **slots;
void __rcu **new_slots;
unsigned long val;
@@ -6507,20 +6512,17 @@ static inline void mas_dup_alloc(struct ma_state *mas, struct ma_state *new_mas,
/* Allocate memory for child nodes. */
type = mte_node_type(mas->node);
new_slots = ma_slots(new_node, type);
- request = mas_data_end(mas) + 1;
- count = mt_alloc_bulk(gfp, request, (void **)new_slots);
- if (unlikely(count < request)) {
- memset(new_slots, 0, request * sizeof(void *));
- mas_set_err(mas, -ENOMEM);
+ count = mas->node_request = mas_data_end(mas) + 1;
+ mas_alloc_nodes(mas, gfp);
+ if (unlikely(mas_is_err(mas)))
return;
- }
- /* Restore node type information in slots. */
slots = ma_slots(node, type);
for (i = 0; i < count; i++) {
val = (unsigned long)mt_slot_locked(mas->tree, slots, i);
val &= MAPLE_NODE_MASK;
- ((unsigned long *)new_slots)[i] |= val;
+ new_slots[i] = ma_mnode_ptr((unsigned long)mas_pop_node(mas) |
+ val);
}
}
@@ -6574,7 +6576,7 @@ static inline void mas_dup_build(struct ma_state *mas, struct ma_state *new_mas,
/* Only allocate child nodes for non-leaf nodes. */
mas_dup_alloc(mas, new_mas, gfp);
if (unlikely(mas_is_err(mas)))
- return;
+ goto empty_mas;
} else {
/*
* This is the last leaf node and duplication is
@@ -6607,6 +6609,8 @@ static inline void mas_dup_build(struct ma_state *mas, struct ma_state *new_mas,
/* Make them the same height */
new_mas->tree->ma_flags = mas->tree->ma_flags;
rcu_assign_pointer(new_mas->tree->ma_root, root);
+empty_mas:
+ mas_empty_nodes(mas);
}
/**
--
2.50.1