Message-ID: <20240607185257.963768-10-sidhartha.kumar@oracle.com>
Date: Fri, 7 Jun 2024 11:52:50 -0700
From: Sidhartha Kumar <sidhartha.kumar@...cle.com>
To: linux-kernel@...r.kernel.org, maple-tree@...ts.infradead.org
Cc: linux-mm@...ck.org, akpm@...ux-foundation.org, liam.howlett@...cle.com,
willy@...radead.org, Sidhartha Kumar <sidhartha.kumar@...cle.com>
Subject: [PATCH v2 09/16] maple_tree: use store type in mas_wr_store_entry()
When storing an entry, read the store type that was set by a previous
partial walk of the tree. Since the type of store is now known, select
the correct write helper function to complete the store.
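
For readers less familiar with the write path, here is a minimal,
self-contained sketch (not maple tree code) of the pattern this change
relies on: a walk classifies the write once, records the result in the
state, and the store later dispatches on that recorded type instead of
deciding again. All names below (toy_state, toy_classify(), TOY_WR_*)
are invented for illustration.

/* Illustrative sketch only -- models the "classify once, dispatch
 * later" pattern; it is not the maple tree implementation.
 */
#include <stdio.h>

enum toy_store_type {
	TOY_WR_INVALID,
	TOY_WR_EXACT_FIT,
	TOY_WR_APPEND,
	TOY_WR_SPANNING,
};

struct toy_state {
	unsigned long index;
	unsigned long last;
	unsigned long node_max;		/* pretend pivot limit of the leaf */
	enum toy_store_type store_type;
};

/* "Walk" step: decide the store type once and cache it in the state. */
static void toy_classify(struct toy_state *s)
{
	if (s->last > s->node_max)
		s->store_type = TOY_WR_SPANNING;
	else if (s->index == s->last)
		s->store_type = TOY_WR_EXACT_FIT;
	else
		s->store_type = TOY_WR_APPEND;
}

/* "Store" step: no re-walk, just dispatch on the cached type. */
static void toy_store(struct toy_state *s)
{
	switch (s->store_type) {
	case TOY_WR_INVALID:
		fprintf(stderr, "bug: store type not set\n");
		break;
	case TOY_WR_EXACT_FIT:
		puts("exact fit: overwrite a single slot");
		break;
	case TOY_WR_APPEND:
		puts("append: extend the existing leaf");
		break;
	case TOY_WR_SPANNING:
		puts("spanning: rebuild across node boundaries");
		break;
	}
}

int main(void)
{
	struct toy_state s = { .index = 5, .last = 9, .node_max = 100,
			       .store_type = TOY_WR_INVALID };

	toy_classify(&s);	/* previous partial walk sets the type */
	toy_store(&s);		/* later store only reads it back */
	return 0;
}
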
Also mark mas_wr_spanning_store() noinline to limit stack frame usage
in mas_wr_store_entry(), since mas_wr_spanning_store() allocates a
maple_big_node on the stack.
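
As a rough illustration of why the noinline matters (again not maple
tree code, and using the plain GCC/Clang attribute rather than the
kernel's noinline macro): the large scratch object stays confined to
the out-of-line helper's frame instead of inflating the caller's frame
on every path once the helper is inlined. The names big_scratch,
big_helper() and dispatch() are made up for this sketch.

#include <string.h>
#include <stdio.h>

struct big_scratch {
	char buf[2048];		/* stands in for struct maple_big_node */
};

static __attribute__((noinline)) int big_helper(const char *src)
{
	struct big_scratch scratch;	/* large frame lives only here */

	strncpy(scratch.buf, src, sizeof(scratch.buf) - 1);
	scratch.buf[sizeof(scratch.buf) - 1] = '\0';
	return (int)strlen(scratch.buf);
}

static int dispatch(int rare_case, const char *src)
{
	if (rare_case)
		return big_helper(src);	/* big frame only on this path */
	return 0;			/* common path keeps a small frame */
}

int main(void)
{
	printf("%d\n", dispatch(1, "spanning store"));
	return 0;
}
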
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@...cle.com>
---
lib/maple_tree.c | 55 +++++++++++++++++++++++++++++++-----------------
1 file changed, 36 insertions(+), 19 deletions(-)
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 91b18eef81e1..2c42e99c400c 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -3780,7 +3780,7 @@ static inline int mas_new_root(struct ma_state *mas, void *entry)
*
* Return: 0 on error, positive on success.
*/
-static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
+static noinline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
{
struct maple_subtree_state mast;
struct maple_big_node b_node;
@@ -4206,27 +4206,42 @@ static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
+ unsigned char new_end = mas_wr_new_end(wr_mas);
- wr_mas->content = mas_start(mas);
- if (mas_is_none(mas) || mas_is_ptr(mas)) {
+ switch (mas->store_type) {
+ case wr_invalid:
+ MT_BUG_ON(mas->tree, 1);
+ return NULL;
+ case wr_new_root:
+ mas_new_root(mas, wr_mas->entry);
+ break;
+ case wr_store_root:
mas_store_root(mas, wr_mas->entry);
- return wr_mas->content;
- }
-
- if (unlikely(!mas_wr_walk(wr_mas))) {
+ break;
+ case wr_exact_fit:
+ rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
+ if (!!wr_mas->entry ^ !!wr_mas->content)
+ mas_update_gap(mas);
+ break;
+ case wr_append:
+ mas_wr_append(wr_mas, new_end);
+ break;
+ case wr_slot_store:
+ mas_wr_slot_store(wr_mas);
+ break;
+ case wr_node_store:
+ mas_wr_node_store(wr_mas, new_end);
+ break;
+ case wr_spanning_store:
mas_wr_spanning_store(wr_mas);
- return wr_mas->content;
- }
-
- /* At this point, we are at the leaf node that needs to be altered. */
- mas_wr_end_piv(wr_mas);
- /* New root for a single pointer */
- if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
- mas_new_root(mas, wr_mas->entry);
- return wr_mas->content;
+ break;
+ case wr_split_store:
+ case wr_rebalance:
+ case wr_bnode:
+ mas_wr_bnode(wr_mas);
+ break;
}
- mas_wr_modify(wr_mas);
return wr_mas->content;
}
@@ -5590,7 +5605,8 @@ void *mas_store(struct ma_state *mas, void *entry)
* want to examine what happens if a single store operation was to
* overwrite multiple entries within a self-balancing B-Tree.
*/
- mas_wr_store_setup(&wr_mas);
+ mas_wr_prealloc_setup(&wr_mas);
+ mas_wr_store_type(&wr_mas);
mas_wr_store_entry(&wr_mas);
return wr_mas.content;
}
@@ -5639,7 +5655,8 @@ void mas_store_prealloc(struct ma_state *mas, void *entry)
{
MA_WR_STATE(wr_mas, mas, entry);
- mas_wr_store_setup(&wr_mas);
+ mas_wr_prealloc_setup(&wr_mas);
+ mas_wr_store_type(&wr_mas);
trace_ma_write(__func__, mas, 0, entry);
mas_wr_store_entry(&wr_mas);
MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
--
2.45.2