Message-Id: <20230602075353.5917-1-zhangpeng.00@bytedance.com>
Date: Fri, 2 Jun 2023 15:53:52 +0800
From: Peng Zhang <zhangpeng.00@...edance.com>
To: Liam.Howlett@...cle.com
Cc: akpm@...ux-foundation.org, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, maple-tree@...ts.infradead.org,
Peng Zhang <zhangpeng.00@...edance.com>
Subject: [PATCH 1/2] maple_tree: optimize mas_wr_append(), also improve duplicating VMAs

When the new range is completely contained within the last existing
range and does not touch either of its boundaries, two new entries can
be appended to the end of the node as a fast path. The original last
pivot is updated last, and the two newly appended entries are not
reachable before that update, so this is also safe in RCU mode.
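
As an illustration (hypothetical numbers, not taken from the patch):
suppose the last slot of a leaf holds OLD over [100, 1000], i.e.
pivots[end] = 1000, and we store NEW over [200, 300]. With
new_end = end + 2, the append produces:

	slots[end]     = OLD   pivots[end]     = 199   (written last)
	slots[end + 1] = NEW   pivots[end + 1] = 300
	slots[end + 2] = OLD   pivots[end + 2] = 1000

Until the final pivots[end] update, a racing RCU reader looking up,
say, 250 stops at slot end (pivot still 1000) and sees OLD; after the
update it walks on to slot end + 1 and sees NEW. It never observes a
partially built layout.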

This is useful for sequential insertion, which is what dup_mmap() does.
Enabling BENCH_FORK in test_maple_tree and running just bench_forking()
gives the following timings:

before: 17,874.83 msec
after:  15,738.38 msec
It shows about a 12% performance improvement for duplicating VMAs.
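
(To reproduce the numbers: bench_forking() is compiled out by default.
Assuming the usual pattern in lib/test_maple_tree.c, it is enabled by
uncommenting the corresponding define before building the test, i.e.
changing

	/* #define BENCH_FORK */

to

	#define BENCH_FORK

and then rebuilding and running test_maple_tree.)
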
Signed-off-by: Peng Zhang <zhangpeng.00@...edance.com>
---
lib/maple_tree.c | 33 ++++++++++++++++++++++-----------
1 file changed, 22 insertions(+), 11 deletions(-)
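
Not part of the patch, just a sketch of the workload this targets
(hypothetical values; whether a given store takes the append path
depends on how full the leaf is):

	#include <linux/maple_tree.h>

	static DEFINE_MTREE(tree);

	static void append_demo(void)
	{
		unsigned long start;

		/*
		 * Sequential, non-overlapping range stores, as when
		 * dup_mmap() copies VMAs in address order: each store
		 * splits the trailing empty range and leaves a gap on
		 * both sides, which is exactly the case the new
		 * two-entry append handles without rebuilding the node.
		 */
		for (start = 0x1000; start < 0x10000; start += 0x2000)
			mtree_store_range(&tree, start, start + 0xfff,
					  (void *)start, GFP_KERNEL);

		mtree_destroy(&tree);
	}
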
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 7dd54afe66ed..cfd9fad308a2 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -4199,10 +4199,10 @@ static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
  *
  * Return: True if appended, false otherwise
  */
-static inline bool mas_wr_append(struct ma_wr_state *wr_mas)
+static inline bool mas_wr_append(struct ma_wr_state *wr_mas,
+		unsigned char new_end)
 {
 	unsigned char end = wr_mas->node_end;
-	unsigned char new_end = end + 1;
 	struct ma_state *mas = wr_mas->mas;
 	unsigned char node_pivots = mt_pivots[wr_mas->type];
 
@@ -4214,16 +4214,27 @@ static inline bool mas_wr_append(struct ma_wr_state *wr_mas)
 		ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end);
 	}
 
-	if (mas->last == wr_mas->r_max) {
-		/* Append to end of range */
-		rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->entry);
-		wr_mas->pivots[end] = mas->index - 1;
-		mas->offset = new_end;
+	if (new_end == wr_mas->node_end + 1) {
+		if (mas->last == wr_mas->r_max) {
+			/* Append to end of range */
+			rcu_assign_pointer(wr_mas->slots[new_end],
+					   wr_mas->entry);
+			wr_mas->pivots[end] = mas->index - 1;
+			mas->offset = new_end;
+		} else {
+			/* Append to start of range */
+			rcu_assign_pointer(wr_mas->slots[new_end],
+					   wr_mas->content);
+			wr_mas->pivots[end] = mas->last;
+			rcu_assign_pointer(wr_mas->slots[end], wr_mas->entry);
+		}
 	} else {
-		/* Append to start of range */
+		/* Append to the range without touching any boundaries. */
 		rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->content);
-		wr_mas->pivots[end] = mas->last;
-		rcu_assign_pointer(wr_mas->slots[end], wr_mas->entry);
+		wr_mas->pivots[end + 1] = mas->last;
+		rcu_assign_pointer(wr_mas->slots[end + 1], wr_mas->entry);
+		wr_mas->pivots[end] = mas->index - 1;
+		mas->offset = end + 1;
 	}
 
 	if (!wr_mas->content || !wr_mas->entry)
@@ -4270,7 +4281,7 @@ static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
 		goto slow_path;
 
 	/* Attempt to append */
-	if (new_end == wr_mas->node_end + 1 && mas_wr_append(wr_mas))
+	if (mas_wr_append(wr_mas, new_end))
 		return;
 
 	if (new_end == wr_mas->node_end && mas_wr_slot_store(wr_mas))
--
2.20.1