Message-Id: <20231101171629.3612299-12-Liam.Howlett@oracle.com>
Date: Wed, 1 Nov 2023 13:16:28 -0400
From: "Liam R. Howlett" <Liam.Howlett@...cle.com>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: maple-tree@...ts.infradead.org, linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
"Liam R. Howlett" <Liam.Howlett@...cle.com>
Subject: [PATCH 11/12] maple_tree: Don't find node end in mtree_lookup_walk()

Since the pivot being set is now reliable, the optimized loop no longer
needs to find the node end.  The redundant dead-node check can also be
dropped: there is no danger of using the wrong pivot, because the later
check will throw out the results if the node turns out to be dead.
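
For reference, the search loop in mtree_lookup_walk() after this change
reads roughly as follows (a sketch reconstructed from the first hunk
below; the descent and dead-node check at the bottom of the loop are
unchanged and only summarized in the comments):

	next = mas->node;
	do {
		node = mte_to_node(next);
		type = mte_node_type(next);
		pivots = ma_pivots(node, type);
		/* Walk the maximum number of pivots for this node type
		 * rather than computing the data end.
		 */
		end = mt_pivots[type];
		offset = 0;
		do {
			if (pivots[offset] >= mas->index)
				break;
		} while (++offset < end);

		slots = ma_slots(node, type);
		/* ... descend through slots[offset]; the later dead-node
		 * check throws away anything read from a dead node ...
		 */
	} while (!ma_is_leaf(type));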

This patch also adds a benchmark test for the function to the maple
tree test framework.  The benchmark shows an average performance
increase of 5.98% over 3 runs with this commit.
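
The new bench_load() (added below) populates the tree with 250 short
ranges and then calls mtree_load() on a fixed index 550 million times.
As with the other BENCH_* options, it is run by uncommenting its define
in lib/test_maple_tree.c (built with CONFIG_TEST_MAPLE_TREE), roughly:

	/* lib/test_maple_tree.c */
	#define BENCH_LOAD
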
Signed-off-by: Liam R. Howlett <Liam.Howlett@...cle.com>
---
lib/maple_tree.c | 12 +++---------
lib/test_maple_tree.c | 21 +++++++++++++++++++++
2 files changed, 24 insertions(+), 9 deletions(-)
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index e45734676471..a91adaf17306 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -3732,23 +3732,17 @@ static inline void *mtree_lookup_walk(struct ma_state *mas)
 	enum maple_type type;
 	void __rcu **slots;
 	unsigned char end;
-	unsigned long max;
 
 	next = mas->node;
-	max = ULONG_MAX;
 	do {
-		offset = 0;
 		node = mte_to_node(next);
 		type = mte_node_type(next);
 		pivots = ma_pivots(node, type);
-		end = ma_data_end(node, type, pivots, max);
-		if (unlikely(ma_dead_node(node)))
-			goto dead_node;
+		end = mt_pivots[type];
+		offset = 0;
 		do {
-			if (pivots[offset] >= mas->index) {
-				max = pivots[offset];
+			if (pivots[offset] >= mas->index)
 				break;
-			}
 		} while (++offset < end);
 
 		slots = ma_slots(node, type);
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index b82c02f15380..d36dc64a93e4 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -42,6 +42,7 @@ atomic_t maple_tree_tests_passed;
 /* #define BENCH_NODE_STORE */
 /* #define BENCH_AWALK */
 /* #define BENCH_WALK */
+/* #define BENCH_LOAD */
 /* #define BENCH_MT_FOR_EACH */
 /* #define BENCH_FORK */
 /* #define BENCH_MAS_FOR_EACH */
@@ -1753,6 +1754,19 @@ static noinline void __init bench_walk(struct maple_tree *mt)
 }
 #endif
 
+#if defined(BENCH_LOAD)
+static noinline void __init bench_load(struct maple_tree *mt)
+{
+	int i, max = 2500, count = 550000000;
+
+	for (i = 0; i < max; i += 10)
+		mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
+
+	for (i = 0; i < count; i++)
+		mtree_load(mt, 1470);
+}
+#endif
+
 #if defined(BENCH_MT_FOR_EACH)
 static noinline void __init bench_mt_for_each(struct maple_tree *mt)
 {
@@ -3606,6 +3620,13 @@ static int __init maple_tree_seed(void)
 	mtree_destroy(&tree);
 	goto skip;
 #endif
+#if defined(BENCH_LOAD)
+#define BENCH
+	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+	bench_load(&tree);
+	mtree_destroy(&tree);
+	goto skip;
+#endif
 #if defined(BENCH_FORK)
 #define BENCH
 	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
--
2.40.1