Message-Id: <20241024132225.2271667-3-yukuai1@huaweicloud.com>
Date: Thu, 24 Oct 2024 21:22:15 +0800
From: Yu Kuai <yukuai1@...weicloud.com>
To: stable@...r.kernel.org,
gregkh@...uxfoundation.org,
harry.wentland@....com,
sunpeng.li@....com,
Rodrigo.Siqueira@....com,
alexander.deucher@....com,
christian.koenig@....com,
Xinhui.Pan@....com,
airlied@...il.com,
daniel@...ll.ch,
viro@...iv.linux.org.uk,
brauner@...nel.org,
Liam.Howlett@...cle.com,
akpm@...ux-foundation.org,
hughd@...gle.com,
willy@...radead.org,
sashal@...nel.org,
srinivasan.shanmugam@....com,
chiahsuan.chung@....com,
mingo@...nel.org,
mgorman@...hsingularity.net,
yukuai3@...wei.com,
chengming.zhou@...ux.dev,
zhangpeng.00@...edance.com,
chuck.lever@...cle.com
Cc: amd-gfx@...ts.freedesktop.org,
dri-devel@...ts.freedesktop.org,
linux-kernel@...r.kernel.org,
linux-fsdevel@...r.kernel.org,
maple-tree@...ts.infradead.org,
linux-mm@...ck.org,
yukuai1@...weicloud.com,
yi.zhang@...wei.com,
yangerkun@...wei.com
Subject: [PATCH 6.6 18/28] maple_tree: don't find node end in mtree_lookup_walk()
From: "Liam R. Howlett" <Liam.Howlett@...cle.com>
commit 24662decdd44645e8f027d7912be962dd461d1aa upstream.

Since the pivot being set is now reliable, the optimized loop no longer
needs to find the node end. The redundant check for a dead node can also
be avoided, as there is no danger of using the wrong pivot: in the case
of a dead node, the results will be thrown out by the later check.

This patch also adds a benchmark test for the function to the maple tree
test framework. The benchmark shows an average performance increase of
5.98% over 3 runs with this commit.

Link: https://lkml.kernel.org/r/20231101171629.3612299-12-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@...cle.com>
Cc: Peng Zhang <zhangpeng.00@...edance.com>
Signed-off-by: Andrew Morton <akpm@...ux-foundation.org>
Signed-off-by: Yu Kuai <yukuai3@...wei.com>
---
lib/maple_tree.c | 12 +++---------
lib/test_maple_tree.c | 21 +++++++++++++++++++++
2 files changed, 24 insertions(+), 9 deletions(-)
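
The hunk below removes the per-node end/dead-node work from the walk
loop. For readability, this is roughly what the loop in
mtree_lookup_walk() looks like with the patch applied; it is a condensed
sketch assembled from the hunk (the loop tail is elided), not a verbatim
copy of lib/maple_tree.c:

	next = mas->node;
	do {
		node = mte_to_node(next);
		type = mte_node_type(next);
		pivots = ma_pivots(node, type);
		end = mt_pivots[type];	/* per-type max, not the computed data end */
		offset = 0;
		do {
			if (pivots[offset] >= mas->index)
				break;
		} while (++offset < end);

		slots = ma_slots(node, type);
		/*
		 * No ma_dead_node() check here: if the node turns out to be
		 * dead, the existing check later in the walk discards the
		 * result.
		 */
		...
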
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 472aef7a3d5c..ad8bf3413889 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -3742,23 +3742,17 @@ static inline void *mtree_lookup_walk(struct ma_state *mas)
 	enum maple_type type;
 	void __rcu **slots;
 	unsigned char end;
-	unsigned long max;
 
 	next = mas->node;
-	max = ULONG_MAX;
 	do {
-		offset = 0;
 		node = mte_to_node(next);
 		type = mte_node_type(next);
 		pivots = ma_pivots(node, type);
-		end = ma_data_end(node, type, pivots, max);
-		if (unlikely(ma_dead_node(node)))
-			goto dead_node;
+		end = mt_pivots[type];
+		offset = 0;
 		do {
-			if (pivots[offset] >= mas->index) {
-				max = pivots[offset];
+			if (pivots[offset] >= mas->index)
 				break;
-			}
 		} while (++offset < end);
 
 		slots = ma_slots(node, type);
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index f9acc6ef0728..26991888da14 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -43,6 +43,7 @@ atomic_t maple_tree_tests_passed;
 /* #define BENCH_NODE_STORE */
 /* #define BENCH_AWALK */
 /* #define BENCH_WALK */
+/* #define BENCH_LOAD */
 /* #define BENCH_MT_FOR_EACH */
 /* #define BENCH_FORK */
 /* #define BENCH_MAS_FOR_EACH */
@@ -1754,6 +1755,19 @@ static noinline void __init bench_walk(struct maple_tree *mt)
 }
 #endif
 
+#if defined(BENCH_LOAD)
+static noinline void __init bench_load(struct maple_tree *mt)
+{
+	int i, max = 2500, count = 550000000;
+
+	for (i = 0; i < max; i += 10)
+		mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
+
+	for (i = 0; i < count; i++)
+		mtree_load(mt, 1470);
+}
+#endif
+
 #if defined(BENCH_MT_FOR_EACH)
 static noinline void __init bench_mt_for_each(struct maple_tree *mt)
 {
@@ -3620,6 +3634,13 @@ static int __init maple_tree_seed(void)
 	mtree_destroy(&tree);
 	goto skip;
 #endif
+#if defined(BENCH_LOAD)
+#define BENCH
+	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+	bench_load(&tree);
+	mtree_destroy(&tree);
+	goto skip;
+#endif
 #if defined(BENCH_FORK)
 #define BENCH
 	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
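
The new bench_load() above exercises the plain maple tree store/load API
that this patch speeds up. As a rough illustration of that API outside
the test framework (the tree name demo_mt and the helper my_mt_demo()
are made up for this sketch; only the mtree_*()/xa_*() calls come from
the patch), it could look something like:

#include <linux/maple_tree.h>
#include <linux/printk.h>
#include <linux/xarray.h>	/* xa_mk_value()/xa_to_value() */

static DEFINE_MTREE(demo_mt);		/* hypothetical tree */

static void my_mt_demo(void)		/* hypothetical helper */
{
	void *entry;

	/* Store a value for the index range [100, 105]. */
	mtree_store_range(&demo_mt, 100, 105, xa_mk_value(100), GFP_KERNEL);

	/* mtree_load() is the lookup path optimized by this patch. */
	entry = mtree_load(&demo_mt, 103);
	if (entry)			/* the entry stored above */
		pr_info("found %lu\n", xa_to_value(entry));

	entry = mtree_load(&demo_mt, 999);	/* no range covers 999 -> NULL */
}
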
--
2.39.2