Message-ID: <20220722160546.1478722-2-Liam.Howlett@oracle.com>
Date: Fri, 22 Jul 2022 16:06:03 +0000
From: Liam Howlett <liam.howlett@...cle.com>
To: "maple-tree@...ts.infradead.org" <maple-tree@...ts.infradead.org>,
"linux-mm@...ck.org" <linux-mm@...ck.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Yu Zhao <yuzhao@...gle.com>
CC: Hugh Dickins <hughd@...gle.com>
Subject: [PATCH Fix 1/2] maple_tree: Fix mas_expected_entries() off by one
When inserting nodes, a final call to split the nodes will require a new
parent.  Account for this extra node as working room in
mas_expected_entries().

Add a maple state flag which will WARN_ON() if there are insufficient
nodes allocated.

Export mas_is_err() so that the return of mas_store() can be checked
externally.
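
For reference, a rough sketch of the intended external usage (illustrative
only: mt, nr_entries, index[], last[] and entries[] are placeholders, and
the caller is assumed to hold the appropriate tree lock):

	MA_STATE(mas, &mt, 0, 0);
	unsigned long i;

	/* Reserve nodes for the expected number of entries up front. */
	if (mas_expected_entries(&mas, nr_entries))
		return -ENOMEM;

	for (i = 0; i < nr_entries; i++) {
		/* Store each entry using the preallocated node pool. */
		mas_set_range(&mas, index[i], last[i]);
		mas_store(&mas, entries[i]);
		if (mas_is_err(&mas))	/* now checkable outside the tree code */
			break;
	}

	/* Return unused preallocations and clear the bulk/prealloc state. */
	mas_destroy(&mas);

Here mas_is_err() surfaces the case where the reserved nodes turn out to be
insufficient for the actual stores.
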
Fixes: 06b152b7980a ("Maple Tree: add new data structure")
Reported-by: Yu Zhao <yuzhao@...gle.com>
Signed-off-by: Liam R. Howlett <Liam.Howlett@...cle.com>
---
 include/linux/maple_tree.h |  1 +
 lib/maple_tree.c           | 26 +++++++++++++++++++++-----
 2 files changed, 22 insertions(+), 5 deletions(-)
diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index bdb891b0d2b5..a30e03b06bed 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -457,6 +457,7 @@ void mas_store_prealloc(struct ma_state *mas, void *entry);
 void *mas_find(struct ma_state *mas, unsigned long max);
 void *mas_find_rev(struct ma_state *mas, unsigned long min);
 int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp);
+bool mas_is_err(struct ma_state *mas);
 bool mas_nomem(struct ma_state *mas, gfp_t gfp);
 void mas_pause(struct ma_state *mas);
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index d00ad50b258e..a3ead5fb5307 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -64,9 +64,15 @@
 #define MA_ROOT_PARENT 1
-/* Maple state flags */
+/*
+ * Maple state flags
+ * * MA_STATE_BULK - Bulk insert mode
+ * * MA_STATE_REBALANCE - Indicate a rebalance during bulk insert
+ * * MA_STATE_PREALLOC - Preallocated nodes, WARN_ON allocation
+ */
 #define MA_STATE_BULK 1
 #define MA_STATE_REBALANCE 2
+#define MA_STATE_PREALLOC 4
 #define ma_parent_ptr(x) ((struct maple_pnode *)(x))
 #define ma_mnode_ptr(x) ((struct maple_node *)(x))
@@ -243,7 +249,7 @@ static inline bool mas_is_start(struct ma_state *mas)
 	return mas->node == MAS_START;
 }
-static inline bool mas_is_err(struct ma_state *mas)
+bool mas_is_err(struct ma_state *mas)
 {
 	return xa_is_err(mas->node);
 }
@@ -1215,6 +1221,12 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
 		return;
 	mas_set_alloc_req(mas, 0);
+	if (mas->mas_flags & MA_STATE_PREALLOC) {
+		if (allocated)
+			return;
+		WARN_ON(!allocated);
+	}
+
 	if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS - 1) {
 		node = (struct maple_alloc *)mt_alloc_one(gfp);
 		if (!node)
@@ -5706,6 +5718,7 @@ int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
 	int ret;
 	mas_node_count_gfp(mas, 1 + mas_mt_height(mas) * 3, gfp);
+	mas->mas_flags |= MA_STATE_PREALLOC;
 	if (likely(!mas_is_err(mas)))
 		return 0;
@@ -5748,7 +5761,7 @@ void mas_destroy(struct ma_state *mas)
 		mas->mas_flags &= ~MA_STATE_REBALANCE;
 	}
-	mas->mas_flags &= ~MA_STATE_BULK;
+	mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);
 	while (mas->alloc && !((unsigned long)mas->alloc & 0x1)) {
 		node = mas->alloc;
@@ -5799,7 +5812,6 @@ int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
 	 * insertion of entries.
 	 */
 	nr_nodes = max(nr_entries, nr_entries * 2 + 1);
-
 	if (!mt_is_alloc(mas->tree))
 		nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;
@@ -5807,7 +5819,11 @@ int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
 	nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 1);
 	/* Internal nodes */
 	nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
-	mas_node_count(mas, nr_nodes);
+	/* Add one for working room */
+	mas_node_count(mas, nr_nodes + 1);
+
+	/* Detect if allocations run out */
+	mas->mas_flags |= MA_STATE_PREALLOC;
 	if (!mas_is_err(mas))
 		return 0;
--
2.35.1