Message-ID: <20260115193647.1695937-13-Liam.Howlett@oracle.com>
Date: Thu, 15 Jan 2026 14:36:31 -0500
From: "Liam R. Howlett" <Liam.Howlett@...cle.com>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: maple-tree@...ts.infradead.org, linux-mm@...ck.org,
        linux-kernel@...r.kernel.org, Suren Baghdasaryan <surenb@...gle.com>,
        Matthew Wilcox <willy@...radead.org>,
        Sidhartha Kumar <sidhartha.kumar@...cle.com>,
        Vlastimil Babka <vbabka@...e.cz>, Alice Ryhl <aliceryhl@...gle.com>,
        Kuninori Morimoto <kuninori.morimoto.gx@...esas.com>,
        Geert Uytterhoeven <geert@...ux-m68k.org>,
        Arnd Bergmann <arnd@...db.de>, Christian Kujau <lists@...dbynature.de>,
        "Liam R. Howlett" <Liam.Howlett@...cle.com>
Subject: [PATCH 12/28] maple_tree: Inline mas_spanning_rebalance_loop() into mas_wr_spanning_rebalance()

Copy the loop body into mas_wr_spanning_rebalance() and replace count
with height.  This is done so that the next change does not affect other
code paths into mas_spanning_rebalance_loop().

No functional change intended.

Signed-off-by: Liam R. Howlett <Liam.Howlett@...cle.com>
---
 lib/maple_tree.c | 108 ++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 107 insertions(+), 1 deletion(-)
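For illustration only, and not part of this patch: a minimal standalone C
sketch of the pattern described above, where a shared helper loop is
copied into the one caller that needs to diverge so that a later change
to the copy cannot affect the helper's other callers.  All names below
(sum_loop(), caller_a(), caller_b()) are made up.

#include <stdio.h>

/* Shared helper used by several callers; iterates on "count". */
static int sum_loop(const int *vals, int count)
{
	int total = 0;

	while (count--)
		total += vals[count];
	return total;
}

/* Caller that keeps using the shared helper unchanged. */
static int caller_a(const int *vals, int count)
{
	return sum_loop(vals, count);
}

/*
 * Caller with the helper body inlined, so the loop variable can be
 * renamed (here to "height") and later specialized without touching
 * caller_a() or sum_loop().
 */
static int caller_b(const int *vals, int height)
{
	int total = 0;

	while (height--)
		total += vals[height];
	return total;
}

int main(void)
{
	int vals[] = { 1, 2, 3, 4 };

	/* Both print 10; the two callers are now decoupled. */
	printf("%d %d\n", caller_a(vals, 4), caller_b(vals, 4));
	return 0;
}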

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 8927e8e1b8012..969be457f6df8 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -2855,6 +2855,13 @@ static void mas_spanning_rebalance(struct ma_state *mas,
 static noinline void mas_wr_spanning_rebalance(struct ma_state *mas,
 		struct ma_wr_state *l_wr_mas, struct ma_wr_state *r_wr_mas)
 {
+
+	unsigned char split, mid_split;
+	unsigned char slot = 0;
+	unsigned char new_height = 0; /* used if node is a new root */
+	struct maple_enode *left = NULL, *middle = NULL, *right = NULL;
+	struct maple_enode *old_enode;
+
 	struct maple_subtree_state mast;
 	struct maple_big_node b_node;
 	struct maple_copy cp;
@@ -2910,7 +2917,106 @@ static noinline void mas_wr_spanning_rebalance(struct ma_state *mas,
 		mast_spanning_rebalance(&mast);
 
 	height = mas_mt_height(mas) + 1;
-	mas_spanning_rebalance_loop(mas, &mast, height);
+
+	/*
+	 * Each level of the tree is examined and balanced, pushing data to the left or
+	 * right, or rebalancing against left or right nodes is employed to avoid
+	 * rippling up the tree to limit the amount of churn.  Once a new sub-section of
+	 * the tree is created, there may be a mix of new and old nodes.  The old nodes
+	 * will have the incorrect parent pointers and currently be in two trees: the
+	 * original tree and the partially new tree.  To remedy the parent pointers in
+	 * the old tree, the new data is swapped into the active tree and a walk down
+	 * the tree is performed and the parent pointers are updated.
+	 * See mas_topiary_replace() for more information.
+	 */
+	while (height--) {
+		mast.bn->b_end--;
+		mast.bn->type = mte_node_type(mast.orig_l->node);
+		split = mas_mab_to_node(mas, mast.bn, &left, &right, &middle,
+					&mid_split);
+		mast_set_split_parents(&mast, left, middle, right, split,
+				       mid_split);
+		mast_cp_to_nodes(&mast, left, middle, right, split, mid_split);
+		new_height++;
+
+		/*
+		 * Copy data from next level in the tree to mast.bn from next
+		 * iteration
+		 */
+		memset(mast.bn, 0, sizeof(struct maple_big_node));
+		mast.bn->type = mte_node_type(left);
+
+		/* Root already stored in l->node. */
+		if (mas_is_root_limits(mast.l))
+			goto new_root;
+
+		mast_ascend(&mast);
+		mast_combine_cp_left(&mast);
+		mast.l->offset = mast.bn->b_end;
+		mab_set_b_end(mast.bn, mast.l, left);
+		mab_set_b_end(mast.bn, mast.m, middle);
+		mab_set_b_end(mast.bn, mast.r, right);
+
+		/* Copy anything necessary out of the right node. */
+		mast_combine_cp_right(&mast);
+		mast.orig_l->last = mast.orig_l->max;
+
+		if (mast_sufficient(&mast)) {
+			if (mast_overflow(&mast))
+				continue;
+
+			if (mast.orig_l->node == mast.orig_r->node) {
+			       /*
+				* The data in b_node should be stored in one
+				* node and in the tree
+				*/
+				slot = mast.l->offset;
+				break;
+			}
+
+			continue;
+		}
+
+		/* May be a new root stored in mast.bn */
+		if (mas_is_root_limits(mast.orig_l))
+			break;
+
+		mast_spanning_rebalance(&mast);
+
+		/* rebalancing from other nodes may require another loop. */
+		if (!height)
+			height++;
+	}
+
+	mast.l->node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
+				mte_node_type(mast.orig_l->node));
+
+	mab_mas_cp(mast.bn, 0, mt_slots[mast.bn->type] - 1, mast.l, true);
+	new_height++;
+	mas_set_parent(mas, left, mast.l->node, slot);
+	if (middle)
+		mas_set_parent(mas, middle, mast.l->node, ++slot);
+
+	if (right)
+		mas_set_parent(mas, right, mast.l->node, ++slot);
+
+	if (mas_is_root_limits(mast.l)) {
+new_root:
+		mas_mn(mast.l)->parent = ma_parent_ptr(mas_tree_parent(mas));
+		while (!mte_is_root(mast.orig_l->node))
+			mast_ascend(&mast);
+	} else {
+		mas_mn(mast.l)->parent = mas_mn(mast.orig_l)->parent;
+	}
+
+	old_enode = mast.orig_l->node;
+	mas->depth = mast.l->depth;
+	mas->node = mast.l->node;
+	mas->min = mast.l->min;
+	mas->max = mast.l->max;
+	mas->offset = mast.l->offset;
+	mas_wmb_replace(mas, old_enode, new_height);
+	mtree_range_walk(mas);
 }
 /*
  * mas_rebalance() - Rebalance a given node.
-- 
2.47.3
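For context, and again not part of this patch: a minimal sketch of the
kind of public-API usage that can reach the spanning-store path rewritten
above, i.e. a store whose range starts in one leaf node and ends in
another.  The module name, the number of entries and the stored values
are arbitrary; whether the spanning rebalance is actually taken depends
on the resulting tree shape.

#include <linux/gfp.h>
#include <linux/maple_tree.h>
#include <linux/module.h>
#include <linux/xarray.h>	/* xa_mk_value() */

static DEFINE_MTREE(example_mt);

static int __init spanning_store_example_init(void)
{
	unsigned long i;
	int ret;

	/* Populate enough adjacent ranges that the tree grows levels. */
	for (i = 0; i < 100; i++) {
		ret = mtree_store_range(&example_mt, i * 10, i * 10 + 9,
					xa_mk_value(i), GFP_KERNEL);
		if (ret)
			return ret;
	}

	/*
	 * This store covers many of the ranges above, so the write starts
	 * and ends in different leaf nodes and is handled as a spanning
	 * store, merging and rebalancing nodes level by level.
	 */
	return mtree_store_range(&example_mt, 15, 875, xa_mk_value(1000),
				 GFP_KERNEL);
}

static void __exit spanning_store_example_exit(void)
{
	mtree_destroy(&example_mt);
}

module_init(spanning_store_example_init);
module_exit(spanning_store_example_exit);
MODULE_LICENSE("GPL");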

