[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20220915010957.25506-2-richard.weiyang@gmail.com>
Date: Thu, 15 Sep 2022 01:09:57 +0000
From: Wei Yang <richard.weiyang@...il.com>
To: willy@...radead.org, richard.weiyang@...il.com
Cc: linux-kernel@...r.kernel.org
Subject: [Patch v2 2/2] XArray: Fix xas_create_range() when lower multi-order entry present
If there is already a lower-order entry present, xas_create_range()
would face two problems:
* When new_order is roundup(order, XA_CHUNK_SHIFT), it would go up and
access root->parent
* When there are holes in the lower-order range, no proper entry is created
This patch fixes the issue by adjusting to the proper next_index when a
multi-order entry is found.
Signed-off-by: Wei Yang <richard.weiyang@...il.com>
---
v2:
* guard result check with rcu lock
* instead of continue the iteration on multi-order entry, xas_set() the
index to let xas_create() restart
* put hole in each possible position
---
lib/test_xarray.c | 65 +++++++++++++++++++++++++++++++++++++++++++++++
lib/xarray.c | 22 +++++++++++++---
2 files changed, 84 insertions(+), 3 deletions(-)
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index e77d4856442c..5ec9c19ad65e 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -1482,6 +1482,70 @@ static noinline void check_create_range_5(struct xarray *xa,
xa_destroy(xa);
}
+static noinline void check_create_range_6(struct xarray *xa)
+{
+ unsigned long index = 0;
+ unsigned int order, next_order;
+
+ order = 2 * XA_CHUNK_SHIFT - 2;
+
+ for (next_order = order + 1; next_order <= roundup(order, XA_CHUNK_SHIFT) + 1;
+ next_order++) {
+ unsigned long hole;
+
+ for (hole = 0; hole < (1 << next_order); hole += 1 << order) {
+ XA_STATE(load_xas, xa, 0);
+ XA_STATE_ORDER(xas, xa, 0, next_order);
+
+ for (index = 0; index < (1 << next_order);
+ index += 1 << order) {
+ if (index == hole)
+ continue;
+ xa_store_order(xa, index, order,
+ xa_mk_index(index), GFP_KERNEL);
+ }
+
+ // [hole, hole + (1 << order) - 1] is empty now
+ xas_set(&load_xas, hole);
+ rcu_read_lock();
+ xas_load(&load_xas);
+ XA_BUG_ON(xa, load_xas.xa_node == NULL);
+ XA_BUG_ON(xa, load_xas.xa_node->shift == 0);
+ rcu_read_unlock();
+
+ xas_set(&load_xas, hole + (1 << order) - 1);
+ rcu_read_lock();
+ xas_load(&load_xas);
+ XA_BUG_ON(xa, load_xas.xa_node == NULL);
+ XA_BUG_ON(xa, load_xas.xa_node->shift == 0);
+ rcu_read_unlock();
+
+ do {
+ xas_lock(&xas);
+ xas_create_range(&xas);
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+
+ // [hole, hole + (1 << order) - 1] is created now
+ xas_set(&load_xas, hole);
+ rcu_read_lock();
+ xas_load(&load_xas);
+ XA_BUG_ON(xa, load_xas.xa_node == NULL);
+ XA_BUG_ON(xa, load_xas.xa_node->shift != 0);
+ rcu_read_unlock();
+
+ xas_set(&load_xas, hole + (1 << order) - 1);
+ rcu_read_lock();
+ xas_load(&load_xas);
+ XA_BUG_ON(xa, load_xas.xa_node == NULL);
+ XA_BUG_ON(xa, load_xas.xa_node->shift != 0);
+ rcu_read_unlock();
+
+ xa_destroy(xa);
+ }
+ }
+}
+
static noinline void check_create_range(struct xarray *xa)
{
unsigned int order;
@@ -1515,6 +1579,7 @@ static noinline void check_create_range(struct xarray *xa)
}
check_create_range_3();
+ check_create_range_6(xa);
}
static noinline void __check_store_range(struct xarray *xa, unsigned long first,
diff --git a/lib/xarray.c b/lib/xarray.c
index ed50a26d97a3..8b3df256b407 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -694,6 +694,7 @@ void xas_create_range(struct xa_state *xas)
unsigned long index = xas->xa_index;
unsigned char shift = xas->xa_shift;
unsigned char sibs = xas->xa_sibs;
+ struct xa_node *node;
xas->xa_index |= ((sibs + 1UL) << shift) - 1;
if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
@@ -709,14 +710,29 @@ void xas_create_range(struct xa_state *xas)
goto success;
xas->xa_index -= XA_CHUNK_SIZE;
+ node = xas->xa_node;
+ if (node->shift) {
+ unsigned long next_index = xas->xa_index >> node->shift;
+
+ next_index &= ~XA_CHUNK_MASK;
+ next_index += xas->xa_offset;
+ next_index <<= node->shift;
+
+ if (next_index <= (index & ~XA_CHUNK_MASK))
+ goto success;
+
+ xas_set(xas, next_index - 1);
+ continue;
+ }
+
for (;;) {
- struct xa_node *node = xas->xa_node;
- if (node->shift >= shift)
- break;
xas->xa_node = xa_parent_locked(xas->xa, node);
+ if (!xas->xa_node)
+ break;
xas->xa_offset = node->offset - 1;
if (node->offset != 0)
break;
+ node = xas->xa_node;
}
}
--
2.33.1
Powered by blists - more mailing lists