Date: Tue, 4 Jun 2024 15:07:46 -0400
From: "Liam R. Howlett" <Liam.Howlett@...cle.com>
To: Sidhartha Kumar <sidhartha.kumar@...cle.com>
Cc: linux-kernel@...r.kernel.org, maple-tree@...ts.infradead.org,
        linux-mm@...ck.org, akpm@...ux-foundation.org,
        zhangpeng.00@...edance.com, willy@...radead.org
Subject: Re: [PATCH 04/18] maple_tree: introduce mas_wr_store_type()

* Sidhartha Kumar <sidhartha.kumar@...cle.com> [240604 13:42]:
> Introduce mas_wr_store_type() which will set the correct store type
> based on a walk of the tree.
> 
> mas_prealloc_calc() is also introduced to abstract the calculation used
> to determine the number of nodes needed for a store operation.
> 
> Also, add a test case to validate that the ordering of the store type
> checks is correct. This test models a vma expanding and then shrinking,
> which is part of the boot process.
> 
> mas_wr_preallocate() is introduced as a wrapper function to set the store
> type and preallocate enough nodes.
> 
> Signed-off-by: Sidhartha Kumar <sidhartha.kumar@...cle.com>
> ---
>  lib/maple_tree.c                 | 210 ++++++++++++++++++++++---------
>  tools/testing/radix-tree/maple.c |  35 ++++++
>  2 files changed, 186 insertions(+), 59 deletions(-)
> 
> diff --git a/lib/maple_tree.c b/lib/maple_tree.c
> index 2558d15bb748..b37fa8690558 100644
> --- a/lib/maple_tree.c
> +++ b/lib/maple_tree.c
> @@ -4278,6 +4278,150 @@ static inline void mas_wr_prealloc_setup(struct ma_wr_state *wr_mas)
>  	wr_mas->content = mas_start(mas);
>  }
>  
> +/**
> + * mas_prealloc_calc() - Calculate number of nodes needed for a
> + * given store operation
> + * @mas: The maple state.
> + *
> + * Return: Number of nodes required for preallocation.
> + */
> +static inline int mas_prealloc_calc(struct ma_state *mas, void *entry)
> +{
> +	int ret = mas_mt_height(mas) * 3 + 1;
> +
> +	switch (mas->store_type) {
> +	case wr_invalid:
> +		WARN_ON_ONCE(1);
> +		break;
> +	case wr_new_root:
> +		ret = 1;
> +		break;
> +	case wr_store_root:
> +		if (likely((mas->last != 0) || (mas->index != 0)))
> +			ret = 1;
> +		else if (((unsigned long) (entry) & 3) == 2)
> +			ret = 1;
> +		else
> +			ret = 0;
> +		break;
> +	case wr_spanning_store:
> +		ret =  mas_mt_height(mas) * 3 + 1;
> +		break;
> +	case wr_split_store:
> +		ret =  mas_mt_height(mas) * 2 + 1;
> +		break;
> +	case wr_rebalance:
> +		ret =  mas_mt_height(mas) * 2 - 1;
> +		break;
> +	case wr_node_store:
> +	case wr_bnode:
> +		ret = mt_in_rcu(mas->tree) ? 1 : 0;
> +		break;
> +	case wr_append:
> +	case wr_exact_fit:
> +	case wr_slot_store:
> +		ret = 0;
> +	}
> +
> +	return ret;
> +}
> +
> +/*
> + * mas_wr_store_type() - Set the store type for a given
> + * store operation.
> + * @wr_mas: The maple write state
> + */
> +static inline void mas_wr_store_type(struct ma_wr_state *wr_mas)
> +{
> +	struct ma_state *mas = wr_mas->mas;
> +	unsigned char new_end;
> +
> +	if (unlikely(mas_is_none(mas) || mas_is_ptr(mas))) {
> +		mas->store_type = wr_store_root;
> +		return;
> +	}
> +
> +	if (unlikely(!mas_wr_walk(wr_mas))) {
> +		mas->store_type = wr_spanning_store;
> +		return;
> +	}
> +
> +	/* At this point, we are at the leaf node that needs to be altered. */
> +	mas_wr_end_piv(wr_mas);
> +	if (!wr_mas->entry)
> +		mas_wr_extend_null(wr_mas);
> +
> +	new_end = mas_wr_new_end(wr_mas);
> +	if ((wr_mas->r_min == mas->index) && (wr_mas->r_max == mas->last)) {
> +		mas->store_type = wr_exact_fit;
> +		return;
> +	}
> +
> +	if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
> +		mas->store_type = wr_new_root;
> +		return;
> +	}
> +
> +	/* Potential spanning rebalance collapsing a node */
> +	if (new_end < mt_min_slots[wr_mas->type]) {
> +		if (!mte_is_root(mas->node)) {
> +			mas->store_type = wr_rebalance;
> +			return;
> +		}
> +		mas->store_type = wr_node_store;
> +		return;
> +	}
> +
> +	if (new_end >= mt_slots[wr_mas->type]) {
> +		mas->store_type = wr_split_store;
> +		return;
> +	}
> +
> +	if (!mt_in_rcu(mas->tree) && (mas->offset == mas->end)) {
> +		mas->store_type = wr_append;
> +		return;
> +	}
> +
> +	if ((new_end == mas->end) && (!mt_in_rcu(mas->tree) ||
> +		(wr_mas->offset_end - mas->offset == 1))) {
> +		mas->store_type = wr_slot_store;
> +		return;
> +	}
> +
> +	if (mte_is_root(mas->node) || !(new_end <= mt_min_slots[wr_mas->type]) ||
> +		(mas->mas_flags & MA_STATE_BULK)) {
> +		mas->store_type = wr_node_store;
> +		return;
> +	}
> +
> +	mas->store_type = wr_bnode;
> +}
> +
> +/**
> + * mas_wr_preallocate() - Preallocate enough nodes for a store operation
> + * @wr_mas: The maple write state
> + * @entry: The entry that will be stored
> + * @gfp: The GFP_FLAGS to use for allocations.
> + *
> + */
> +static inline void mas_wr_preallocate(struct ma_wr_state *wr_mas, void *entry, gfp_t gfp)
> +{
> +	struct ma_state *mas = wr_mas->mas;
> +	int request;
> +
> +	mas_wr_prealloc_setup(wr_mas);
> +	mas_wr_store_type(wr_mas);
> +	request = mas_prealloc_calc(mas, entry);
> +	if (!request)
> +		return;
> +
> +	mas_node_count_gfp(mas, request, gfp);
> +	if (likely(!mas_is_err(mas)))
> +		return;
> +
> +	mas_set_alloc_req(mas, 0);
> +}
> +
>  /**
>   * mas_insert() - Internal call to insert a value
>   * @mas: The maple state
> @@ -5506,69 +5650,17 @@ EXPORT_SYMBOL_GPL(mas_store_prealloc);
>  int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
>  {
>  	MA_WR_STATE(wr_mas, mas, entry);
> -	unsigned char node_size;
> -	int request = 1;
> -	int ret;
> -
> -
> -	if (unlikely(!mas->index && mas->last == ULONG_MAX))
> -		goto ask_now;
> -
> -	mas_wr_prealloc_setup(&wr_mas);
> -	/* Root expand */
> -	if (unlikely(mas_is_none(mas) || mas_is_ptr(mas)))
> -		goto ask_now;
> -
> -	if (unlikely(!mas_wr_walk(&wr_mas))) {
> -		/* Spanning store, use worst case for now */
> -		request = 1 + mas_mt_height(mas) * 3;
> -		goto ask_now;
> -	}
> -
> -	/* At this point, we are at the leaf node that needs to be altered. */
> -	/* Exact fit, no nodes needed. */
> -	if (wr_mas.r_min == mas->index && wr_mas.r_max == mas->last)
> -		return 0;
> -
> -	mas_wr_end_piv(&wr_mas);
> -	node_size = mas_wr_new_end(&wr_mas);
> -
> -	/* Slot store, does not require additional nodes */
> -	if (node_size == mas->end) {
> -		/* reuse node */
> -		if (!mt_in_rcu(mas->tree))
> -			return 0;
> -		/* shifting boundary */
> -		if (wr_mas.offset_end - mas->offset == 1)
> -			return 0;
> -	}
> +	int ret = 0;
>  
> -	if (node_size >= mt_slots[wr_mas.type]) {
> -		/* Split, worst case for now. */
> -		request = 1 + mas_mt_height(mas) * 2;
> -		goto ask_now;
> +	mas_wr_preallocate(&wr_mas, entry, gfp);
> +	if (mas_is_err(mas)) {
> +		ret = xa_err(mas->node);

The mas_reset(mas) before mas_destroy() was silently dropped here.  I
don't think that's wrong, but it should probably be mentioned or
commented on.  I believe the reset was needed for the rebalance case,
which may no longer apply, so it's probably not an issue here.  Since
the state is now separated from the node in the maple state, it may not
be necessary to reset at all anymore.
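
If the reset before mas_destroy() does still matter, restoring it with
a comment, e.g. (just a sketch, assuming the early reset is in fact
still wanted):

	if (mas_is_err(mas)) {
		ret = xa_err(mas->node);
		/*
		 * Reset the walk state before tearing down the
		 * preallocations; may be unnecessary now that the state
		 * is separate from the node.
		 */
		mas_reset(mas);
		mas_destroy(mas);
		mas_reset(mas);
		return ret;
	}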

> +		mas_destroy(mas);
> +		mas_reset(mas);
> +		return ret;
>  	}
>  
> -	/* New root needs a single node */
> -	if (unlikely(mte_is_root(mas->node)))
> -		goto ask_now;
> -
> -	/* Potential spanning rebalance collapsing a node, use worst-case */
> -	if (node_size  - 1 <= mt_min_slots[wr_mas.type])
> -		request = mas_mt_height(mas) * 2 - 1;
> -
> -	/* node store, slot store needs one node */
> -ask_now:
> -	mas_node_count_gfp(mas, request, gfp);
>  	mas->mas_flags |= MA_STATE_PREALLOC;
> -	if (likely(!mas_is_err(mas)))
> -		return 0;
> -
> -	mas_set_alloc_req(mas, 0);
> -	ret = xa_err(mas->node);
> -	mas_reset(mas);
> -	mas_destroy(mas);
> -	mas_reset(mas);
>  	return ret;
>  }
>  EXPORT_SYMBOL_GPL(mas_preallocate);
> diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c
> index f1caf4bcf937..c57979de1576 100644
> --- a/tools/testing/radix-tree/maple.c
> +++ b/tools/testing/radix-tree/maple.c
> @@ -36223,6 +36223,37 @@ static noinline void __init check_mtree_dup(struct maple_tree *mt)
>  
>  extern void test_kmem_cache_bulk(void);
>  
> +
> + /* test to simulate expanding a vma from [0x7fffffffe000, 0x7ffffffff000)
> +  * to [0x7ffde4ca1000, 0x7ffffffff000) and then shrinking the vma to
> +  * [0x7ffde4ca1000, 0x7ffde4ca2000)
> +  */
> +static inline int check_vma_modification(struct maple_tree *mt)
> +{
> +	MA_STATE(mas, mt, 0, 0);


Don't we need locking in here?
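
Maybe wrap the sequence, e.g. (just a sketch, assuming the test can
take the internal lock instead of relying on MT_FLAGS_LOCK_EXTERN):

	mtree_lock(mt);

	/* vma with old start and old end */
	__mas_set_range(&mas, 0x7fffffffe000, 0x7ffffffff000 - 1);
	mas_preallocate(&mas, xa_mk_value(1), GFP_KERNEL);
	mas_store_prealloc(&mas, xa_mk_value(1));
	/* ... remaining preallocate/store pairs ... */

	mtree_unlock(mt);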

> +
> +	/* vma with old start and old end */
> +	__mas_set_range(&mas, 0x7fffffffe000, 0x7ffffffff000 - 1);
> +	mas_preallocate(&mas, xa_mk_value(1), GFP_KERNEL);
> +	mas_store_prealloc(&mas, xa_mk_value(1));
> +
> +	/* next write occurs partly in previous range [0, 0x7fffffffe000)*/
> +	mas_prev_range(&mas, 0);
> +	/* expand vma to {0x7ffde4ca1000, 0x7ffffffff000) */
> +	__mas_set_range(&mas, 0x7ffde4ca1000, 0x7ffffffff000 - 1);
> +	mas_preallocate(&mas, xa_mk_value(1), GFP_KERNEL);
> +	mas_store_prealloc(&mas, xa_mk_value(1));
> +
> +	/* shrink vma to [0x7ffde4ca1000, 7ffde4ca2000) */
> +	__mas_set_range(&mas, 0x7ffde4ca2000, 0x7ffffffff000 - 1);
> +	mas_preallocate(&mas, NULL, GFP_KERNEL);
> +	mas_store_prealloc(&mas, NULL);
> +	mt_dump(mt, mt_dump_hex);
> +
> +	return 0;
> +}
> +
> +
>  void farmer_tests(void)
>  {
>  	struct maple_node *node;
> @@ -36230,6 +36261,10 @@ void farmer_tests(void)
>  
>  	mt_dump(&tree, mt_dump_dec);
>  
> +	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN | MT_FLAGS_USE_RCU);
> +	check_vma_modification(&tree);
> +	mtree_destroy(&tree);
> +
>  	tree.ma_root = xa_mk_value(0);
>  	mt_dump(&tree, mt_dump_dec);
>  
> -- 
> 2.45.1
> 
