Message-ID: <20230523175451.unnpbm5ye35h2jbl@revolver>
Date: Tue, 23 May 2023 13:54:51 -0400
From: "Liam R. Howlett" <Liam.Howlett@...cle.com>
To: Peng Zhang <zhangpeng.00@...edance.com>
Cc: akpm@...ux-foundation.org, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, maple-tree@...ts.infradead.org
Subject: Re: [PATCH v3 06/10] maple_tree: Add mas_wr_new_end() to calculate
new_end accurately
* Peng Zhang <zhangpeng.00@...edance.com> [230522 01:07]:
> The previous new_end calculation was inaccurate: it assumed that two
> new pivots must always be added, so some writes missed the fast path
> and fell through to the slow path. Add mas_wr_new_end() to calculate
> new_end exactly and make the condition for entering the fast path
> accurate.
>
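Just to spell out where the counting comes from, here is a minimal
standalone sketch of the same arithmetic (the ma_wr_state fields are
passed as plain parameters purely for illustration; this is not the
tree code itself):

/*
 * A write spanning slots [offset, offset_end] consumes those
 * (offset_end - offset + 1) existing entries and produces at most
 * three: a left remainder, the new entry, and a right remainder.
 * The two decrements drop whichever remainder the write makes
 * unnecessary by lining up with the existing range boundaries.
 */
static unsigned char new_end_sketch(unsigned char node_end,
		unsigned char offset, unsigned char offset_end,
		unsigned long index, unsigned long last,
		unsigned long r_min, unsigned long end_piv)
{
	unsigned char new_end = node_end + 2 - (offset_end - offset);

	if (r_min == index)	/* starts on a boundary: no left remainder */
		new_end--;

	if (end_piv == last)	/* ends on a boundary: no right remainder */
		new_end--;

	return new_end;
}
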
Reviewed-by: Liam R. Howlett <Liam.Howlett@...cle.com>
> Signed-off-by: Peng Zhang <zhangpeng.00@...edance.com>
> ---
> lib/maple_tree.c | 34 +++++++++++++++++++++++-----------
> 1 file changed, 23 insertions(+), 11 deletions(-)
>
> diff --git a/lib/maple_tree.c b/lib/maple_tree.c
> index 562507979a4b..0550a07355d7 100644
> --- a/lib/maple_tree.c
> +++ b/lib/maple_tree.c
> @@ -4297,6 +4297,21 @@ static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
> }
> }
>
> +static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
> +{
> + struct ma_state *mas = wr_mas->mas;
> + unsigned char new_end = wr_mas->node_end + 2;
> +
> + new_end -= wr_mas->offset_end - mas->offset;
> + if (wr_mas->r_min == mas->index)
> + new_end--;
> +
> + if (wr_mas->end_piv == mas->last)
> + new_end--;
> +
> + return new_end;
> +}
> +
> static inline bool mas_wr_append(struct ma_wr_state *wr_mas)
> {
> unsigned char end = wr_mas->node_end;
> @@ -4352,9 +4367,8 @@ static void mas_wr_bnode(struct ma_wr_state *wr_mas)
>
> static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
> {
> - unsigned char node_slots;
> - unsigned char node_size;
> struct ma_state *mas = wr_mas->mas;
> + unsigned char new_end;
>
> /* Direct replacement */
> if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
> @@ -4364,17 +4378,15 @@ static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
> return;
> }
>
> - /* Attempt to append */
> - node_slots = mt_slots[wr_mas->type];
> - node_size = wr_mas->node_end - wr_mas->offset_end + mas->offset + 2;
> - if (mas->max == ULONG_MAX)
> - node_size++;
> -
> - /* slot and node store will not fit, go to the slow path */
> - if (unlikely(node_size >= node_slots))
> + /*
> + * new_end exceeds the size of the maple node and cannot enter the fast
> + * path.
> + */
> + new_end = mas_wr_new_end(wr_mas);
> + if (new_end >= mt_slots[wr_mas->type])
> goto slow_path;
>
> - if (wr_mas->entry && (wr_mas->node_end < node_slots - 1) &&
> + if (wr_mas->entry && (wr_mas->node_end < mt_slots[wr_mas->type] - 1) &&
> (mas->offset == wr_mas->node_end) && mas_wr_append(wr_mas)) {
> if (!wr_mas->content || !wr_mas->entry)
> mas_update_gap(mas);
> --
> 2.20.1
>
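To make the gain concrete with made-up numbers: on a full node with 16
slots (node_end = 15), a store that exactly replaces two adjacent
entries has offset_end - offset = 1 with r_min == index and
end_piv == last. The old estimate, node_end - offset_end + offset + 2,
comes to 16 and forces the slow path, while mas_wr_new_end() returns 14
and the write can stay on the fast path.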