Message-ID: <c0530aec-754b-f382-3c96-35437d5ec762@bytedance.com>
Date: Tue, 16 May 2023 18:53:41 +0800
From: Peng Zhang <zhangpeng.00@...edance.com>
To: "Liam R. Howlett" <Liam.Howlett@...cle.com>
Cc: Peng Zhang <zhangpeng.00@...edance.com>, akpm@...ux-foundation.org,
linux-mm@...ck.org, linux-kernel@...r.kernel.org,
maple-tree@...ts.infradead.org
Subject: Re: [PATCH 10/10] maple_tree: Simplify and clean up mas_wr_node_store()
On 2023/5/16 02:58, Liam R. Howlett wrote:
> * Peng Zhang <zhangpeng.00@...edance.com> [230515 09:18]:
>> Simplify and clean up mas_wr_node_store(), remove unnecessary code.
>
> This change fails the userspace testing for me.
>
>>
>> Signed-off-by: Peng Zhang <zhangpeng.00@...edance.com>
>> ---
>> lib/maple_tree.c | 75 +++++++++++++-----------------------------------
>> 1 file changed, 20 insertions(+), 55 deletions(-)
>>
>> diff --git a/lib/maple_tree.c b/lib/maple_tree.c
>> index d558e7bcb6da8..ff4aa01cf88b6 100644
>> --- a/lib/maple_tree.c
>> +++ b/lib/maple_tree.c
>> @@ -4066,46 +4066,21 @@ static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
>> *
>> * Return: True if stored, false otherwise
>> */
>> -static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas)
>> +static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
>> + unsigned char new_end)
>> {
>> struct ma_state *mas = wr_mas->mas;
>> void __rcu **dst_slots;
>> unsigned long *dst_pivots;
>> unsigned char dst_offset;
>> - unsigned char new_end = wr_mas->node_end;
>> - unsigned char offset;
>> - unsigned char node_slots = mt_slots[wr_mas->type];
>> struct maple_node reuse, *newnode;
>> - unsigned char copy_size, max_piv = mt_pivots[wr_mas->type];
>> + unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type];
>> bool in_rcu = mt_in_rcu(mas->tree);
>>
>> - offset = mas->offset;
>> - if (mas->last == wr_mas->r_max) {
>> - /* runs right to the end of the node */
>> - if (mas->last == mas->max)
>> - new_end = offset;
>> - /* don't copy this offset */
>> + if (mas->last == wr_mas->end_piv)
>> wr_mas->offset_end++;
I think there may be a problem here: if we modify wr_mas->offset_end
but this function then fails (so the fast path is not taken), we enter
the slow path with the already-modified offset_end. However, the
previous version has the same problem.
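
To make the concern concrete, here is a minimal, self-contained sketch.
It is not the kernel code: wr_state, fast_path_store() and modify() are
made-up stand-ins for ma_wr_state, mas_wr_node_store() and
mas_wr_modify(), only to show how bumping offset_end before the
bail-out check leaves a later fallback with the modified value:

#include <stdbool.h>
#include <stdio.h>

struct wr_state {
	unsigned char offset_end;
	bool can_fast_store;	/* stands in for the "enough room/data" checks */
};

static bool fast_path_store(struct wr_state *wr)
{
	wr->offset_end++;	/* side effect happens before the bail-out check */

	if (!wr->can_fast_store)
		return false;	/* fail, but offset_end stays incremented */

	/* ... perform the in-place store here ... */
	return true;
}

static void modify(struct wr_state *wr)
{
	if (fast_path_store(wr))
		return;

	/* the slow path now observes the already-modified offset_end */
	printf("slow path sees offset_end = %u\n", (unsigned)wr->offset_end);
}

int main(void)
{
	struct wr_state wr = { .offset_end = 2, .can_fast_store = false };

	modify(&wr);	/* prints 3, not the original 2 */
	return 0;
}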
I applied this patch to linux-next/master, but the userspace tests
passed for me. I need more information to confirm what the problem
is.
Thanks.
>> - } else if (mas->last < wr_mas->r_max) {
>> - /* new range ends in this range */
>> - if (unlikely(wr_mas->r_max == ULONG_MAX))
>> - mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type);
>> -
>> - new_end++;
>> - } else {
>> - if (wr_mas->end_piv == mas->last)
>> - wr_mas->offset_end++;
>> -
>> - new_end -= wr_mas->offset_end - offset - 1;
>> - }
>> -
>> - /* new range starts within a range */
>> - if (wr_mas->r_min < mas->index)
>> - new_end++;
>> -
>> - /* Not enough room */
>> - if (new_end >= node_slots)
>> - return false;
>> + else if (unlikely(wr_mas->r_max == ULONG_MAX))
>> + mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type);
>>
>> /* Not enough data. */
>> if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
>> @@ -4128,47 +4103,36 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas)
>> dst_pivots = ma_pivots(newnode, wr_mas->type);
>> dst_slots = ma_slots(newnode, wr_mas->type);
>> /* Copy from start to insert point */
>> - memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * (offset + 1));
>> - memcpy(dst_slots, wr_mas->slots, sizeof(void *) * (offset + 1));
>> - dst_offset = offset;
>> + memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * mas->offset);
>> + memcpy(dst_slots, wr_mas->slots, sizeof(void *) * mas->offset);
>>
>> /* Handle insert of new range starting after old range */
>> if (wr_mas->r_min < mas->index) {
>> - mas->offset++;
>> - rcu_assign_pointer(dst_slots[dst_offset], wr_mas->content);
>> - dst_pivots[dst_offset++] = mas->index - 1;
>> + rcu_assign_pointer(dst_slots[mas->offset], wr_mas->content);
>> + dst_pivots[mas->offset++] = mas->index - 1;
>> }
>>
>> /* Store the new entry and range end. */
>> - if (dst_offset < max_piv)
>> - dst_pivots[dst_offset] = mas->last;
>> - mas->offset = dst_offset;
>> - rcu_assign_pointer(dst_slots[dst_offset], wr_mas->entry);
>> + if (mas->offset < node_pivots)
>> + dst_pivots[mas->offset] = mas->last;
>> + rcu_assign_pointer(dst_slots[mas->offset], wr_mas->entry);
>>
>> /*
>> * this range wrote to the end of the node or it overwrote the rest of
>> * the data
>> */
>> - if (wr_mas->offset_end > wr_mas->node_end || mas->last >= mas->max) {
>> - new_end = dst_offset;
>> + if (wr_mas->offset_end > wr_mas->node_end)
>> goto done;
>> - }
>>
>> - dst_offset++;
>> + dst_offset = mas->offset + 1;
>> /* Copy to the end of node if necessary. */
>> copy_size = wr_mas->node_end - wr_mas->offset_end + 1;
>> memcpy(dst_slots + dst_offset, wr_mas->slots + wr_mas->offset_end,
>> sizeof(void *) * copy_size);
>> - if (dst_offset < max_piv) {
>> - if (copy_size > max_piv - dst_offset)
>> - copy_size = max_piv - dst_offset;
>> + memcpy(dst_pivots + dst_offset, wr_mas->pivots + wr_mas->offset_end,
>> + sizeof(unsigned long) * (copy_size - 1));
>>
>> - memcpy(dst_pivots + dst_offset,
>> - wr_mas->pivots + wr_mas->offset_end,
>> - sizeof(unsigned long) * copy_size);
>> - }
>> -
>> - if ((wr_mas->node_end == node_slots - 1) && (new_end < node_slots - 1))
>> + if (new_end < node_pivots)
>> dst_pivots[new_end] = mas->max;
>>
>> done:
>> @@ -4429,7 +4393,8 @@ static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
>>
>> if (new_end == wr_mas->node_end && mas_wr_slot_store(wr_mas))
>> return;
>> - else if (mas_wr_node_store(wr_mas))
>> +
>> + if (mas_wr_node_store(wr_mas, new_end))
>> return;
>>
>> if (mas_is_err(mas))
>> --
>> 2.20.1
>>