Message-ID: <9c414060-989d-55bb-9a7b-0f33bf103c4f@leemhuis.info>
Date: Mon, 5 Dec 2022 16:41:49 +0100
From: Thorsten Leemhuis <regressions@...mhuis.info>
To: Ted Tso <tytso@....edu>, Andreas Dilger <adilger.kernel@...ger.ca>
Cc: Jan Kara <jack@...e.cz>, linux-ext4@...r.kernel.org,
stable@...r.kernel.org, Thilo Fromm <t-lo@...ux.microsoft.com>,
Jeremi Piotrowski <jpiotrowski@...ux.microsoft.com>,
Andreas Gruenbacher <agruenba@...hat.com>
Subject: Re: [PATCH] ext4: Fix deadlock due to mbcache entry corruption
On 01.12.22 16:10, Jeremi Piotrowski wrote:
> On Wed, Nov 23, 2022 at 08:39:50PM +0100, Jan Kara wrote:
>> When manipulating xattr blocks, we can deadlock by looping forever
>> inside ext4_xattr_block_set(): we keep finding the same xattr block
>> for reuse in mbcache, but we can never actually reuse it because its
>> reference count is too high. This happens because the cache entry for
>> the xattr block is marked as reusable (e_reusable set) even though its
>> reference count has already reached the maximum. Once this
>> inconsistency arises it persists indefinitely, and so
>> ext4_xattr_block_set() retries forever.
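To see why this loops forever, here is a minimal userspace sketch of
the retry pattern (hypothetical stand-ins, not the kernel code;
REFCOUNT_MAX plays the role of EXT4_XATTR_REFCOUNT_MAX):

/* Sketch: a reusable-flagged entry whose refcount is pinned at the
 * maximum is returned by every lookup, yet can never be reused.
 */
#include <stdbool.h>
#include <stdio.h>

#define REFCOUNT_MAX 65535	/* stand-in for EXT4_XATTR_REFCOUNT_MAX */

struct cache_entry {
	bool reusable;
	int refcount;
};

/* mbcache lookups only return entries flagged reusable */
static struct cache_entry *find_reusable(struct cache_entry *e)
{
	return e->reusable ? e : NULL;
}

int main(void)
{
	/* the inconsistent state: flagged reusable, refcount maxed out */
	struct cache_entry blk = { .reusable = true, .refcount = REFCOUNT_MAX };

	for (int tries = 0; tries < 3; tries++) {	/* kernel: retries unbounded */
		struct cache_entry *ce = find_reusable(&blk);

		if (ce && ce->refcount < REFCOUNT_MAX)
			return 0;	/* would reuse the block here */
		/* refcount too high: give up the entry and search again,
		 * which finds the very same entry -- livelock */
	}
	puts("still finding the same unusable entry -> livelock");
	return 1;
}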
>>
>> The inconsistent state is caused by a non-atomic update of the
>> e_reusable bit. e_reusable is part of a bitfield, so an update to it
>> can race with an update of the e_referenced bit in the same bitfield,
>> and one of the two updates can be lost. Fix the problem by using
>> atomic bitops instead.
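The lost update is ordinary bitfield semantics: both one-bit members
live in the same machine word, so each "store" is really a
read-modify-write of the whole word. A minimal sketch (hypothetical
names, userspace pthreads rather than kernel threads; a single run will
rarely lose the update, but the language permits it):

#include <pthread.h>
#include <stdio.h>

struct entry {
	unsigned int referenced:1;	/* like e_referenced */
	unsigned int reusable:1;	/* like e_reusable */
};

static struct entry e = { .reusable = 1 };

static void *drop_reusable(void *arg)
{
	(void)arg;
	e.reusable = 0;		/* load word, clear bit, store word */
	return NULL;
}

static void *mark_referenced(void *arg)
{
	(void)arg;
	/* load word (may still see reusable=1), set bit, store word:
	 * if this store lands last, it writes the stale reusable=1 back */
	e.referenced = 1;
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, drop_reusable, NULL);
	pthread_create(&t2, NULL, mark_referenced, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	/* can print reusable=1 even though drop_reusable() ran */
	printf("referenced=%u reusable=%u\n", e.referenced, e.reusable);
	return 0;
}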
>>
>> CC: stable@...r.kernel.org
>> Fixes: 6048c64b2609 ("mbcache: add reusable flag to cache entries")
>> Reported-and-tested-by: Jeremi Piotrowski <jpiotrowski@...ux.microsoft.com>
>> Reported-by: Thilo Fromm <t-lo@...ux.microsoft.com>
>> Link: https://lore.kernel.org/r/c77bf00f-4618-7149-56f1-b8d1664b9d07@linux.microsoft.com/
>> Signed-off-by: Jan Kara <jack@...e.cz>
>
> Could it be that you didn't see this email? We have users who are hitting this
> and are very eager to see this bugfix get merged and backported to stable.
Andreas, Ted, or any other trusted ext4 reviewer:
Jan's patch to fix the regression has now been out for 12 days and
afaics hasn't made any progress (or did I miss something?). Is there a
reason why, or did it simply fall through the cracks? Just asking,
because it would be good to finally get this resolved.
Ciao, Thorsten (wearing his 'the Linux kernel's regression tracker' hat)
P.S.: As the Linux kernel's regression tracker I deal with a lot of
reports and sometimes miss something important when writing mails like
this. If that's the case here, don't hesitate to tell me in a public
reply, it's in everyone's interest to set the public record straight.
>> fs/ext4/xattr.c | 4 ++--
>> fs/mbcache.c | 14 ++++++++------
>> include/linux/mbcache.h | 9 +++++++--
>> 3 files changed, 17 insertions(+), 10 deletions(-)
>>
>> diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
>> index 800ce5cdb9d2..08043aa72cf1 100644
>> --- a/fs/ext4/xattr.c
>> +++ b/fs/ext4/xattr.c
>> @@ -1281,7 +1281,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
>> ce = mb_cache_entry_get(ea_block_cache, hash,
>> bh->b_blocknr);
>> if (ce) {
>> - ce->e_reusable = 1;
>> + set_bit(MBE_REUSABLE_B, &ce->e_flags);
>> mb_cache_entry_put(ea_block_cache, ce);
>> }
>> }
>> @@ -2042,7 +2042,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
>> }
>> BHDR(new_bh)->h_refcount = cpu_to_le32(ref);
>> if (ref == EXT4_XATTR_REFCOUNT_MAX)
>> - ce->e_reusable = 0;
>> + clear_bit(MBE_REUSABLE_B, &ce->e_flags);
>> ea_bdebug(new_bh, "reusing; refcount now=%d",
>> ref);
>> ext4_xattr_block_csum_set(inode, new_bh);
>> diff --git a/fs/mbcache.c b/fs/mbcache.c
>> index e272ad738faf..2a4b8b549e93 100644
>> --- a/fs/mbcache.c
>> +++ b/fs/mbcache.c
>> @@ -100,8 +100,9 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
>> atomic_set(&entry->e_refcnt, 2);
>> entry->e_key = key;
>> entry->e_value = value;
>> - entry->e_reusable = reusable;
>> - entry->e_referenced = 0;
>> + entry->e_flags = 0;
>> + if (reusable)
>> + set_bit(MBE_REUSABLE_B, &entry->e_flags);
>> head = mb_cache_entry_head(cache, key);
>> hlist_bl_lock(head);
>> hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
>> @@ -165,7 +166,8 @@ static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
>> while (node) {
>> entry = hlist_bl_entry(node, struct mb_cache_entry,
>> e_hash_list);
>> - if (entry->e_key == key && entry->e_reusable &&
>> + if (entry->e_key == key &&
>> + test_bit(MBE_REUSABLE_B, &entry->e_flags) &&
>> atomic_inc_not_zero(&entry->e_refcnt))
>> goto out;
>> node = node->next;
>> @@ -284,7 +286,7 @@ EXPORT_SYMBOL(mb_cache_entry_delete_or_get);
>> void mb_cache_entry_touch(struct mb_cache *cache,
>> struct mb_cache_entry *entry)
>> {
>> - entry->e_referenced = 1;
>> + set_bit(MBE_REFERENCED_B, &entry->e_flags);
>> }
>> EXPORT_SYMBOL(mb_cache_entry_touch);
>>
>> @@ -309,9 +311,9 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
>> entry = list_first_entry(&cache->c_list,
>> struct mb_cache_entry, e_list);
>> /* Drop initial hash reference if there is no user */
>> - if (entry->e_referenced ||
>> + if (test_bit(MBE_REFERENCED_B, &entry->e_flags) ||
>> atomic_cmpxchg(&entry->e_refcnt, 1, 0) != 1) {
>> - entry->e_referenced = 0;
>> + clear_bit(MBE_REFERENCED_B, &entry->e_flags);
>> list_move_tail(&entry->e_list, &cache->c_list);
>> continue;
>> }
>> diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
>> index 2da63fd7b98f..97e64184767d 100644
>> --- a/include/linux/mbcache.h
>> +++ b/include/linux/mbcache.h
>> @@ -10,6 +10,12 @@
>>
>> struct mb_cache;
>>
>> +/* Cache entry flags */
>> +enum {
>> + MBE_REFERENCED_B = 0,
>> + MBE_REUSABLE_B
>> +};
>> +
>> struct mb_cache_entry {
>> /* List of entries in cache - protected by cache->c_list_lock */
>> struct list_head e_list;
>> @@ -26,8 +32,7 @@ struct mb_cache_entry {
>> atomic_t e_refcnt;
>> /* Key in hash - stable during lifetime of the entry */
>> u32 e_key;
>> - u32 e_referenced:1;
>> - u32 e_reusable:1;
>> + unsigned long e_flags;
>> /* User provided value - stable during lifetime of the entry */
>> u64 e_value;
>> };
>> --
>> 2.35.3
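For readers without the kernel tree at hand: the kernel's
set_bit()/clear_bit()/test_bit() perform atomic read-modify-write
operations on a word of type unsigned long, which is why the patch
replaces the two one-bit fields with an unsigned long e_flags. The
effect can be mimicked in portable C11 (a sketch of the idea, not the
kernel implementation):

/* Userspace analogue of the fix: atomic bit operations on a shared
 * flags word, so concurrent updates to different bits cannot erase
 * each other the way plain bitfield stores can.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum {
	MBE_REFERENCED_B = 0,
	MBE_REUSABLE_B
};

static _Atomic unsigned long e_flags;

static inline void set_bit(int nr, _Atomic unsigned long *addr)
{
	atomic_fetch_or(addr, 1UL << nr);	/* one indivisible RMW */
}

static inline void clear_bit(int nr, _Atomic unsigned long *addr)
{
	atomic_fetch_and(addr, ~(1UL << nr));
}

static inline bool test_bit(int nr, const _Atomic unsigned long *addr)
{
	return atomic_load(addr) & (1UL << nr);
}

int main(void)
{
	set_bit(MBE_REUSABLE_B, &e_flags);
	set_bit(MBE_REFERENCED_B, &e_flags);	/* cannot clobber the other bit */
	clear_bit(MBE_REUSABLE_B, &e_flags);
	printf("referenced=%d reusable=%d\n",
	       test_bit(MBE_REFERENCED_B, &e_flags),
	       test_bit(MBE_REUSABLE_B, &e_flags));
	return 0;
}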