Message-ID: <5c1da066-0c76-42f4-8c46-a99f60a900bf@kernel.org>
Date: Wed, 11 Jun 2025 14:07:01 +0800
From: Chao Yu <chao@...nel.org>
To: Jaegeuk Kim <jaegeuk@...nel.org>, Zhiguo Niu <zhiguo.niu@...soc.com>
Cc: chao@...nel.org, linux-f2fs-devel@...ts.sourceforge.net,
linux-kernel@...r.kernel.org, niuzhiguo84@...il.com, ke.wang@...soc.com,
Hao_hao.Wang@...soc.com, baocong.liu@...soc.com
Subject: Re: [PATCH v3] f2fs: compress: fix UAF of f2fs_inode_info in
f2fs_free_dic
On 6/11/25 00:08, Jaegeuk Kim wrote:
> Hi Zhiguo,
>
> This patch causes a CPU hang when running fsstress on compressed/non-compressed
> files. Please check.
Oh, it seems this may cause the deadlock below:

CPU0
process A
- spin_lock(i_lock)
    <software IRQ>
    - end_io
     - igrab
      - spin_lock(i_lock)   <- spins: this CPU already holds i_lock
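
FYI, igrab() takes i_lock with a plain, non-IRQ-safe spin_lock(); roughly
as below (a paraphrased sketch of fs/inode.c, not the exact source):

	/*
	 * A plain spin_lock() does not disable interrupts, so a softirq
	 * that re-enters this path on the same CPU spins on a lock the
	 * interrupted task already holds.
	 */
	struct inode *igrab(struct inode *inode)
	{
		spin_lock(&inode->i_lock);
		if (!(inode->i_state & (I_FREEING | I_WILL_FREE))) {
			__iget(inode);
			spin_unlock(&inode->i_lock);
		} else {
			spin_unlock(&inode->i_lock);
			/* inode is being freed; refuse to pin it */
			inode = NULL;
		}
		return inode;
	}

So if the end_io softirq fires on a CPU whose interrupted task holds
i_lock, that CPU never makes progress.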
Thanks,
>
> On 06/05, Zhiguo Niu wrote:
>> The decompress_io_ctx may be released asynchronously after
>> I/O completion. If the file is deleted immediately after read,
>> and the kworker processing post_read_wq has not run yet due to
>> a high workload, it is possible that the inode (f2fs_inode_info)
>> is evicted and freed before f2fs_free_dic uses it.
>>
>> The UAF case as below:
>>
>> Thread A                             Thread B
>> - f2fs_decompress_end_io
>>  - f2fs_put_dic
>>   - queue_work
>>     (add free_dic work to post_read_wq)
>>                                      - do_unlink
>>                                       - iput
>>                                        - evict
>>                                         - call_rcu
>>                                      (this file is deleted after read)
>>
>> Thread C                             kworker processing post_read_wq
>> - rcu_do_batch
>>  - f2fs_free_inode
>>   - kmem_cache_free
>>     (inode is freed by rcu)
>>                                      - process_scheduled_works
>>                                       - f2fs_late_free_dic
>>                                        - f2fs_free_dic
>>                                         - f2fs_release_decomp_mem
>>                                          read (dic->inode)->i_compress_algorithm
>>
>> This patch uses igrab() before freeing the dic, and iput() after the
>> dic is freed, for the case where the dic free is done by the kworker.
>>
>> Cc: Daeho Jeong <daehojeong@...gle.com>
>> Fixes: bff139b49d9f ("f2fs: handle decompress only post processing in softirq")
>> Signed-off-by: Zhiguo Niu <zhiguo.niu@...soc.com>
>> Signed-off-by: Baocong Liu <baocong.liu@...soc.com>
>> ---
>> v3: use igrab to replace __iget
>> v2: use __iget/iput function
>> ---
>> fs/f2fs/compress.c | 14 +++++++++-----
>> 1 file changed, 9 insertions(+), 5 deletions(-)
>>
>> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
>> index b3c1df9..729ad16 100644
>> --- a/fs/f2fs/compress.c
>> +++ b/fs/f2fs/compress.c
>> @@ -1687,7 +1687,7 @@ static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
>> }
>>
>> static void f2fs_free_dic(struct decompress_io_ctx *dic,
>> - bool bypass_destroy_callback);
>> + bool bypass_destroy_callback, bool late_free);
>>
>> struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
>> {
>> @@ -1743,12 +1743,12 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
>> return dic;
>>
>> out_free:
>> - f2fs_free_dic(dic, true);
>> + f2fs_free_dic(dic, true, false);
>> return ERR_PTR(ret);
>> }
>>
>> static void f2fs_free_dic(struct decompress_io_ctx *dic,
>> - bool bypass_destroy_callback)
>> + bool bypass_destroy_callback, bool late_free)
>> {
>> int i;
>>
>> @@ -1775,6 +1775,8 @@ static void f2fs_free_dic(struct decompress_io_ctx *dic,
>> }
>>
>> page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
>> + if (late_free)
>> + iput(dic->inode);
>> kmem_cache_free(dic_entry_slab, dic);
>> }
>>
>> @@ -1783,16 +1785,18 @@ static void f2fs_late_free_dic(struct work_struct *work)
>> struct decompress_io_ctx *dic =
>> container_of(work, struct decompress_io_ctx, free_work);
>>
>> - f2fs_free_dic(dic, false);
>> + f2fs_free_dic(dic, false, true);
>> }
>>
>> static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task)
>> {
>> if (refcount_dec_and_test(&dic->refcnt)) {
>> if (in_task) {
>> - f2fs_free_dic(dic, false);
>> + f2fs_free_dic(dic, false, false);
>> } else {
>> INIT_WORK(&dic->free_work, f2fs_late_free_dic);
>> +		/* use igrab to keep the inode from being evicted concurrently */
>> + f2fs_bug_on(F2FS_I_SB(dic->inode), !igrab(dic->inode));
>> queue_work(F2FS_I_SB(dic->inode)->post_read_wq,
>> &dic->free_work);
>> }
>> --
>> 1.9.1
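
The pattern the patch is reaching for - pin the inode before queueing the
deferred free, drop the pin once the work runs - looks roughly like the
sketch below. This is a minimal, hypothetical example (late_free_ctx,
late_free_fn and queue_late_free are illustrative names, not f2fs code),
and per Chao's note above the pin must be taken from process context,
since igrab() from softirq can deadlock on i_lock:

	#include <linux/errno.h>
	#include <linux/fs.h>
	#include <linux/slab.h>
	#include <linux/workqueue.h>

	struct late_free_ctx {
		struct inode *inode;	/* pinned before queue_work() */
		struct work_struct work;
	};

	static void late_free_fn(struct work_struct *work)
	{
		struct late_free_ctx *ctx =
			container_of(work, struct late_free_ctx, work);

		/*
		 * ctx->inode is safe to dereference here: the extra
		 * reference keeps evict()/f2fs_free_inode() away.
		 */
		iput(ctx->inode);	/* drop the pin taken below */
		kfree(ctx);
	}

	/*
	 * Must be called from process context: igrab() takes i_lock
	 * with a non-IRQ-safe spin_lock().
	 */
	static int queue_late_free(struct late_free_ctx *ctx)
	{
		if (!igrab(ctx->inode))
			return -ESTALE;	/* inode is already being freed */
		INIT_WORK(&ctx->work, late_free_fn);
		schedule_work(&ctx->work);
		return 0;
	}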