Date: Wed, 6 Mar 2024 10:02:07 +0800
From: Chao Yu <chao@...nel.org>
To: Xiuhong Wang <xiuhong.wang@...soc.com>, jaegeuk@...nel.org,
 linux-f2fs-devel@...ts.sourceforge.net, linux-kernel@...r.kernel.org
Cc: hongyu.jin.cn@...il.com, niuzhiguo84@...il.com, ke.wang@...soc.com,
 xiuhong.wang.cn@...il.com
Subject: Re: [PATCH 2/2] f2fs: compress: fix reserve_cblocks counting error
 when out of space

On 2024/3/5 16:40, Xiuhong Wang wrote:
> When a file needs only one direct_node, performing the following
> operations leaves the file unrepairable:
> 
> unisoc # ./f2fs_io compress test.apk
> unisoc # df -h | grep dm-48
> /dev/block/dm-48 112G 112G 1.2M 100% /data
> 
> unisoc # ./f2fs_io release_cblocks test.apk
> 924
> unisoc # df -h | grep dm-48
> /dev/block/dm-48 112G 112G 4.8M 100% /data
> 
> unisoc # dd if=/dev/random of=file4 bs=1M count=3
> 3145728 bytes (3.0 M) copied, 0.025 s, 120 M/s
> unisoc # df -h | grep dm-48
> /dev/block/dm-48 112G 112G 1.8M 100% /data
> 
> unisoc # ./f2fs_io reserve_cblocks test.apk
> F2FS_IOC_RESERVE_COMPRESS_BLOCKS failed: No space left on device
> 
> adb reboot
> unisoc # df -h  | grep dm-48
> /dev/block/dm-48             112G 112G   11M 100% /data
> unisoc # ./f2fs_io reserve_cblocks test.apk
> 0
> 
> This is because the file has only one direct_node. After
> reserve_compress_blocks() returns -ENOSPC, reserved_blocks += ret is
> never executed, so reserved_blocks is still 0, which is not the real
> number of reserved blocks. Therefore, the fsck flag cannot be set to
> repair the file.
> 
> After this patch, the fsck flag will be set to fix this problem.
> 
> unisoc # df -h | grep dm-48
> /dev/block/dm-48             112G 112G  1.8M 100% /data
> unisoc # ./f2fs_io reserve_cblocks test.apk
> F2FS_IOC_RESERVE_COMPRESS_BLOCKS failed: No space left on device
> 
> adb reboot then fsck will be executed
> unisoc # df -h  | grep dm-48
> /dev/block/dm-48             112G 112G   11M 100% /data
> unisoc # ./f2fs_io reserve_cblocks test.apk
> 924
> 
> Fixes: c75488fb4d82 ("f2fs: introduce F2FS_IOC_RESERVE_COMPRESS_BLOCKS")
> Signed-off-by: Xiuhong Wang <xiuhong.wang@...soc.com>
> Signed-off-by: Zhiguo Niu <zhiguo.niu@...soc.com>
> ---
>   fs/f2fs/file.c | 17 +++++++++--------
>   1 file changed, 9 insertions(+), 8 deletions(-)
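
The failure mode described above is a return-value convention problem;
here is a standalone sketch in plain C (not the f2fs code --
reserve_old()/reserve_new() and the block counts are made up for
illustration) showing why reporting progress through an out-parameter
survives the error path, while a count-or-negative-errno return does not:

/* Standalone sketch, not f2fs code; names and numbers are illustrative. */
#include <errno.h>
#include <stdio.h>

/* Old style: return the reserved count or a negative errno.  Whatever
 * was reserved before the failure never reaches the caller. */
static int reserve_old(unsigned int want, unsigned int avail)
{
	unsigned int done = want < avail ? want : avail;

	if (done < want)
		return -ENOSPC;	/* 'done' blocks reserved, but the count is dropped */
	return (int)done;
}

/* New style: report progress through an out-parameter and return only an
 * error code, so the caller still sees the partial count on failure and
 * can decide to flag the filesystem for repair. */
static int reserve_new(unsigned int want, unsigned int avail,
		       unsigned int *reserved)
{
	unsigned int done = want < avail ? want : avail;

	*reserved += done;
	return done < want ? -ENOSPC : 0;
}

int main(void)
{
	unsigned int reserved = 0;
	int ret;

	ret = reserve_old(924, 100);
	printf("old: ret=%d, partial progress lost\n", ret);

	ret = reserve_new(924, 100, &reserved);
	printf("new: ret=%d, reserved=%u\n", ret, reserved);
	return 0;
}
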
> 
> diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
> index 572d7bd4d161..97a7233c7ea7 100644
> --- a/fs/f2fs/file.c
> +++ b/fs/f2fs/file.c
> @@ -3624,10 +3624,10 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
>   	return ret;
>   }
>   
> -static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
> +static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count,
> +		unsigned int *reserved_blocks)
>   {
>   	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
> -	unsigned int reserved_blocks = 0;
>   	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
>   	block_t blkaddr;
>   	int i;
> @@ -3691,12 +3691,12 @@ static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
>   
>   		f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
>   
> -		reserved_blocks += reserved;
> +		*reserved_blocks += reserved;
>   next:
>   		count -= cluster_size;
>   	}
>   
> -	return reserved_blocks;
> +	return 0;
>   }
>   
>   static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
> @@ -3740,6 +3740,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
>   	while (page_idx < last_idx) {
>   		struct dnode_of_data dn;
>   		pgoff_t end_offset, count;
> +		unsigned int tmp_reserved_blocks;
>   
>   		set_new_dnode(&dn, inode, NULL, NULL, 0);
>   		ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
> @@ -3757,7 +3758,8 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
>   		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
>   		count = round_up(count, F2FS_I(inode)->i_cluster_size);
>   
> -		ret = reserve_compress_blocks(&dn, count);
> +		ret = reserve_compress_blocks(&dn, count, &tmp_reserved_blocks);

How about passing &reserved_blocks into reserve_compress_blocks()?
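
That would let it accumulate directly into the caller's counter and drop
tmp_reserved_blocks entirely, e.g. something like (untested):

		ret = reserve_compress_blocks(&dn, count, &reserved_blocks);

		f2fs_put_dnode(&dn);

		if (ret < 0)
			break;

		page_idx += count;

Since reserve_compress_blocks() only ever adds to *reserved_blocks, no
caller-side accumulation is needed.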

Thanks,

> +		reserved_blocks += tmp_reserved_blocks;
>   
>   		f2fs_put_dnode(&dn);
>   
> @@ -3765,13 +3767,12 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
>   			break;
>   
>   		page_idx += count;
> -		reserved_blocks += ret;
>   	}
>   
>   	filemap_invalidate_unlock(inode->i_mapping);
>   	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
>   
> -	if (ret >= 0) {
> +	if (!ret) {
>   		clear_inode_flag(inode, FI_COMPRESS_RELEASED);
>   		inode_set_ctime_current(inode);
>   		f2fs_mark_inode_dirty_sync(inode, true);
> @@ -3780,7 +3781,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
>   	inode_unlock(inode);
>   	mnt_drop_write_file(filp);
>   
> -	if (ret >= 0) {
> +	if (!ret) {
>   		ret = put_user(reserved_blocks, (u64 __user *)arg);
>   	} else if (reserved_blocks &&
>   			atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
