lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <871pz7adjq.fsf_-_@mail.parknet.co.jp>
Date: Tue, 19 Nov 2024 16:27:37 +0900
From: OGAWA Hirofumi <hirofumi@...l.parknet.co.jp>
To: Jens Axboe <axboe@...nel.dk>
Cc: linux-block@...r.kernel.org,
        syzbot
 <syzbot+a5d8c609c02f508672cc@...kaller.appspotmail.com>,
        linkinjeon@...nel.org, linux-fsdevel@...r.kernel.org,
        linux-kernel@...r.kernel.org, sj1557.seo@...sung.com,
        syzkaller-bugs@...glegroups.com
Subject: [PATCH] loop: Fix ABBA locking race (Re: [syzbot] [exfat?] possible
 deadlock in fat_count_free_clusters)

OGAWA Hirofumi <hirofumi@...l.parknet.co.jp> writes:

ping?

> Hi,
>
> syzbot <syzbot+a5d8c609c02f508672cc@...kaller.appspotmail.com> writes:
>
>> syzbot found the following issue on:
>>
>> HEAD commit:    929beafbe7ac Add linux-next specific files for 20241108
>> git tree:       linux-next
>> console output: https://syzkaller.appspot.com/x/log.txt?x=1621bd87980000
>> kernel config:  https://syzkaller.appspot.com/x/.config?x=75175323f2078363
>> dashboard link: https://syzkaller.appspot.com/bug?extid=a5d8c609c02f508672cc
>> compiler:       Debian clang version 15.0.6, GNU ld (GNU Binutils for Debian) 2.40
>
> This patch fixes the above race. Please review it.
>
> Thanks
>
>
> From: OGAWA Hirofumi <hirofumi@...l.parknet.co.jp>
> Subject: [PATCH] loop: Fix ABBA locking race
> Date: Mon, 11 Nov 2024 21:53:36 +0900
>
> Currently, loop calls vfs_statfs() while holding q->limits_lock. If
> the FS takes a lock in its vfs_statfs callback, this may lead to an
> ABBA locking bug (at least, FAT fs actually has this issue).
>
> So this patch calls vfs_statfs() outside q->limits_lock instead,
> because there appears to be no reason to hold q->limits_lock while
> getting the discard configuration.
>
> Chain exists of:
>   &sbi->fat_lock --> &q->q_usage_counter(io)#17 --> &q->limits_lock
>
>  Possible unsafe locking scenario:
>
>        CPU0                    CPU1
>        ----                    ----
>   lock(&q->limits_lock);
>                                lock(&q->q_usage_counter(io)#17);
>                                lock(&q->limits_lock);
>   lock(&sbi->fat_lock);
>
>  *** DEADLOCK ***
>
> Reported-by: syzbot+a5d8c609c02f508672cc@...kaller.appspotmail.com
> Closes: https://syzkaller.appspot.com/bug?extid=a5d8c609c02f508672cc
> Signed-off-by: OGAWA Hirofumi <hirofumi@...l.parknet.co.jp>
> ---
>  drivers/block/loop.c |   31 ++++++++++++++++---------------
>  1 file changed, 16 insertions(+), 15 deletions(-)
>
> diff --git a/drivers/block/loop.c b/drivers/block/loop.c
> index 78a7bb2..5f3ce51 100644
> --- a/drivers/block/loop.c	2024-09-16 13:45:20.253220178 +0900
> +++ b/drivers/block/loop.c	2024-11-11 21:51:00.910135443 +0900
> @@ -770,12 +770,11 @@ static void loop_sysfs_exit(struct loop_
>  				   &loop_attribute_group);
>  }
>  
> -static void loop_config_discard(struct loop_device *lo,
> -		struct queue_limits *lim)
> +static void loop_get_discard_config(struct loop_device *lo,
> +				    u32 *granularity, u32 *max_discard_sectors)
>  {
>  	struct file *file = lo->lo_backing_file;
>  	struct inode *inode = file->f_mapping->host;
> -	u32 granularity = 0, max_discard_sectors = 0;
>  	struct kstatfs sbuf;
>  
>  	/*
> @@ -788,8 +787,9 @@ static void loop_config_discard(struct l
>  	if (S_ISBLK(inode->i_mode)) {
>  		struct request_queue *backingq = bdev_get_queue(I_BDEV(inode));
>  
> -		max_discard_sectors = backingq->limits.max_write_zeroes_sectors;
> -		granularity = bdev_discard_granularity(I_BDEV(inode)) ?:
> +		*max_discard_sectors =
> +			backingq->limits.max_write_zeroes_sectors;
> +		*granularity = bdev_discard_granularity(I_BDEV(inode)) ?:
>  			queue_physical_block_size(backingq);
>  
>  	/*
> @@ -797,16 +797,9 @@ static void loop_config_discard(struct l
>  	 * image a.k.a. discard.
>  	 */
>  	} else if (file->f_op->fallocate && !vfs_statfs(&file->f_path, &sbuf)) {
> -		max_discard_sectors = UINT_MAX >> 9;
> -		granularity = sbuf.f_bsize;
> +		*max_discard_sectors = UINT_MAX >> 9;
> +		*granularity = sbuf.f_bsize;
>  	}
> -
> -	lim->max_hw_discard_sectors = max_discard_sectors;
> -	lim->max_write_zeroes_sectors = max_discard_sectors;
> -	if (max_discard_sectors)
> -		lim->discard_granularity = granularity;
> -	else
> -		lim->discard_granularity = 0;
>  }
>  
>  struct loop_worker {
> @@ -992,6 +985,7 @@ static int loop_reconfigure_limits(struc
>  	struct inode *inode = file->f_mapping->host;
>  	struct block_device *backing_bdev = NULL;
>  	struct queue_limits lim;
> +	u32 granularity = 0, max_discard_sectors = 0;
>  
>  	if (S_ISBLK(inode->i_mode))
>  		backing_bdev = I_BDEV(inode);
> @@ -1001,6 +995,8 @@ static int loop_reconfigure_limits(struc
>  	if (!bsize)
>  		bsize = loop_default_blocksize(lo, backing_bdev);
>  
> +	loop_get_discard_config(lo, &granularity, &max_discard_sectors);
> +
>  	lim = queue_limits_start_update(lo->lo_queue);
>  	lim.logical_block_size = bsize;
>  	lim.physical_block_size = bsize;
> @@ -1010,7 +1006,12 @@ static int loop_reconfigure_limits(struc
>  		lim.features |= BLK_FEAT_WRITE_CACHE;
>  	if (backing_bdev && !bdev_nonrot(backing_bdev))
>  		lim.features |= BLK_FEAT_ROTATIONAL;
> -	loop_config_discard(lo, &lim);
> +	lim.max_hw_discard_sectors = max_discard_sectors;
> +	lim.max_write_zeroes_sectors = max_discard_sectors;
> +	if (max_discard_sectors)
> +		lim.discard_granularity = granularity;
> +	else
> +		lim.discard_granularity = 0;
>  	return queue_limits_commit_update(lo->lo_queue, &lim);
>  }
>  
> _

-- 
OGAWA Hirofumi <hirofumi@...l.parknet.co.jp>

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ