Message-ID: <20251020122541.7227-1-mngyadam@amazon.de>
Date: Mon, 20 Oct 2025 14:25:38 +0200
From: Mahmoud Adam <mngyadam@...zon.de>
To: <stable@...r.kernel.org>
CC: <gregkh@...uxfoundation.org>, <nagy@...aternagy.com>, "Darrick J. Wong"
<djwong@...nel.org>, Christoph Hellwig <hch@....de>, Luis Chamberlain
<mcgrof@...nel.org>, Shin'ichiro Kawasaki <shinichiro.kawasaki@....com>,
"Jens Axboe" <axboe@...nel.dk>, Ryusuke Konishi <konishi.ryusuke@...il.com>,
<linux-block@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<linux-nilfs@...r.kernel.org>
Subject: [PATCH 6.6 1/2] block: fix race between set_blocksize and read paths
From: "Darrick J. Wong" <djwong@...nel.org>
commit c0e473a0d226479e8e925d5ba93f751d8df628e9 upstream.
With the new large sector size support, it's now the case that
set_blocksize can change i_blksize and the folio order in a manner that
conflicts with a concurrent reader and causes a kernel crash.
Specifically, let's say that udev-worker calls libblkid to detect the
labels on a block device. The read call can create an order-0 folio to
read the first 4096 bytes from the disk. But then udev is preempted.
Next, someone tries to mount an 8k-sectorsize filesystem from the same
block device. The filesystem calls set_blksize, which sets i_blksize to
8192 and the minimum folio order to 1.
Now udev resumes, still holding the order-0 folio it allocated. It then
tries to schedule a read bio and do_mpage_readahead tries to create
bufferheads for the folio. Unfortunately, blocks_per_folio == 0 because
the page size is 4096 but the blocksize is 8192 so no bufferheads are
attached and the bh walk never sets bdev. We then submit the bio with a
NULL block device and crash.
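
To make the failure mode concrete, the arithmetic boils down to
something like this (illustrative values only, not the literal
mpage/buffer.c code):

    /* The reader still holds an order-0 (4096-byte) folio, but
     * set_blocksize() has already switched the device to an 8192-byte
     * blocksize, i.e. i_blkbits == 13. */
    unsigned int blkbits = 13;                  /* blocksize 8192 */
    size_t folio_bytes = 4096;                  /* order-0 folio  */
    size_t blocks_per_folio = folio_bytes >> blkbits;   /* == 0   */

    /* With zero blocks per folio, no buffer_heads get attached, the
     * bh walk never sets bh->b_bdev, and the bio is submitted with a
     * NULL block device. */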
Therefore, truncate the page cache after flushing but before updating
i_blksize. However, that's not enough -- we also need to lock out file
IO and page faults during the update. Take both the i_rwsem and the
invalidate_lock in exclusive mode for invalidations, and in shared mode
for read/write operations.
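
In other words, the locking protocol ends up roughly as below
(condensed from the hunks in this patch; error paths and the
zoned/discard/zeroout callers are elided):

    /* Invalidation side (set_blocksize and the cache-punching ioctls):
     * both locks exclusive, flush and kill the cache around the
     * geometry change. */
    inode_lock(bdev->bd_inode);
    filemap_invalidate_lock(bdev->bd_inode->i_mapping);
    sync_blockdev(bdev);
    kill_bdev(bdev);
    bdev->bd_inode->i_blkbits = blksize_bits(size);
    kill_bdev(bdev);
    filemap_invalidate_unlock(bdev->bd_inode->i_mapping);
    inode_unlock(bdev->bd_inode);

    /* Reader/writer side (blkdev_read_iter, buffered writes): i_rwsem
     * taken shared, so the geometry cannot change while the pagecache
     * is in use. */
    inode_lock_shared(bdev->bd_inode);
    ret = filemap_read(iocb, to, ret);
    inode_unlock_shared(bdev->bd_inode);
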
I don't know if this is the correct fix, but xfs/259 found it.

Signed-off-by: Darrick J. Wong <djwong@...nel.org>
Reviewed-by: Christoph Hellwig <hch@....de>
Reviewed-by: Luis Chamberlain <mcgrof@...nel.org>
Tested-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@....com>
Link: https://lore.kernel.org/r/174543795699.4139148.2086129139322431423.stgit@frogsfrogsfrogs
Signed-off-by: Jens Axboe <axboe@...nel.dk>
[use bdev->bd_inode instead]
Signed-off-by: Mahmoud Adam <mngyadam@...zon.de>
---
Fixes CVE-2025-38073.
block/bdev.c | 17 +++++++++++++++++
block/blk-zoned.c | 5 ++++-
block/fops.c | 16 ++++++++++++++++
block/ioctl.c | 6 ++++++
4 files changed, 43 insertions(+), 1 deletion(-)
diff --git a/block/bdev.c b/block/bdev.c
index 5a54977518eeae..a8357b72a27b86 100644
--- a/block/bdev.c
+++ b/block/bdev.c
@@ -147,9 +147,26 @@ int set_blocksize(struct block_device *bdev, int size)
/* Don't change the size if it is same as current */
if (bdev->bd_inode->i_blkbits != blksize_bits(size)) {
+ /*
+ * Flush and truncate the pagecache before we reconfigure the
+ * mapping geometry because folio sizes are variable now. If a
+ * reader has already allocated a folio whose size is smaller
+ * than the new min_order but invokes readahead after the new
+ * min_order becomes visible, readahead will think there are
+ * "zero" blocks per folio and crash. Take the inode and
+ * invalidation locks to avoid racing with
+ * read/write/fallocate.
+ */
+ inode_lock(bdev->bd_inode);
+ filemap_invalidate_lock(bdev->bd_inode->i_mapping);
+
sync_blockdev(bdev);
+ kill_bdev(bdev);
+
bdev->bd_inode->i_blkbits = blksize_bits(size);
kill_bdev(bdev);
+ filemap_invalidate_unlock(bdev->bd_inode->i_mapping);
+ inode_unlock(bdev->bd_inode);
}
return 0;
}
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 619ee41a51cc8c..644bfa1f6753ea 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -401,6 +401,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
op = REQ_OP_ZONE_RESET;
/* Invalidate the page cache, including dirty pages. */
+ inode_lock(bdev->bd_inode);
filemap_invalidate_lock(bdev->bd_inode->i_mapping);
ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
if (ret)
@@ -423,8 +424,10 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
GFP_KERNEL);
fail:
- if (cmd == BLKRESETZONE)
+ if (cmd == BLKRESETZONE) {
filemap_invalidate_unlock(bdev->bd_inode->i_mapping);
+ inode_unlock(bdev->bd_inode);
+ }
return ret;
}
diff --git a/block/fops.c b/block/fops.c
index 7c257eb3564d0c..088143fa9ac9e1 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -681,7 +681,14 @@ static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
ret = direct_write_fallback(iocb, from, ret,
blkdev_buffered_write(iocb, from));
} else {
+ /*
+ * Take i_rwsem and invalidate_lock to avoid racing with
+ * set_blocksize changing i_blkbits/folio order and punching
+ * out the pagecache.
+ */
+ inode_lock_shared(bd_inode);
ret = blkdev_buffered_write(iocb, from);
+ inode_unlock_shared(bd_inode);
}
if (ret > 0)
@@ -693,6 +700,7 @@ static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
+ struct inode *bd_inode = bdev->bd_inode;
loff_t size = bdev_nr_bytes(bdev);
loff_t pos = iocb->ki_pos;
size_t shorted = 0;
@@ -728,7 +736,13 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
goto reexpand;
}
+ /*
+ * Take i_rwsem and invalidate_lock to avoid racing with set_blocksize
+ * changing i_blkbits/folio order and punching out the pagecache.
+ */
+ inode_lock_shared(bd_inode);
ret = filemap_read(iocb, to, ret);
+ inode_unlock_shared(bd_inode);
reexpand:
if (unlikely(shorted))
@@ -771,6 +785,7 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
if ((start | len) & (bdev_logical_block_size(bdev) - 1))
return -EINVAL;
+ inode_lock(inode);
filemap_invalidate_lock(inode->i_mapping);
/*
@@ -811,6 +826,7 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
fail:
filemap_invalidate_unlock(inode->i_mapping);
+ inode_unlock(inode);
return error;
}
diff --git a/block/ioctl.c b/block/ioctl.c
index 231537f79a8cb4..024767fa1e52d5 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -114,6 +114,7 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
end > bdev_nr_bytes(bdev))
return -EINVAL;
+ inode_lock(inode);
filemap_invalidate_lock(inode->i_mapping);
err = truncate_bdev_range(bdev, mode, start, end - 1);
if (err)
@@ -121,6 +122,7 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
err = blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL);
fail:
filemap_invalidate_unlock(inode->i_mapping);
+ inode_unlock(inode);
return err;
}
@@ -146,12 +148,14 @@ static int blk_ioctl_secure_erase(struct block_device *bdev, blk_mode_t mode,
end > bdev_nr_bytes(bdev))
return -EINVAL;
+ inode_lock(bdev->bd_inode);
filemap_invalidate_lock(bdev->bd_inode->i_mapping);
err = truncate_bdev_range(bdev, mode, start, end - 1);
if (!err)
err = blkdev_issue_secure_erase(bdev, start >> 9, len >> 9,
GFP_KERNEL);
filemap_invalidate_unlock(bdev->bd_inode->i_mapping);
+ inode_unlock(bdev->bd_inode);
return err;
}
@@ -184,6 +188,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
return -EINVAL;
/* Invalidate the page cache, including dirty pages */
+ inode_lock(inode);
filemap_invalidate_lock(inode->i_mapping);
err = truncate_bdev_range(bdev, mode, start, end);
if (err)
@@ -194,6 +199,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
fail:
filemap_invalidate_unlock(inode->i_mapping);
+ inode_unlock(inode);
return err;
}
--
2.47.3