Message-ID: <20240607145902.1137853-8-kernel@pankajraghav.com>
Date: Fri, 7 Jun 2024 14:58:58 +0000
From: "Pankaj Raghav (Samsung)" <kernel@...kajraghav.com>
To: david@...morbit.com,
djwong@...nel.org,
chandan.babu@...cle.com,
brauner@...nel.org,
akpm@...ux-foundation.org,
willy@...radead.org
Cc: mcgrof@...nel.org,
linux-mm@...ck.org,
hare@...e.de,
linux-kernel@...r.kernel.org,
yang@...amperecomputing.com,
Zi Yan <zi.yan@...t.com>,
linux-xfs@...r.kernel.org,
p.raghav@...sung.com,
linux-fsdevel@...r.kernel.org,
kernel@...kajraghav.com,
hch@....de,
gost.dev@...sung.com,
cl@...amperecomputing.com,
john.g.garry@...cle.com
Subject: [PATCH v7 07/11] iomap: fix iomap_dio_zero() for fs bs > system page size
From: Pankaj Raghav <p.raghav@...sung.com>
iomap_dio_zero() will pad a fs block with zeroes if the direct IO size is
smaller than the fs block size. iomap_dio_zero() has an implicit
assumption that the fs block size is smaller than the page size, which is
true for most filesystems at the moment.

If the block size is larger than the page size, this will send the
contents of the page next to the zero page (as len > PAGE_SIZE) to the
underlying block device, causing FS corruption.

iomap is generic infrastructure and should not make any assumptions about
the fs block size and the page size of the system.
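To make the failure mode concrete, here is a minimal sketch of what the
pre-patch code effectively does (illustration only, not part of the
patch; it assumes 4k pages, a 16k fs block, and a 'bio' set up as in
iomap_dio_zero()):

	struct page *page = ZERO_PAGE(0);	/* a single zeroed 4k page */
	unsigned int len = 16384;		/* sub-block span, > PAGE_SIZE */

	/*
	 * This adds one bvec covering 'len' bytes from the start of
	 * 'page'. Only the first 4k of that span is the zero page; the
	 * remaining 12k is whatever memory happens to sit next to it,
	 * and that data is written out to the block device.
	 */
	__bio_add_page(bio, page, len, 0);

The fix below replaces the single zero page with a 64k zeroed buffer
allocated once at init time, so any sub-block span up to 64k is backed
entirely by zeroes.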
Signed-off-by: Pankaj Raghav <p.raghav@...sung.com>
Reviewed-by: Hannes Reinecke <hare@...e.de>
---
 fs/internal.h          |  5 +++++
 fs/iomap/buffered-io.c |  6 ++++++
 fs/iomap/direct-io.c   | 26 ++++++++++++++++++++++++--
 3 files changed, 35 insertions(+), 2 deletions(-)
diff --git a/fs/internal.h b/fs/internal.h
index 84f371193f74..30217f0ff4c6 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -35,6 +35,11 @@ static inline void bdev_cache_init(void)
 int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
 		get_block_t *get_block, const struct iomap *iomap);
 
+/*
+ * iomap/direct-io.c
+ */
+int iomap_dio_init(void);
+
 /*
  * char_dev.c
  */
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 49938419fcc7..9f791db473e4 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1990,6 +1990,12 @@ EXPORT_SYMBOL_GPL(iomap_writepages);
 
 static int __init iomap_init(void)
 {
+	int ret;
+
+	ret = iomap_dio_init();
+	if (ret)
+		return ret;
+
 	return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
 			   offsetof(struct iomap_ioend, io_bio),
 			   BIOSET_NEED_BVECS);
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index f3b43d223a46..b95600b254a3 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -27,6 +27,13 @@
 #define IOMAP_DIO_WRITE		(1U << 30)
 #define IOMAP_DIO_DIRTY		(1U << 31)
 
+/*
+ * Used for sub block zeroing in iomap_dio_zero()
+ */
+#define ZERO_FSB_SIZE (65536)
+#define ZERO_FSB_ORDER (get_order(ZERO_FSB_SIZE))
+static struct page *zero_fs_block;
+
 struct iomap_dio {
 	struct kiocb		*iocb;
 	const struct iomap_dio_ops *dops;
@@ -52,6 +59,16 @@ struct iomap_dio {
 	};
 };
 
+int iomap_dio_init(void)
+{
+	zero_fs_block = alloc_pages(GFP_KERNEL | __GFP_ZERO, ZERO_FSB_ORDER);
+
+	if (!zero_fs_block)
+		return -ENOMEM;
+
+	return 0;
+}
+
 static struct bio *iomap_dio_alloc_bio(const struct iomap_iter *iter,
 		struct iomap_dio *dio, unsigned short nr_vecs, blk_opf_t opf)
 {
@@ -236,17 +253,22 @@ static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
 		loff_t pos, unsigned len)
 {
 	struct inode *inode = file_inode(dio->iocb->ki_filp);
-	struct page *page = ZERO_PAGE(0);
 	struct bio *bio;
 
+	/*
+	 * Max block size supported is 64k
+	 */
+	WARN_ON_ONCE(len > ZERO_FSB_SIZE);
+
 	bio = iomap_dio_alloc_bio(iter, dio, 1, REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
 	fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
 				  GFP_KERNEL);
+
 	bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
 	bio->bi_private = dio;
 	bio->bi_end_io = iomap_dio_bio_end_io;
 
-	__bio_add_page(bio, page, len, 0);
+	__bio_add_page(bio, zero_fs_block, len, 0);
 	iomap_dio_submit_bio(iter, dio, bio, pos);
 }
 
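A note on the allocation size (illustration only, not part of the
patch): ZERO_FSB_ORDER is get_order(65536), the smallest order whose
page count covers 64k, so iomap_dio_init() allocates one contiguous,
pre-zeroed 64k region at init time. A userspace sketch of the same
arithmetic, assuming 4k base pages (order_for() is a hypothetical
stand-in for the kernel's get_order()):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4k base pages */

/*
 * Stand-in for the kernel's get_order(): the smallest 'order' such
 * that (1 << order) pages cover 'size' bytes.
 */
static int order_for(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	/* 65536 bytes -> 16 pages -> order 4, i.e. one 64k allocation */
	printf("order_for(65536) = %d\n", order_for(65536));
	return 0;
}

With a 64k base page size the order works out to 0, so the same define
still allocates exactly one page.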
--
2.44.1