Message-ID: <20240625114420.719014-7-kernel@pankajraghav.com>
Date: Tue, 25 Jun 2024 11:44:16 +0000
From: "Pankaj Raghav (Samsung)" <kernel@...kajraghav.com>
To: david@...morbit.com,
willy@...radead.org,
chandan.babu@...cle.com,
djwong@...nel.org,
brauner@...nel.org,
akpm@...ux-foundation.org
Cc: linux-kernel@...r.kernel.org,
yang@...amperecomputing.com,
linux-mm@...ck.org,
john.g.garry@...cle.com,
linux-fsdevel@...r.kernel.org,
hare@...e.de,
p.raghav@...sung.com,
mcgrof@...nel.org,
gost.dev@...sung.com,
cl@...amperecomputing.com,
linux-xfs@...r.kernel.org,
kernel@...kajraghav.com,
hch@....de,
Zi Yan <zi.yan@...t.com>
Subject: [PATCH v8 06/10] iomap: fix iomap_dio_zero() for fs bs > system page size
From: Pankaj Raghav <p.raghav@...sung.com>
iomap_dio_zero() pads a fs block with zeroes if the direct IO size is
smaller than the fs block size. It has an implicit assumption that the
fs block size does not exceed the page size. This is true for most
filesystems at the moment.

If the block size > page size, this will send the contents of the
memory adjacent to the zero page (as len > PAGE_SIZE) to the underlying
block device, causing FS corruption.
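To make the failure concrete, here is a minimal userspace sketch (an
illustration only, not kernel code; the 4k page size, the 64k block
size, and the *_ASSUMED names are assumptions for the example):

  #include <stdio.h>

  #define PAGE_SIZE_ASSUMED     4096u   /* system page size */
  #define FS_BLOCK_SIZE_ASSUMED 65536u  /* fs block size > page size */

  int main(void)
  {
          /* a 4k direct write at the start of a 64k fs block */
          unsigned int written = 4096;
          unsigned int pad = FS_BLOCK_SIZE_ASSUMED - written;

          /*
           * Pre-patch, iomap_dio_zero() added this padding to a bio
           * backed by the single-page ZERO_PAGE(0):
           *
           *         __bio_add_page(bio, ZERO_PAGE(0), pad, 0);
           *
           * With pad > PAGE_SIZE, the bio segment extends past the
           * zero page into whatever memory follows it, and that
           * memory gets written to the block device.
           */
          printf("pad = %u bytes, %u pages beyond ZERO_PAGE(0)\n",
                 pad, pad / PAGE_SIZE_ASSUMED - 1);
          return 0;
  }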
iomap is generic infrastructure and it should not make any assumptions
about the fs block size or the system page size.
Signed-off-by: Pankaj Raghav <p.raghav@...sung.com>
Reviewed-by: Hannes Reinecke <hare@...e.de>
---
 fs/iomap/buffered-io.c |  4 ++--
 fs/iomap/direct-io.c   | 30 ++++++++++++++++++++++++++++--
 2 files changed, 30 insertions(+), 4 deletions(-)

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index f420c53d86ac..9a9e94c7ed1d 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -2007,10 +2007,10 @@ iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
 }
 EXPORT_SYMBOL_GPL(iomap_writepages);
 
-static int __init iomap_init(void)
+static int __init iomap_pagecache_init(void)
 {
 	return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
 			   offsetof(struct iomap_ioend, io_bio),
 			   BIOSET_NEED_BVECS);
 }
-fs_initcall(iomap_init);
+fs_initcall(iomap_pagecache_init);
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index f3b43d223a46..61d09d2364f7 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -11,6 +11,7 @@
 #include <linux/iomap.h>
 #include <linux/backing-dev.h>
 #include <linux/uio.h>
+#include <linux/set_memory.h>
 #include <linux/task_io_accounting_ops.h>
 #include "trace.h"
 
@@ -27,6 +28,13 @@
 #define IOMAP_DIO_WRITE		(1U << 30)
 #define IOMAP_DIO_DIRTY		(1U << 31)
 
+/*
+ * Used for sub block zeroing in iomap_dio_zero()
+ */
+#define ZERO_PAGE_64K_SIZE	(65536)
+#define ZERO_PAGE_64K_ORDER	(get_order(ZERO_PAGE_64K_SIZE))
+static struct page *zero_page_64k;
+
 struct iomap_dio {
 	struct kiocb		*iocb;
 	const struct iomap_dio_ops *dops;
@@ -236,9 +244,13 @@ static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
 		loff_t pos, unsigned len)
 {
 	struct inode *inode = file_inode(dio->iocb->ki_filp);
-	struct page *page = ZERO_PAGE(0);
 	struct bio *bio;
 
+	/*
+	 * Max block size supported is 64k
+	 */
+	WARN_ON_ONCE(len > ZERO_PAGE_64K_SIZE);
+
 	bio = iomap_dio_alloc_bio(iter, dio, 1, REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
 	fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
 				  GFP_KERNEL);
@@ -246,7 +258,7 @@ static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
 	bio->bi_private = dio;
 	bio->bi_end_io = iomap_dio_bio_end_io;
 
-	__bio_add_page(bio, page, len, 0);
+	__bio_add_page(bio, zero_page_64k, len, 0);
 	iomap_dio_submit_bio(iter, dio, bio, pos);
 }
 
@@ -753,3 +765,17 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 	return iomap_dio_complete(dio);
 }
 EXPORT_SYMBOL_GPL(iomap_dio_rw);
+
+static int __init iomap_dio_init(void)
+{
+	zero_page_64k = alloc_pages(GFP_KERNEL | __GFP_ZERO,
+				    ZERO_PAGE_64K_ORDER);
+
+	if (!zero_page_64k)
+		return -ENOMEM;
+
+	set_memory_ro((unsigned long)page_address(zero_page_64k),
+		      1U << ZERO_PAGE_64K_ORDER);
+	return 0;
+}
+fs_initcall(iomap_dio_init);
--
2.44.1