Message-ID: <20210519060045.1051226-7-ruansy.fnst@fujitsu.com>
Date: Wed, 19 May 2021 14:00:44 +0800
From: Shiyang Ruan <ruansy.fnst@...itsu.com>
To: <linux-kernel@...r.kernel.org>, <linux-xfs@...r.kernel.org>,
<linux-nvdimm@...ts.01.org>, <linux-fsdevel@...r.kernel.org>
CC: <darrick.wong@...cle.com>, <dan.j.williams@...el.com>,
<willy@...radead.org>, <viro@...iv.linux.org.uk>,
<david@...morbit.com>, <hch@....de>, <rgoldwyn@...e.de>
Subject: [PATCH v6 6/7] fs/xfs: Handle CoW for fsdax write() path
In fsdax mode, WRITE and ZERO on a shared extent need CoW to be performed.
After CoW, the newly allocated extents need to be remapped to the file, so
add an iomap_end for the dax write ops to do the remapping work.
Signed-off-by: Shiyang Ruan <ruansy.fnst@...itsu.com>
---
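For context, the reason an .iomap_end hook is sufficient here: the generic
iomap machinery (fs/iomap/apply.c in this tree) calls ->iomap_begin() to get
a mapping, runs the actor (dax_iomap_actor() for fsdax) to copy the data, and
only then calls ->iomap_end() with the number of bytes written.  Roughly, as
a simplified sketch (error paths and srcmap handling trimmed, not the exact
upstream code):

loff_t
iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
	    const struct iomap_ops *ops, void *data, iomap_actor_t actor)
{
	struct iomap iomap = { .type = IOMAP_HOLE };
	struct iomap srcmap = { .type = IOMAP_HOLE };
	loff_t written = 0, ret;

	/* Ask the filesystem for a mapping covering [pos, pos + length). */
	ret = ops->iomap_begin(inode, pos, length, flags, &iomap, &srcmap);
	if (ret)
		return ret;

	/* Copy the data; for fsdax this ends up in dax_iomap_actor(). */
	written = actor(inode, pos, length, data, &iomap, &srcmap);

	/*
	 * Let the filesystem finish the write.  This is where the new
	 * xfs_dax_write_iomap_end() below updates i_size and calls
	 * xfs_reflink_end_cow() to remap the freshly written extents.
	 */
	if (ops->iomap_end)
		ret = ops->iomap_end(inode, pos, length,
				     written > 0 ? written : 0, flags, &iomap);

	return written ? written : ret;
}

So reusing xfs_direct_write_iomap_begin() and only adding the iomap_end
callback covers dax_iomap_rw(), iomap_zero_range() and iomap_truncate_page()
for fsdax.
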
fs/xfs/xfs_bmap_util.c | 3 +--
fs/xfs/xfs_file.c | 9 +++------
fs/xfs/xfs_iomap.c | 38 +++++++++++++++++++++++++++++++++++++-
fs/xfs/xfs_iomap.h | 24 ++++++++++++++++++++++++
fs/xfs/xfs_iops.c | 7 +++----
fs/xfs/xfs_reflink.c | 3 +--
6 files changed, 69 insertions(+), 15 deletions(-)
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index a5e9d7d34023..2a36dc93ff27 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -965,8 +965,7 @@ xfs_free_file_space(
return 0;
if (offset + len > XFS_ISIZE(ip))
len = XFS_ISIZE(ip) - offset;
- error = iomap_zero_range(VFS_I(ip), offset, len, NULL,
- &xfs_buffered_write_iomap_ops);
+ error = xfs_iomap_zero_range(ip, offset, len, NULL);
if (error)
return error;
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 396ef36dcd0a..38d8eca05aee 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -684,11 +684,8 @@ xfs_file_dax_write(
pos = iocb->ki_pos;
trace_xfs_file_dax_write(iocb, from);
- ret = dax_iomap_rw(iocb, from, &xfs_direct_write_iomap_ops);
- if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
- i_size_write(inode, iocb->ki_pos);
- error = xfs_setfilesize(ip, pos, ret);
- }
+ ret = dax_iomap_rw(iocb, from, &xfs_dax_write_iomap_ops);
+
out:
if (iolock)
xfs_iunlock(ip, iolock);
@@ -1309,7 +1306,7 @@ __xfs_filemap_fault(
ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
(write_fault && !vmf->cow_page) ?
- &xfs_direct_write_iomap_ops :
+ &xfs_dax_write_iomap_ops :
&xfs_read_iomap_ops);
if (ret & VM_FAULT_NEEDDSYNC)
ret = dax_finish_sync_fault(vmf, pe_size, pfn);
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index d154f42e2dc6..938723aa137d 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -761,7 +761,8 @@ xfs_direct_write_iomap_begin(
/* may drop and re-acquire the ilock */
error = xfs_reflink_allocate_cow(ip, &imap, &cmap, &shared,
- &lockmode, flags & IOMAP_DIRECT);
+ &lockmode,
+ (flags & IOMAP_DIRECT) || IS_DAX(inode));
if (error)
goto out_unlock;
if (shared)
@@ -854,6 +855,41 @@ const struct iomap_ops xfs_direct_write_iomap_ops = {
.iomap_begin = xfs_direct_write_iomap_begin,
};
+static int
+xfs_dax_write_iomap_end(
+ struct inode *inode,
+ loff_t pos,
+ loff_t length,
+ ssize_t written,
+ unsigned int flags,
+ struct iomap *iomap)
+{
+ int error = 0;
+ struct xfs_inode *ip = XFS_I(inode);
+ bool cow = xfs_is_cow_inode(ip);
+
+ if (!written)
+ return 0;
+
+ if (pos + written > i_size_read(inode) && !(flags & IOMAP_FAULT)) {
+ i_size_write(inode, pos + written);
+ error = xfs_setfilesize(ip, pos, written);
+ if (error && cow) {
+ xfs_reflink_cancel_cow_range(ip, pos, written, true);
+ return error;
+ }
+ }
+ if (cow)
+ error = xfs_reflink_end_cow(ip, pos, written);
+
+ return error;
+}
+
+const struct iomap_ops xfs_dax_write_iomap_ops = {
+ .iomap_begin = xfs_direct_write_iomap_begin,
+ .iomap_end = xfs_dax_write_iomap_end,
+};
+
static int
xfs_buffered_write_iomap_begin(
struct inode *inode,
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index 7d3703556d0e..fbacf638ab21 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -42,8 +42,32 @@ xfs_aligned_fsb_count(
extern const struct iomap_ops xfs_buffered_write_iomap_ops;
extern const struct iomap_ops xfs_direct_write_iomap_ops;
+extern const struct iomap_ops xfs_dax_write_iomap_ops;
extern const struct iomap_ops xfs_read_iomap_ops;
extern const struct iomap_ops xfs_seek_iomap_ops;
extern const struct iomap_ops xfs_xattr_iomap_ops;
+static inline int
+xfs_iomap_zero_range(
+ struct xfs_inode *ip,
+ loff_t offset,
+ loff_t len,
+ bool *did_zero)
+{
+ return iomap_zero_range(VFS_I(ip), offset, len, did_zero,
+ IS_DAX(VFS_I(ip)) ? &xfs_dax_write_iomap_ops
+ : &xfs_buffered_write_iomap_ops);
+}
+
+static inline int
+xfs_iomap_truncate_page(
+ struct xfs_inode *ip,
+ loff_t pos,
+ bool *did_zero)
+{
+ return iomap_truncate_page(VFS_I(ip), pos, did_zero,
+ IS_DAX(VFS_I(ip)) ? &xfs_dax_write_iomap_ops
+ : &xfs_buffered_write_iomap_ops);
+}
+
#endif /* __XFS_IOMAP_H__*/
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index dfe24b7f26e5..6d936c3e1a6e 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -911,8 +911,8 @@ xfs_setattr_size(
*/
if (newsize > oldsize) {
trace_xfs_zero_eof(ip, oldsize, newsize - oldsize);
- error = iomap_zero_range(inode, oldsize, newsize - oldsize,
- &did_zeroing, &xfs_buffered_write_iomap_ops);
+ error = xfs_iomap_zero_range(ip, oldsize, newsize - oldsize,
+ &did_zeroing);
} else {
/*
* iomap won't detect a dirty page over an unwritten block (or a
@@ -924,8 +924,7 @@ xfs_setattr_size(
newsize);
if (error)
return error;
- error = iomap_truncate_page(inode, newsize, &did_zeroing,
- &xfs_buffered_write_iomap_ops);
+ error = xfs_iomap_truncate_page(ip, newsize, &did_zeroing);
}
if (error)
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index d25434f93235..9a780948dbd0 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -1266,8 +1266,7 @@ xfs_reflink_zero_posteof(
return 0;
trace_xfs_zero_eof(ip, isize, pos - isize);
- return iomap_zero_range(VFS_I(ip), isize, pos - isize, NULL,
- &xfs_buffered_write_iomap_ops);
+ return xfs_iomap_zero_range(ip, isize, pos - isize, NULL);
}
/*
--
2.31.1