Message-ID: <f4abda9c0c835d9a50b644fdbec8d43269f6b0f7.1565609891.git.mbobrowski@mbobrowski.org>
Date: Mon, 12 Aug 2019 22:53:11 +1000
From: Matthew Bobrowski <mbobrowski@...browski.org>
To: linux-ext4@...r.kernel.org
Cc: linux-fsdevel@...r.kernel.org, jack@...e.cz, tytso@....edu,
riteshh@...ux.ibm.com
Subject: [PATCH 3/5] iomap: modify ->end_io() calling convention

Modify the calling convention for the iomap ->end_io() callback: rather
than overloading a single 'size' argument with either dio->error or
dio->size, pass dio->size and dio->error as separate arguments.

Currently, when an error occurs during a write, the ->end_io() callback
cannot determine whether any blocks were allocated beyond the current
EOF and subsequently written to, so it cannot decide whether to take
the truncate-failed-write path. Passing both dio->size and dio->error
makes such checks possible within the callback.
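To illustrate, with both values available a filesystem callback could be
structured roughly as follows. This is a sketch only, not part of this
patch; fs_dio_write_end_io() and fs_truncate_failed_write() are
hypothetical names standing in for a filesystem's own completion handler
and post-EOF cleanup path:

	/*
	 * Illustrative only: fs_truncate_failed_write() is a
	 * hypothetical helper, not an existing kernel function.
	 */
	static int fs_dio_write_end_io(struct kiocb *iocb, ssize_t size,
			ssize_t error, unsigned int flags)
	{
		struct inode *inode = file_inode(iocb->ki_filp);

		if (error) {
			/*
			 * 'error' and 'size' arrive separately, so the
			 * handler knows both that the write failed and
			 * how far it got, and can trim blocks allocated
			 * beyond EOF for the failed portion.
			 */
			fs_truncate_failed_write(inode, iocb->ki_pos + size);
			return error;
		}
		if (!size)
			return 0;

		/* Normal completion processing for 'size' bytes. */
		return 0;
	}

A callback with nothing to clean up can simply return 'error' whenever
it is set, preserving today's behaviour.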
Signed-off-by: Matthew Bobrowski <mbobrowski@...browski.org>
---
 fs/iomap/direct-io.c  |  9 +++------
 fs/xfs/xfs_file.c     | 17 +++++++++--------
 include/linux/iomap.h |  4 ++--
 3 files changed, 14 insertions(+), 16 deletions(-)

diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index 10517cea9682..2ccf1c6460d4 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -77,13 +77,10 @@ static ssize_t iomap_dio_complete(struct iomap_dio *dio)
 	loff_t offset = iocb->ki_pos;
 	ssize_t ret;
 
-	if (dio->end_io) {
-		ret = dio->end_io(iocb,
-				dio->error ? dio->error : dio->size,
-				dio->flags);
-	} else {
+	if (dio->end_io)
+		ret = dio->end_io(iocb, dio->size, dio->error, dio->flags);
+	else
 		ret = dio->error;
-	}
 
 	if (likely(!ret)) {
 		ret = dio->size;
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 28101bbc0b78..f2bc3ac4a60e 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -369,21 +369,22 @@ static int
 xfs_dio_write_end_io(
 	struct kiocb		*iocb,
 	ssize_t			size,
+	ssize_t			error,
 	unsigned		flags)
 {
 	struct inode		*inode = file_inode(iocb->ki_filp);
 	struct xfs_inode	*ip = XFS_I(inode);
 	loff_t			offset = iocb->ki_pos;
 	unsigned int		nofs_flag;
-	int			error = 0;
+	int			ret = 0;
 
 	trace_xfs_end_io_direct_write(ip, offset, size);
 
 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
 		return -EIO;
 
-	if (size <= 0)
-		return size;
+	if (error || !size)
+		return error ? error : size;
 
 	/*
 	 * Capture amount written on completion as we can't reliably account
@@ -399,8 +400,8 @@
 	nofs_flag = memalloc_nofs_save();
 
 	if (flags & IOMAP_DIO_COW) {
-		error = xfs_reflink_end_cow(ip, offset, size);
-		if (error)
+		ret = xfs_reflink_end_cow(ip, offset, size);
+		if (ret)
 			goto out;
 	}
 
@@ -411,7 +412,7 @@
 	 * they are converted.
 	 */
 	if (flags & IOMAP_DIO_UNWRITTEN) {
-		error = xfs_iomap_write_unwritten(ip, offset, size, true);
+		ret = xfs_iomap_write_unwritten(ip, offset, size, true);
 		goto out;
 	}
 
@@ -430,14 +431,14 @@ xfs_dio_write_end_io(
if (offset + size > i_size_read(inode)) {
i_size_write(inode, offset + size);
spin_unlock(&ip->i_flags_lock);
- error = xfs_setfilesize(ip, offset, size);
+ ret = xfs_setfilesize(ip, offset, size);
} else {
spin_unlock(&ip->i_flags_lock);
}
out:
memalloc_nofs_restore(nofs_flag);
- return error;
+ return ret;
}
/*
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index bc499ceae392..900284e5c06c 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -188,8 +188,8 @@ sector_t iomap_bmap(struct address_space *mapping, sector_t bno,
  */
 #define IOMAP_DIO_UNWRITTEN	(1 << 0)	/* covers unwritten extent(s) */
 #define IOMAP_DIO_COW		(1 << 1)	/* covers COW extent(s) */
-typedef int (iomap_dio_end_io_t)(struct kiocb *iocb, ssize_t ret,
-		unsigned flags);
+typedef int (iomap_dio_end_io_t)(struct kiocb *iocb, ssize_t size,
+		ssize_t error, unsigned int flags);
 ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 		const struct iomap_ops *ops, iomap_dio_end_io_t end_io);
 int iomap_dio_iopoll(struct kiocb *kiocb, bool spin);
--
2.16.4
--
Matthew Bobrowski