Message-ID: <20231213152350.431591-16-dhowells@redhat.com>
Date: Wed, 13 Dec 2023 15:23:25 +0000
From: David Howells <dhowells@...hat.com>
To: Jeff Layton <jlayton@...nel.org>,
Steve French <smfrench@...il.com>
Cc: David Howells <dhowells@...hat.com>,
Matthew Wilcox <willy@...radead.org>,
Marc Dionne <marc.dionne@...istor.com>,
Paulo Alcantara <pc@...guebit.com>,
Shyam Prasad N <sprasad@...rosoft.com>,
Tom Talpey <tom@...pey.com>,
Dominique Martinet <asmadeus@...ewreck.org>,
Eric Van Hensbergen <ericvh@...nel.org>,
Ilya Dryomov <idryomov@...il.com>,
Christian Brauner <christian@...uner.io>,
linux-cachefs@...hat.com,
linux-afs@...ts.infradead.org,
linux-cifs@...r.kernel.org,
linux-nfs@...r.kernel.org,
ceph-devel@...r.kernel.org,
v9fs@...ts.linux.dev,
linux-fsdevel@...r.kernel.org,
linux-mm@...ck.org,
netdev@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH v4 15/39] netfs: Add bounce buffering support
Add a second xarray struct to netfs_io_request to hold a bounce buffer for
when we have to deal with encrypted or compressed data, or when we have to
up/download data in blocks larger than those we were asked for.
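As a rough illustrative sketch (not part of this patch), a filesystem that
stores its content encrypted might opt a read into bounce buffering from its
->init_request() hook; netfs_begin_read() will then point the I/O iterator at
rreq->bounce rather than directly at rreq->iter.  The myfs_*() names below are
hypothetical:

	/* Hypothetical sketch: ask netfslib to download into the bounce
	 * buffer so that only decoded data reaches the unencrypted-side
	 * iterator.  myfs_inode_is_encrypted() is a made-up helper.
	 */
	static int myfs_init_request(struct netfs_io_request *rreq,
				     struct file *file)
	{
		if (myfs_inode_is_encrypted(rreq->inode))
			__set_bit(NETFS_RREQ_USE_BOUNCE_BUFFER, &rreq->flags);
		return 0;
	}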
Signed-off-by: David Howells <dhowells@...hat.com>
cc: Jeff Layton <jlayton@...nel.org>
cc: linux-cachefs@...hat.com
cc: linux-fsdevel@...r.kernel.org
cc: linux-mm@...ck.org
---
 fs/netfs/io.c         | 6 +++++-
 fs/netfs/objects.c    | 3 +++
 include/linux/netfs.h | 2 ++
 3 files changed, 10 insertions(+), 1 deletion(-)
diff --git a/fs/netfs/io.c b/fs/netfs/io.c
index e9d408e211b8..d8e9cd6ce338 100644
--- a/fs/netfs/io.c
+++ b/fs/netfs/io.c
@@ -643,7 +643,11 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
 		return -EIO;
 	}
 
-	rreq->io_iter = rreq->iter;
+	if (test_bit(NETFS_RREQ_USE_BOUNCE_BUFFER, &rreq->flags))
+		iov_iter_xarray(&rreq->io_iter, ITER_DEST, &rreq->bounce,
+				rreq->start, rreq->len);
+	else
+		rreq->io_iter = rreq->iter;
 
 	INIT_WORK(&rreq->work, netfs_rreq_work);
 
diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c
index 4df5e5eeada6..9f3f33c93317 100644
--- a/fs/netfs/objects.c
+++ b/fs/netfs/objects.c
@@ -35,12 +35,14 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
 	rreq->inode	= inode;
 	rreq->i_size	= i_size_read(inode);
 	rreq->debug_id	= atomic_inc_return(&debug_ids);
+	xa_init(&rreq->bounce);
 	INIT_LIST_HEAD(&rreq->subrequests);
 	refcount_set(&rreq->ref, 1);
 	__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
 	if (rreq->netfs_ops->init_request) {
 		ret = rreq->netfs_ops->init_request(rreq, file);
 		if (ret < 0) {
+			xa_destroy(&rreq->bounce);
 			kfree(rreq);
 			return ERR_PTR(ret);
 		}
@@ -94,6 +96,7 @@ static void netfs_free_request(struct work_struct *work)
 		}
 		kvfree(rreq->direct_bv);
 	}
+	netfs_clear_buffer(&rreq->bounce);
 	kfree_rcu(rreq, rcu);
 	netfs_stat_d(&netfs_n_rh_rreq);
 }
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index c05150f51beb..8a5b8e7bc358 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -194,6 +194,7 @@ struct netfs_io_request {
 	struct iov_iter		iter;		/* Unencrypted-side iterator */
 	struct iov_iter		io_iter;	/* I/O (Encrypted-side) iterator */
 	void			*netfs_priv;	/* Private data for the netfs */
+	struct xarray		bounce;		/* Bounce buffer (eg. for crypto/compression) */
 	struct bio_vec		*direct_bv	/* DIO buffer list (when handling iovec-iter) */
 		__counted_by(direct_bv_count);
 	unsigned int		direct_bv_count; /* Number of elements in direct_bv[] */
@@ -216,6 +217,7 @@ struct netfs_io_request {
 #define NETFS_RREQ_DONT_UNLOCK_FOLIOS	3	/* Don't unlock the folios on completion */
 #define NETFS_RREQ_FAILED		4	/* The request failed */
 #define NETFS_RREQ_IN_PROGRESS		5	/* Unlocked when the request completes */
+#define NETFS_RREQ_USE_BOUNCE_BUFFER	6	/* Use bounce buffer */
 
 	const struct netfs_request_ops *netfs_ops;
 };
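
For illustration only (again, not part of this patch): once the download into
the bounce xarray has completed, the data would be decoded across to the
unencrypted-side iterator, e.g. by pointing an ITER_SOURCE iterator at
rreq->bounce.  myfs_decode_into() below is a made-up helper:

	/* Hypothetical post-I/O step: decrypt/decompress from the bounce
	 * buffer into the unencrypted-side iterator (rreq->iter).
	 */
	static int myfs_decode_from_bounce(struct netfs_io_request *rreq)
	{
		struct iov_iter source;

		iov_iter_xarray(&source, ITER_SOURCE, &rreq->bounce,
				rreq->start, rreq->len);
		return myfs_decode_into(rreq, &source, &rreq->iter); /* made up */
	}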