Message-ID: <20231207212206.1379128-36-dhowells@redhat.com>
Date: Thu, 7 Dec 2023 21:21:42 +0000
From: David Howells <dhowells@...hat.com>
To: Jeff Layton <jlayton@...nel.org>,
Steve French <smfrench@...il.com>
Cc: David Howells <dhowells@...hat.com>,
Matthew Wilcox <willy@...radead.org>,
Marc Dionne <marc.dionne@...istor.com>,
Paulo Alcantara <pc@...guebit.com>,
Shyam Prasad N <sprasad@...rosoft.com>,
Tom Talpey <tom@...pey.com>,
Dominique Martinet <asmadeus@...ewreck.org>,
Eric Van Hensbergen <ericvh@...nel.org>,
Ilya Dryomov <idryomov@...il.com>,
Christian Brauner <christian@...uner.io>,
linux-cachefs@...hat.com,
linux-afs@...ts.infradead.org,
linux-cifs@...r.kernel.org,
linux-nfs@...r.kernel.org,
ceph-devel@...r.kernel.org,
v9fs@...ts.linux.dev,
linux-fsdevel@...r.kernel.org,
linux-mm@...ck.org,
netdev@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH v3 35/59] netfs: Provide minimum blocksize parameter

Add a minimum blocksize parameter to the netfs_inode struct.  This can be
used, for instance, to force I/O alignment for content encryption.  Setting
it also forces the use of an RMW (read-modify-write) cycle if a write we
want to do doesn't meet the block alignment requirements.
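
For illustration only (this fragment is not part of the patch; "pos" and
"len" stand for an arbitrary write's file position and length), the
widening that min_bshift imposes works out as:

	/* Minimal sketch: widen a write to the minimum block size.
	 * With min_bshift = 12 (4KiB blocks), a 10-byte write at file
	 * offset 4100 covers the block [4096, 8192), which must be
	 * read, modified and written back as a whole.
	 */
	unsigned long long bsize  = 1ULL << ctx->min_bshift;
	unsigned long long rstart = round_down(pos, bsize);
	unsigned long long rend   = round_up(pos + len, bsize);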
Signed-off-by: David Howells <dhowells@...hat.com>
cc: Jeff Layton <jlayton@...nel.org>
cc: linux-cachefs@...hat.com
cc: linux-fsdevel@...r.kernel.org
cc: linux-mm@...ck.org
---
 fs/netfs/buffered_read.c  | 26 ++++++++++++++++++++++----
 fs/netfs/buffered_write.c |  3 ++-
 fs/netfs/direct_read.c    |  3 ++-
 include/linux/netfs.h     |  2 ++
 4 files changed, 28 insertions(+), 6 deletions(-)

diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index a59e7b2edaac..0d47e5ea6870 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -521,14 +521,26 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
 	struct address_space *mapping = folio_file_mapping(folio);
 	struct netfs_inode *ctx = netfs_inode(mapping->host);
 	unsigned long long start = folio_pos(folio);
-	size_t flen = folio_size(folio);
+	unsigned long long i_size, rstart, end;
+	size_t rlen;
 	int ret;
 
-	_enter("%zx @%llx", flen, start);
+	DEFINE_READAHEAD(ractl, file, NULL, mapping, folio_index(folio));
+
+	_enter("%zx @%llx", len, start);
 
 	ret = -ENOMEM;
 
-	rreq = netfs_alloc_request(mapping, file, start, flen,
+	i_size = i_size_read(mapping->host);
+	end = round_up(start + len, 1U << ctx->min_bshift);
+	if (end > i_size) {
+		unsigned long long limit = round_up(start + len, PAGE_SIZE);
+		end = max(limit, round_up(i_size, PAGE_SIZE));
+	}
+	rstart = round_down(start, 1U << ctx->min_bshift);
+	rlen   = end - rstart;
+
+	rreq = netfs_alloc_request(mapping, file, rstart, rlen,
 				   NETFS_READ_FOR_WRITE);
 	if (IS_ERR(rreq)) {
 		ret = PTR_ERR(rreq);
@@ -542,7 +554,13 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
 		goto error_put;
 
 	netfs_stat(&netfs_n_rh_write_begin);
-	trace_netfs_read(rreq, start, flen, netfs_read_trace_prefetch_for_write);
+	trace_netfs_read(rreq, rstart, rlen, netfs_read_trace_prefetch_for_write);
+
+	/* Expand the request to meet caching requirements and download
+	 * preferences.
+	 */
+	ractl._nr_pages = folio_nr_pages(folio);
+	netfs_rreq_expand(rreq, &ractl);
 
 	/* Set up the output buffer */
 	iov_iter_xarray(&rreq->iter, ITER_DEST, &mapping->i_pages,
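
As a worked example of the bounding arithmetic above (values chosen for
illustration, assuming PAGE_SIZE = 4KiB):

	min_bshift = 16 (64KiB blocks), start = 0x3000, len = 0x1000,
	i_size = 0x3800

	end    = round_up(0x4000, 0x10000)             = 0x10000 (> i_size)
	limit  = round_up(0x4000, 0x1000)              = 0x4000
	end    = max(0x4000, round_up(0x3800, 0x1000)) = 0x4000
	rstart = round_down(0x3000, 0x10000)           = 0x0
	rlen   = end - rstart                          = 0x4000

so rather than pulling in a full 64KiB block that lies mostly beyond the
end of file, the request is trimmed back to page granularity past EOF.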

diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
index 42f89f8ea8af..8339e3f753af 100644
--- a/fs/netfs/buffered_write.c
+++ b/fs/netfs/buffered_write.c
@@ -80,7 +80,8 @@ static enum netfs_how_to_modify netfs_how_to_modify(struct netfs_inode *ctx,
 	if (file->f_mode & FMODE_READ)
 		return NETFS_JUST_PREFETCH;
 
-	if (netfs_is_cache_enabled(ctx))
+	if (netfs_is_cache_enabled(ctx) ||
+	    ctx->min_bshift > 0)
 		return NETFS_JUST_PREFETCH;
 
 	if (!finfo)

diff --git a/fs/netfs/direct_read.c b/fs/netfs/direct_read.c
index 1d26468aafd9..52ad8fa66dd5 100644
--- a/fs/netfs/direct_read.c
+++ b/fs/netfs/direct_read.c
@@ -185,7 +185,8 @@ static ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_
 	 * will then need to pad the request out to the minimum block size.
 	 */
 	if (test_bit(NETFS_RREQ_USE_BOUNCE_BUFFER, &rreq->flags)) {
-		start = rreq->start;
+		min_bsize = 1ULL << ctx->min_bshift;
+		start = round_down(rreq->start, min_bsize);
 		end = min_t(unsigned long long,
 			    round_up(rreq->start + rreq->len, min_bsize),
 			    ctx->remote_i_size);
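
For example (illustrative numbers; say content encryption uses 512-byte
blocks, so min_bshift = 9), a 100-byte DIO read at file offset 700 from a
file with remote_i_size = 0x10000 is padded to:

	min_bsize = 1ULL << 9                              = 512
	start     = round_down(700, 512)                   = 512
	end       = min(round_up(700 + 100, 512), 0x10000) = 1024

so bytes [512, 1024) are read into the bounce buffer and the caller's 100
bytes are then copied back out of it.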

diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index ef17d94a2fbd..69ff5d652931 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -139,6 +139,7 @@ struct netfs_inode {
 	unsigned long		flags;
 #define NETFS_ICTX_ODIRECT	0		/* The file has DIO in progress */
 #define NETFS_ICTX_UNBUFFERED	1		/* I/O should not use the pagecache */
+	unsigned char		min_bshift;	/* log2 min block size for bounding box or 0 */
 };
 
 /*
@@ -462,6 +463,7 @@ static inline void netfs_inode_init(struct netfs_inode *ctx,
 	ctx->ops = ops;
 	ctx->remote_i_size = i_size_read(&ctx->inode);
 	ctx->flags = 0;
+	ctx->min_bshift = 0;
 #if IS_ENABLED(CONFIG_FSCACHE)
 	ctx->cache = NULL;
 #endif
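
A filesystem that, say, encrypts content in 64KiB blocks would opt in
after initialising the netfs context.  The helper below is a minimal
sketch only: "foofs" and its fields are hypothetical, not part of this
patch or any real filesystem:

	static void foofs_init_netfs(struct foofs_inode *fi)
	{
		netfs_inode_init(&fi->netfs, &foofs_req_ops);

		/* Hypothetical: content is encrypted in 2^16-byte
		 * blocks, so all I/O must be aligned to and padded
		 * out to 64KiB.
		 */
		if (fi->crypto_enabled)
			fi->netfs.min_bshift = 16;
	}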