Message-ID: <20240823200819.532106-5-dhowells@redhat.com>
Date: Fri, 23 Aug 2024 21:08:12 +0100
From: David Howells <dhowells@...hat.com>
To: Christian Brauner <christian@...uner.io>,
Steve French <sfrench@...ba.org>
Cc: David Howells <dhowells@...hat.com>,
Pankaj Raghav <p.raghav@...sung.com>,
Paulo Alcantara <pc@...guebit.com>,
Jeff Layton <jlayton@...nel.org>,
Matthew Wilcox <willy@...radead.org>,
netfs@...ts.linux.dev,
linux-afs@...ts.infradead.org,
linux-cifs@...r.kernel.org,
linux-nfs@...r.kernel.org,
ceph-devel@...r.kernel.org,
v9fs@...ts.linux.dev,
linux-erofs@...ts.ozlabs.org,
linux-fsdevel@...r.kernel.org,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
Marc Dionne <marc.dionne@...istor.com>
Subject: [PATCH 4/9] netfs: Fix trimming of streaming-write folios in netfs_inval_folio()

When netfslib writes to a folio that it doesn't have data for, but that
data exists on the server, it will make a 'streaming write' whereby it
stores data in a folio that is marked dirty, but not uptodate.  When it
does this, it attaches a record to folio->private to track the dirty
region.
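
For reference, the record attached to folio->private is a struct
netfs_folio; simplified, it looks something like this (comments are
paraphrased; the dirty_offset and dirty_len fields are the ones being
trimmed below):

	struct netfs_folio {
		struct netfs_group *netfs_group; /* Writeback grouping marker (or NULL) */
		unsigned int dirty_offset;	 /* Start of the dirty region in the folio */
		unsigned int dirty_len;		 /* Length of the dirty region */
	};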

When truncate() or fallocate() wants to invalidate part of such a folio, it
will call into ->invalidate_folio(), specifying the part of the folio that
is to be invalidated.  netfs_invalidate_folio(), on behalf of the
filesystem, must then determine how to trim the streaming-write record.  In
a couple of cases, however, it does this incorrectly: the reduce-length and
move-start cases are switched over and, in any case, don't calculate the
value correctly.

Fix this by making the logic tree more obvious and correcting the case
calculations.
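
To make the switched cases concrete, consider a dirty region of [100, 300)
within a folio and an invalidation of [200, 400).  The still-dirty head,
[100, 200), should survive.  Below is a self-contained user-space sketch
(not the kernel code; the values and variable names are invented for
illustration) comparing the old arithmetic with the fixed arithmetic for
this case:

	#include <stdio.h>

	int main(void)
	{
		/* Dirty region [100, 300); invalidate the overlapping
		 * tail region [200, 400).
		 */
		unsigned int fstart = 100, fend = 300;
		unsigned int offset = 200, length = 200;
		unsigned int iend = offset + length;

		if (offset > fstart && iend >= fend) {
			/* The old code jumped to move_start here, keeping
			 * exactly the invalidated tail:
			 */
			unsigned int old_offset = offset;
			unsigned int old_len = (fend - fstart) - (offset - fstart);
			printf("old: dirty [%u, %u)\n",
			       old_offset, old_offset + old_len);

			/* The fixed code treats a tail overlap as the
			 * reduce-length case, keeping the head:
			 */
			unsigned int new_offset = fstart;
			unsigned int new_len = offset - fstart;
			printf("new: dirty [%u, %u)\n",
			       new_offset, new_offset + new_len);
		}
		return 0;
	}

This prints "old: dirty [200, 300)" - the part that was just invalidated -
versus "new: dirty [100, 200)" - the data that actually remains dirty.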
Fixes: 9ebff83e6481 ("netfs: Prep to use folio->private for write grouping and streaming write")
Signed-off-by: David Howells <dhowells@...hat.com>
cc: Matthew Wilcox (Oracle) <willy@...radead.org>
cc: Pankaj Raghav <p.raghav@...sung.com>
cc: Jeff Layton <jlayton@...nel.org>
cc: Marc Dionne <marc.dionne@...istor.com>
cc: linux-afs@...ts.infradead.org
cc: netfs@...ts.linux.dev
cc: linux-mm@...ck.org
cc: linux-fsdevel@...r.kernel.org
---
fs/netfs/misc.c | 50 ++++++++++++++++++++++++++++++++++---------------
1 file changed, 35 insertions(+), 15 deletions(-)

diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c
index 69324761fcf7..c1f321cf5999 100644
--- a/fs/netfs/misc.c
+++ b/fs/netfs/misc.c
@@ -97,10 +97,20 @@ EXPORT_SYMBOL(netfs_clear_inode_writeback);
 void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
 {
 	struct netfs_folio *finfo;
+	struct netfs_inode *ctx = netfs_inode(folio_inode(folio));
 	size_t flen = folio_size(folio);
 
 	_enter("{%lx},%zx,%zx", folio->index, offset, length);
 
+	if (offset == 0 && length == flen) {
+		unsigned long long i_size = i_size_read(&ctx->inode);
+		unsigned long long fpos = folio_pos(folio), end;
+
+		end = umin(fpos + flen, i_size);
+		if (fpos < i_size && end > ctx->zero_point)
+			ctx->zero_point = end;
+	}
+
 	folio_wait_private_2(folio); /* [DEPRECATED] */
 
 	if (!folio_test_private(folio))
@@ -115,18 +125,34 @@ void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
 		/* We have a partially uptodate page from a streaming write. */
 		unsigned int fstart = finfo->dirty_offset;
 		unsigned int fend = fstart + finfo->dirty_len;
-		unsigned int end = offset + length;
+		unsigned int iend = offset + length;
 
 		if (offset >= fend)
 			return;
-		if (end <= fstart)
+		if (iend <= fstart)
+			return;
+
+		/* The invalidation region overlaps the data.  If the region
+		 * covers the start of the data, we either move along the start
+		 * or just erase the data entirely.
+		 */
+		if (offset <= fstart) {
+			if (iend >= fend)
+				goto erase_completely;
+			/* Move the start of the data. */
+			finfo->dirty_len = fend - iend;
+			finfo->dirty_offset = offset;
+			return;
+		}
+
+		/* Reduce the length of the data if the invalidation region
+		 * covers the tail part.
+		 */
+		if (iend >= fend) {
+			finfo->dirty_len = offset - fstart;
 			return;
-		if (offset <= fstart && end >= fend)
-			goto erase_completely;
-		if (offset <= fstart && end > fstart)
-			goto reduce_len;
-		if (offset > fstart && end >= fend)
-			goto move_start;
+		}
+
 		/* A partial write was split.  The caller has already zeroed
 		 * it, so just absorb the hole.
 		 */
@@ -139,12 +165,6 @@ void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
 	folio_clear_uptodate(folio);
 	kfree(finfo);
 	return;
-reduce_len:
-	finfo->dirty_len = offset + length - finfo->dirty_offset;
-	return;
-move_start:
-	finfo->dirty_len -= offset - finfo->dirty_offset;
-	finfo->dirty_offset = offset;
 }
 
 EXPORT_SYMBOL(netfs_invalidate_folio);
@@ -164,7 +184,7 @@ bool netfs_release_folio(struct folio *folio, gfp_t gfp)
 	if (folio_test_dirty(folio))
 		return false;
 
-	end = folio_pos(folio) + folio_size(folio);
+	end = umin(folio_pos(folio) + folio_size(folio), i_size_read(&ctx->inode));
 	if (end > ctx->zero_point)
 		ctx->zero_point = end;
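
As a side note on the first and last hunks: both clamp the advance of
ctx->zero_point (the position after which netfslib assumes the server
holds no data) to the current i_size, so a folio straddling EOF can't push
zero_point past the end of file.  A minimal user-space sketch of that
clamping arithmetic, with invented values (umin_ull stands in for the
kernel's umin() helper):

	#include <stdio.h>

	static unsigned long long umin_ull(unsigned long long a,
					   unsigned long long b)
	{
		return a < b ? a : b;
	}

	int main(void)
	{
		unsigned long long fpos = 8192, flen = 4096; /* folio [8192, 12288) */
		unsigned long long i_size = 10000;	     /* EOF inside the folio */
		unsigned long long zero_point = 4096;

		/* Same shape as the hunk in netfs_invalidate_folio(): advance
		 * zero_point to the folio end, but no further than i_size.
		 */
		unsigned long long end = umin_ull(fpos + flen, i_size);
		if (fpos < i_size && end > zero_point)
			zero_point = end;

		printf("zero_point=%llu\n", zero_point); /* 10000, not 12288 */
		return 0;
	}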