Message-Id: <20220418121207.048407777@linuxfoundation.org>
Date: Mon, 18 Apr 2022 14:10:28 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Jan Kara <jack@...e.cz>,
Trond Myklebust <trond.myklebust@...merspace.com>,
Chuck Lever <chuck.lever@...cle.com>,
Sasha Levin <sashal@...nel.org>
Subject: [PATCH 5.17 059/219] nfsd: Fix a write performance regression

From: Trond Myklebust <trond.myklebust@...merspace.com>

[ Upstream commit 6b8a94332ee4f7d9a8ae0cbac7609f79c212f06c ]

The call to filemap_flush() in nfsd_file_put() is there to ensure that
we clear out any writes belonging to an NFSv3 client relatively quickly
and avoid situations where the file can't be evicted by the garbage
collector. It also ensures that we detect write errors quickly.

The problem is that this causes a performance regression for some
workloads.

So try to improve matters by deferring writeback until we're ready to
close the file and need to detect errors, so that we can force the
client to resend.
Tested-by: Jan Kara <jack@...e.cz>
Fixes: b6669305d35a ("nfsd: Reduce the number of calls to nfsd_file_gc()")
Signed-off-by: Trond Myklebust <trond.myklebust@...merspace.com>
Link: https://lore.kernel.org/all/20220330103457.r4xrhy2d6nhtouzk@quack3.lan
Signed-off-by: Chuck Lever <chuck.lever@...cle.com>
Signed-off-by: Sasha Levin <sashal@...nel.org>
---
 fs/nfsd/filecache.c | 18 +++++++++++++++---
 1 file changed, 15 insertions(+), 3 deletions(-)

diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
index cc2831cec669..496f7b3f7523 100644
--- a/fs/nfsd/filecache.c
+++ b/fs/nfsd/filecache.c
@@ -235,6 +235,13 @@ nfsd_file_check_write_error(struct nfsd_file *nf)
 	return filemap_check_wb_err(file->f_mapping, READ_ONCE(file->f_wb_err));
 }
 
+static void
+nfsd_file_flush(struct nfsd_file *nf)
+{
+	if (nf->nf_file && vfs_fsync(nf->nf_file, 1) != 0)
+		nfsd_reset_write_verifier(net_generic(nf->nf_net, nfsd_net_id));
+}
+
 static void
 nfsd_file_do_unhash(struct nfsd_file *nf)
 {
@@ -302,11 +309,14 @@ nfsd_file_put(struct nfsd_file *nf)
 		return;
 	}
 
-	filemap_flush(nf->nf_file->f_mapping);
 	is_hashed = test_bit(NFSD_FILE_HASHED, &nf->nf_flags) != 0;
-	nfsd_file_put_noref(nf);
-	if (is_hashed)
+	if (!is_hashed) {
+		nfsd_file_flush(nf);
+		nfsd_file_put_noref(nf);
+	} else {
+		nfsd_file_put_noref(nf);
 		nfsd_file_schedule_laundrette();
+	}
 	if (atomic_long_read(&nfsd_filecache_count) >= NFSD_FILE_LRU_LIMIT)
 		nfsd_file_gc();
 }
@@ -327,6 +337,7 @@ nfsd_file_dispose_list(struct list_head *dispose)
 	while(!list_empty(dispose)) {
 		nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
 		list_del(&nf->nf_lru);
+		nfsd_file_flush(nf);
 		nfsd_file_put_noref(nf);
 	}
 }
@@ -340,6 +351,7 @@ nfsd_file_dispose_list_sync(struct list_head *dispose)
 	while(!list_empty(dispose)) {
 		nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
 		list_del(&nf->nf_lru);
+		nfsd_file_flush(nf);
 		if (!refcount_dec_and_test(&nf->nf_ref))
 			continue;
 		if (nfsd_file_free(nf))
--
2.35.1
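A note on the error path in the new nfsd_file_flush() helper: when
vfs_fsync() (datasync=1) reports a writeback error, the server calls
nfsd_reset_write_verifier(). In NFSv3 (RFC 1813) the server returns an
opaque 8-byte write verifier in its UNSTABLE WRITE and COMMIT replies;
a client that sees the verifier change between its WRITEs and the
matching COMMIT must retransmit the uncommitted data. That is how the
patch "forces the client to resend" after a lost write. A rough sketch
of the client-side rule, with made-up names (this is not the Linux NFS
client implementation):

/* Hypothetical sketch of the NFSv3 client-side verifier check that
 * nfsd_reset_write_verifier() is aimed at; names are invented. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NFS3_WRITEVERFSIZE 8	/* size of the NFSv3 write verifier */

/* Verifier remembered from the server's UNSTABLE WRITE replies. */
struct write_state {
	uint8_t verifier[NFS3_WRITEVERFSIZE];
};

/* Returns true if the uncommitted WRITEs must be sent again: the
 * verifier in the COMMIT reply no longer matches the one seen in the
 * WRITE replies (server reboot, or a detected writeback error). */
static bool commit_requires_resend(const struct write_state *ws,
				   const uint8_t *commit_verf)
{
	return memcmp(ws->verifier, commit_verf, NFS3_WRITEVERFSIZE) != 0;
}

int main(void)
{
	struct write_state ws = { .verifier = { 1, 2, 3, 4, 5, 6, 7, 8 } };
	uint8_t same[NFS3_WRITEVERFSIZE]  = { 1, 2, 3, 4, 5, 6, 7, 8 };
	uint8_t reset[NFS3_WRITEVERFSIZE] = { 9, 9, 9, 9, 9, 9, 9, 9 };

	printf("unchanged verifier -> resend? %d\n",
	       commit_requires_resend(&ws, same));
	printf("reset verifier     -> resend? %d\n",
	       commit_requires_resend(&ws, reset));
	return 0;
}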