Message-ID: <161781047695.463527.7463536103593997492.stgit@warthog.procyon.org.uk>
Date:   Wed, 07 Apr 2021 16:47:56 +0100
From:   David Howells <dhowells@...hat.com>
To:     jlayton@...nel.org
Cc:     dwysocha@...hat.com, linux-cachefs@...hat.com,
        v9fs-developer@...ts.sourceforge.net,
        linux-afs@...ts.infradead.org, linux-cifs@...r.kernel.org,
        ceph-devel@...r.kernel.org, linux-nfs@...r.kernel.org,
        linux-fsdevel@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: [PATCH 4/5] netfs: Fix copy-to-cache amalgamation

Fix the amalgamation of subrequests when copying to the cache.  We
shouldn't round the size up to PAGE_SIZE as we go, as that ends up with the
composite subrequest length being too long, and that leads to EIO from the
cache write because the source iterator doesn't contain enough data.
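
To make the failure concrete, here is a small illustrative userspace
sketch (not part of the patch; the subrequest offsets and lengths are
made up, loff_t/size_t are simplified to unsigned long, and the macros
are stand-ins for the kernel's round_up()/round_down()) showing how
rounding each subrequest up during amalgamation overshoots the data
actually read:

#include <stdio.h>

#define PAGE_SIZE 4096UL
/* Userspace stand-ins for the kernel's round_up()/round_down(). */
#define round_up(x, a)   ((((x) + (a) - 1) / (a)) * (a))
#define round_down(x, a) (((x) / (a)) * (a))

int main(void)
{
	/* Two adjacent subrequests covering 8000 bytes in total
	 * (made-up geometry, purely for illustration).
	 */
	unsigned long a_start = 0, a_len = 5000;
	unsigned long b_start = 5000, b_len = 3000;

	/* Old logic: round the first subrequest up as we go... */
	unsigned long start = round_down(a_start, PAGE_SIZE);
	unsigned long len = round_up(a_len + (a_start - start), PAGE_SIZE);

	/* ...then merge the neighbour and round up again. */
	if (b_start <= start + len) {
		len += b_len;
		len = round_up(len, PAGE_SIZE);
	}

	printf("composite: start=%lu len=%lu for %lu bytes of data\n",
	       start, len, a_len + b_len);
	/* Prints len=12288 for 8000 bytes of data: the iterator built
	 * over the read pages can't supply that much, so the cache
	 * write fails with EIO.
	 */
	return 0;
}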

Instead, we only need to amalgamate contiguous subrequests and then ask the
cache to do whatever rounding it needs, which also means we don't have to
make any assumptions about the cache granularity.
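
For comparison, this illustrative sketch (again userspace-only, with
the same simplified types) applies the DIO-size rounding once, at the
end, to the byte-accurate composite from the example above, the way the
cache's prepare_write hook is expected to:

#include <stdio.h>

#define PAGE_SIZE 4096UL
/* Userspace stand-ins for the kernel's round_up()/round_down(). */
#define round_up(x, a)   ((((x) + (a) - 1) / (a)) * (a))
#define round_down(x, a) (((x) / (a)) * (a))

/* Round a byte-accurate region out to DIO-sized boundaries. */
static int prepare_write(unsigned long *_start, unsigned long *_len)
{
	unsigned long start = *_start, len = *_len;
	unsigned long down = start - round_down(start, PAGE_SIZE);

	*_start = start - down;
	*_len = round_up(down + len, PAGE_SIZE);
	return 0;
}

int main(void)
{
	/* The merged, byte-accurate composite: 8000 bytes at offset 0. */
	unsigned long start = 0, len = 8000;

	prepare_write(&start, &len);
	printf("write: start=%lu len=%lu\n", start, len);
	/* Prints start=0 len=8192: rounded once, by the cache, and fully
	 * covered by the pages the read filled in.
	 */
	return 0;
}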

Signed-off-by: David Howells <dhowells@...hat.com>
---

 fs/cachefiles/io.c           |   17 +++++++++++++++++
 fs/netfs/read_helper.c       |   19 +++++++++----------
 include/linux/netfs.h        |    6 ++++++
 include/trace/events/netfs.h |    2 ++
 4 files changed, 34 insertions(+), 10 deletions(-)

diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c
index 620959d1e95b..b13fb45fc3f3 100644
--- a/fs/cachefiles/io.c
+++ b/fs/cachefiles/io.c
@@ -330,6 +330,22 @@ static enum netfs_read_source cachefiles_prepare_read(struct netfs_read_subreque
 	return NETFS_DOWNLOAD_FROM_SERVER;
 }
 
+/*
+ * Prepare for a write to occur.
+ */
+static int cachefiles_prepare_write(struct netfs_cache_resources *cres,
+				    loff_t *_start, size_t *_len, loff_t i_size)
+{
+	loff_t start = *_start;
+	size_t len = *_len, down;
+
+	/* Round to DIO size */
+	down = start - round_down(start, PAGE_SIZE);
+	*_start = start - down;
+	*_len = round_up(down + len, PAGE_SIZE);
+	return 0;
+}
+
 /*
  * Clean up an operation.
  */
@@ -355,6 +371,7 @@ static const struct netfs_cache_ops cachefiles_netfs_cache_ops = {
 	.read			= cachefiles_read,
 	.write			= cachefiles_write,
 	.prepare_read		= cachefiles_prepare_read,
+	.prepare_write		= cachefiles_prepare_write,
 };
 
 /*
diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c
index ad0dc01319ce..ce2f31d20250 100644
--- a/fs/netfs/read_helper.c
+++ b/fs/netfs/read_helper.c
@@ -293,7 +293,7 @@ static void netfs_rreq_do_write_to_cache(struct netfs_read_request *rreq)
 	struct netfs_cache_resources *cres = &rreq->cache_resources;
 	struct netfs_read_subrequest *subreq, *next, *p;
 	struct iov_iter iter;
-	loff_t pos;
+	int ret;
 
 	trace_netfs_rreq(rreq, netfs_rreq_trace_write);
 
@@ -311,23 +311,22 @@ static void netfs_rreq_do_write_to_cache(struct netfs_read_request *rreq)
 
 	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
 		/* Amalgamate adjacent writes */
-		pos = round_down(subreq->start, PAGE_SIZE);
-		if (pos != subreq->start) {
-			subreq->len += subreq->start - pos;
-			subreq->start = pos;
-		}
-		subreq->len = round_up(subreq->len, PAGE_SIZE);
-
 		while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
 			next = list_next_entry(subreq, rreq_link);
-			if (next->start > subreq->start + subreq->len)
+			if (next->start != subreq->start + subreq->len)
 				break;
 			subreq->len += next->len;
-			subreq->len = round_up(subreq->len, PAGE_SIZE);
 			list_del_init(&next->rreq_link);
 			netfs_put_subrequest(next, false);
 		}
 
+		ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
+					       rreq->i_size);
+		if (ret < 0) {
+			trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
+			continue;
+		}
+
 		iov_iter_xarray(&iter, WRITE, &rreq->mapping->i_pages,
 				subreq->start, subreq->len);
 
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index 2299e7662ff0..9062adfa2fb9 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -206,6 +206,12 @@ struct netfs_cache_ops {
 	 */
 	enum netfs_read_source (*prepare_read)(struct netfs_read_subrequest *subreq,
 					       loff_t i_size);
+
+	/* Prepare a write operation, working out what part of the write we can
+	 * actually do.
+	 */
+	int (*prepare_write)(struct netfs_cache_resources *cres,
+			     loff_t *_start, size_t *_len, loff_t i_size);
 };
 
 struct readahead_control;
diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
index a2bf6cd84bd4..e3ebeabd3852 100644
--- a/include/trace/events/netfs.h
+++ b/include/trace/events/netfs.h
@@ -43,6 +43,7 @@ enum netfs_sreq_trace {
 	netfs_sreq_trace_submit,
 	netfs_sreq_trace_terminated,
 	netfs_sreq_trace_write,
+	netfs_sreq_trace_write_skip,
 	netfs_sreq_trace_write_term,
 };
 
@@ -77,6 +78,7 @@ enum netfs_sreq_trace {
 	EM(netfs_sreq_trace_submit,		"SUBMT")	\
 	EM(netfs_sreq_trace_terminated,		"TERM ")	\
 	EM(netfs_sreq_trace_write,		"WRITE")	\
+	EM(netfs_sreq_trace_write_skip,		"SKIP ")	\
 	E_(netfs_sreq_trace_write_term,		"WTERM")
 
 