Message-ID: <20230922120227.1173720-9-dhowells@redhat.com>
Date: Fri, 22 Sep 2023 13:02:22 +0100
From: David Howells <dhowells@...hat.com>
To: Jens Axboe <axboe@...nel.dk>
Cc: David Howells <dhowells@...hat.com>,
Al Viro <viro@...iv.linux.org.uk>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Christoph Hellwig <hch@....de>,
Christian Brauner <christian@...uner.io>,
David Laight <David.Laight@...LAB.COM>,
Matthew Wilcox <willy@...radead.org>,
Jeff Layton <jlayton@...nel.org>,
linux-fsdevel@...r.kernel.org, linux-block@...r.kernel.org,
linux-mm@...ck.org, netdev@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH v6 08/13] iov_iter: Don't deal with iter->copy_mc in memcpy_from_iter_mc()

iter->copy_mc is only used with a bvec iterator and only by dump_emit_page()
in fs/coredump.c, so rather than handling this in memcpy_from_iter_mc(),
where the flag gets rechecked for every segment copied by _copy_from_iter()
and copy_page_from_iter_atomic(), check it once in a common __copy_from_iter()
wrapper and divert to __copy_from_iter_mc() when it is set.
__copy_from_iter_mc() then only has to deal with bvec iterators, and the
normal path no longer tests the flag per segment.
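
For reference, the flag is only ever set on a bvec iterator by the coredump
code.  A simplified sketch of that usage, modelled on dump_emit_page() (the
'file' and 'page' variables stand in for the coredump parameters, and error
handling and the surrounding loop are omitted), looks like:

	struct bio_vec bvec;
	struct iov_iter iter;
	loff_t pos = file->f_pos;	/* 'file' and 'page' assumed from the caller */
	ssize_t n;

	bvec_set_page(&bvec, page, PAGE_SIZE, 0);
	iov_iter_bvec(&iter, ITER_SOURCE, &bvec, 1, PAGE_SIZE);
	iov_iter_set_copy_mc(&iter);	/* only ever set here, on a bvec iterator */
	n = __kernel_write_iter(file, &iter, &pos);

With the check hoisted into __copy_from_iter(), every other iterator type
skips the copy_mc test entirely.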
---
lib/iov_iter.c | 39 +++++++++++++++++++++++++++------------
1 file changed, 27 insertions(+), 12 deletions(-)

diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 65374ee91ecd..943aa3cfd7b3 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -253,14 +253,33 @@ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */

-static size_t memcpy_from_iter_mc(void *iter_from, size_t progress,
- size_t len, void *to, void *priv2)
+static __always_inline
+size_t memcpy_from_iter_mc(void *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
+{
+ return copy_mc_to_kernel(to + progress, iter_from, len);
+}
+
+static size_t __copy_from_iter_mc(void *addr, size_t bytes, struct iov_iter *i)
{
- struct iov_iter *iter = priv2;
+ size_t progress;

- if (iov_iter_is_copy_mc(iter))
- return copy_mc_to_kernel(to + progress, iter_from, len);
- return memcpy_from_iter(iter_from, progress, len, to, priv2);
+ if (unlikely(i->count < bytes))
+ bytes = i->count;
+ if (unlikely(!bytes))
+ return 0;
+ progress = iterate_bvec(i, bytes, addr, NULL, memcpy_from_iter_mc);
+ i->count -= progress;
+ return progress;
+}
+
+static __always_inline
+size_t __copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
+{
+ if (unlikely(iov_iter_is_copy_mc(i)))
+ return __copy_from_iter_mc(addr, bytes, i);
+ return iterate_and_advance(i, bytes, addr,
+ copy_from_user_iter, memcpy_from_iter);
}

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
@@ -270,9 +289,7 @@ size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
if (user_backed_iter(i))
might_fault();
- return iterate_and_advance2(i, bytes, addr, i,
- copy_from_user_iter,
- memcpy_from_iter_mc);
+ return __copy_from_iter(addr, bytes, i);
}
EXPORT_SYMBOL(_copy_from_iter);

@@ -493,9 +510,7 @@ size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
}

p = kmap_atomic(page) + offset;
- n = iterate_and_advance2(i, n, p, i,
- copy_from_user_iter,
- memcpy_from_iter_mc);
+ n = __copy_from_iter(p, n, i);
kunmap_atomic(p);
copied += n;
offset += n;