Message-ID: <20230913165648.2570623-8-dhowells@redhat.com>
Date: Wed, 13 Sep 2023 17:56:42 +0100
From: David Howells <dhowells@...hat.com>
To: Al Viro <viro@...iv.linux.org.uk>,
Linus Torvalds <torvalds@...ux-foundation.org>
Cc: David Howells <dhowells@...hat.com>,
Jens Axboe <axboe@...nel.dk>,
Christoph Hellwig <hch@....de>,
Christian Brauner <christian@...uner.io>,
David Laight <David.Laight@...LAB.COM>,
Matthew Wilcox <willy@...radead.org>,
Jeff Layton <jlayton@...nel.org>,
linux-fsdevel@...r.kernel.org,
linux-block@...r.kernel.org,
linux-mm@...ck.org,
netdev@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH v4 07/13] iov_iter: Make copy_from_iter() always handle MCE

Make copy_from_iter() always catch an MCE and return a short copy and make
the coredump code rely on that. This requires arch support in the form of
a memcpy_mc() function that returns the amount of data left uncopied (ie.
zero on a complete copy).

[?] Is it better to kill the thread in the event of an MCE occurring?
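
To illustrate (this sketch is not part of the patch, and buf, len and
iter are placeholder names), a caller that wants to notice poisoned
source memory would then only need to check for a short copy:

	size_t copied = copy_from_iter(buf, len, iter);

	if (copied != len) {
		/* The copy stopped short; with this change that can
		 * mean an MCE on the source pages as well as a fault,
		 * so treat the remainder as unreadable.
		 */
		return -EFAULT;
	}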
Signed-off-by: David Howells <dhowells@...hat.com>
cc: Alexander Viro <viro@...iv.linux.org.uk>
cc: Jens Axboe <axboe@...nel.dk>
cc: Christoph Hellwig <hch@....de>
cc: Christian Brauner <christian@...uner.io>
cc: Matthew Wilcox <willy@...radead.org>
cc: Linus Torvalds <torvalds@...ux-foundation.org>
cc: David Laight <David.Laight@...LAB.COM>
cc: linux-block@...r.kernel.org
cc: linux-fsdevel@...r.kernel.org
cc: linux-mm@...ck.org
---
 arch/x86/include/asm/mce.h | 23 +++++++++++++++++++++++
 fs/coredump.c              |  1 -
 lib/iov_iter.c             | 12 +++++-------
 3 files changed, 28 insertions(+), 8 deletions(-)

diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 180b1cbfcc4e..77ce2044536c 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -353,4 +353,27 @@ static inline void mce_hygon_feature_init(struct cpuinfo_x86 *c) { return mce_am
 
 unsigned long copy_mc_fragile_handle_tail(char *to, char *from, unsigned len);
 
+static __always_inline __must_check
+size_t memcpy_mc(void *to, const void *from, size_t len)
+{
+#ifdef CONFIG_ARCH_HAS_COPY_MC
+	/*
+	 * If CPU has FSRM feature, use 'rep movs'.
+	 * Otherwise, use rep_movs_alternative.
+	 */
+	asm volatile(
+		"1:\n\t"
+		ALTERNATIVE("rep movsb",
+			    "call rep_movs_alternative", ALT_NOT(X86_FEATURE_FSRM))
+		"2:\n"
+		_ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_DEFAULT_MCE_SAFE)
+		:"+c" (len), "+D" (to), "+S" (from), ASM_CALL_CONSTRAINT
+		: : "memory", "rax", "r8", "r9", "r10", "r11");
+#else
+	memcpy(to, from, len);
+	return 0;
+#endif
+	return len;
+}
+
 #endif /* _ASM_X86_MCE_H */
diff --git a/fs/coredump.c b/fs/coredump.c
index 9d235fa14ab9..ad54102a5e14 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -884,7 +884,6 @@ static int dump_emit_page(struct coredump_params *cprm, struct page *page)
 	pos = file->f_pos;
 	bvec_set_page(&bvec, page, PAGE_SIZE, 0);
 	iov_iter_bvec(&iter, ITER_SOURCE, &bvec, 1, PAGE_SIZE);
-	iov_iter_set_copy_mc(&iter);
 	n = __kernel_write_iter(cprm->file, &iter, &pos);
 	if (n != PAGE_SIZE)
 		return 0;
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 65374ee91ecd..b574601783bc 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -14,6 +14,7 @@
 #include <linux/scatterlist.h>
 #include <linux/instrumented.h>
 #include <linux/iov_iter.h>
+#include <asm/mce.h>
 
 static __always_inline
 size_t copy_to_user_iter(void __user *iter_to, size_t progress,
@@ -253,14 +254,11 @@ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
 #endif /* CONFIG_ARCH_HAS_COPY_MC */
 
-static size_t memcpy_from_iter_mc(void *iter_from, size_t progress,
-				  size_t len, void *to, void *priv2)
+static __always_inline
+size_t memcpy_from_iter_mc(void *iter_from, size_t progress,
+			   size_t len, void *to, void *priv2)
 {
-	struct iov_iter *iter = priv2;
-
-	if (iov_iter_is_copy_mc(iter))
-		return copy_mc_to_kernel(to + progress, iter_from, len);
-	return memcpy_from_iter(iter_from, progress, len, to, priv2);
+	return memcpy_mc(to + progress, iter_from, len);
 }
 
 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)