Message-Id: <20210726141141.2839385-2-arnd@kernel.org>
Date: Mon, 26 Jul 2021 16:11:32 +0200
From: Arnd Bergmann <arnd@kernel.org>
To: Russell King <linux@armlinux.org.uk>
Cc: Arnd Bergmann <arnd@arndb.de>, linux-kernel@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org, linux-arch@vger.kernel.org,
	linux-mm@kvack.org, Alexander Viro <viro@zeniv.linux.org.uk>,
	Linus Walleij <linus.walleij@linaro.org>,
	Christoph Hellwig <hch@lst.de>
Subject: [PATCH v5 01/10] mm/maccess: fix unaligned copy_{from,to}_kernel_nofault

From: Arnd Bergmann <arnd@arndb.de>

On machines such as ARMv5 that trap unaligned accesses,
copy_from_kernel_nofault() and copy_to_kernel_nofault() can be slow
when each access needs to be emulated, or they might not work at all.

Change them so that each of the word-sized copy loops is only used
when both the src and dst pointers are naturally aligned for that
access size.
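
The test works because a bit is clear in (dst | src) only if it is
clear in both addresses, so the low bits of the OR give the largest
power-of-two alignment the two pointers have in common. A minimal
userspace sketch of the same check, with common_alignment() as a
purely illustrative name:

#include <stdint.h>
#include <stdio.h>

/*
 * Widest naturally aligned access size (in bytes) usable for both
 * pointers: OR-ing the addresses keeps a low bit clear only if it
 * is clear in both of them.
 */
static size_t common_alignment(const void *dst, const void *src)
{
	uintptr_t align = (uintptr_t)dst | (uintptr_t)src;

	if (!(align & 7))
		return 8;
	if (!(align & 3))
		return 4;
	if (!(align & 1))
		return 2;
	return 1;
}

int main(void)
{
	uint64_t storage[2];
	char *buf = (char *)storage;

	/* buf is 8-byte aligned, so buf+4 and buf+8 share 4-byte alignment */
	printf("%zu\n", common_alignment(buf + 4, buf + 8));	/* prints 4 */
	return 0;
}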

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
---
mm/maccess.c | 28 ++++++++++++++++++++++------
1 file changed, 22 insertions(+), 6 deletions(-)
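
For context, the copy_{from,to}_kernel_nofault_loop() helpers called
in the hunks below are macros in mm/maccess.c that consume the buffer
in sizeof(type) chunks. The read side looks roughly like this; the
write side is the same shape but uses __put_kernel_nofault():

#define copy_from_kernel_nofault_loop(dst, src, len, type, err_label)	\
	while (len >= sizeof(type)) {					\
		__get_kernel_nofault(dst, src, type, err_label);	\
		dst += sizeof(type);					\
		src += sizeof(type);					\
		len -= sizeof(type);					\
	}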
diff --git a/mm/maccess.c b/mm/maccess.c
index 3bd70405f2d8..d3f1a1f0b1c1 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -24,13 +24,21 @@ bool __weak copy_from_kernel_nofault_allowed(const void *unsafe_src,
 
 long copy_from_kernel_nofault(void *dst, const void *src, size_t size)
 {
+	unsigned long align = 0;
+
+	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
+		align = (unsigned long)dst | (unsigned long)src;
+
 	if (!copy_from_kernel_nofault_allowed(src, size))
 		return -ERANGE;
 
 	pagefault_disable();
-	copy_from_kernel_nofault_loop(dst, src, size, u64, Efault);
-	copy_from_kernel_nofault_loop(dst, src, size, u32, Efault);
-	copy_from_kernel_nofault_loop(dst, src, size, u16, Efault);
+	if (!(align & 7))
+		copy_from_kernel_nofault_loop(dst, src, size, u64, Efault);
+	if (!(align & 3))
+		copy_from_kernel_nofault_loop(dst, src, size, u32, Efault);
+	if (!(align & 1))
+		copy_from_kernel_nofault_loop(dst, src, size, u16, Efault);
 	copy_from_kernel_nofault_loop(dst, src, size, u8, Efault);
 	pagefault_enable();
 	return 0;
@@ -50,10 +58,18 @@ EXPORT_SYMBOL_GPL(copy_from_kernel_nofault);
 
 long copy_to_kernel_nofault(void *dst, const void *src, size_t size)
 {
+	unsigned long align = 0;
+
+	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
+		align = (unsigned long)dst | (unsigned long)src;
+
 	pagefault_disable();
-	copy_to_kernel_nofault_loop(dst, src, size, u64, Efault);
-	copy_to_kernel_nofault_loop(dst, src, size, u32, Efault);
-	copy_to_kernel_nofault_loop(dst, src, size, u16, Efault);
+	if (!(align & 7))
+		copy_to_kernel_nofault_loop(dst, src, size, u64, Efault);
+	if (!(align & 3))
+		copy_to_kernel_nofault_loop(dst, src, size, u32, Efault);
+	if (!(align & 1))
+		copy_to_kernel_nofault_loop(dst, src, size, u16, Efault);
 	copy_to_kernel_nofault_loop(dst, src, size, u8, Efault);
 	pagefault_enable();
 	return 0;
--
2.29.2
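
For reference, a caller of the read side, such as a tracer peeking at
a possibly invalid kernel pointer, might look roughly like this;
read_kernel_word() is a hypothetical wrapper, not part of the patch:

static long read_kernel_word(const void *ptr, unsigned long *out)
{
	unsigned long val;

	/* returns 0 on success, -ERANGE or -EFAULT on failure */
	if (copy_from_kernel_nofault(&val, ptr, sizeof(val)))
		return -EFAULT;

	*out = val;
	return 0;
}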