lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20241021133154.516847-2-jvetter@kalrayinc.com>
Date: Mon, 21 Oct 2024 15:31:51 +0200
From: Julian Vetter <jvetter@...rayinc.com>
To: Arnd Bergmann <arnd@...db.de>, Catalin Marinas
 <catalin.marinas@....com>, Will Deacon <will@...nel.org>, Guo Ren
 <guoren@...nel.org>, Huacai Chen <chenhuacai@...nel.org>, WANG Xuerui
 <kernel@...0n.name>, Andrew Morton <akpm@...ux-foundation.org>, Geert
 Uytterhoeven <geert@...ux-m68k.org>, Richard Henderson
 <richard.henderson@...aro.org>, Niklas Schnelle <schnelle@...ux.ibm.com>,
 Takashi Iwai <tiwai@...e.com>, Miquel Raynal <miquel.raynal@...tlin.com>,
 David Laight <David.Laight@...lab.com>, Johannes Berg
 <johannes@...solutions.net>, Christoph Hellwig <hch@...radead.org>
Cc: linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
 linux-csky@...r.kernel.org, loongarch@...ts.linux.dev,
 linux-arch@...r.kernel.org, Yann Sionneau <ysionneau@...rayinc.com>, Julian
 Vetter <jvetter@...rayinc.com>
Subject: [PATCH v10 1/4] Replace fallback for IO memcpy and IO memset

The fallback for IO memcpy and IO memset in asm-generic/io.h simply call
memcpy and memset. This might lead to alignment problems or faults on
architectures that do not define their own version and fall back to
these defaults.
This patch replaces the memcpy and memset. The new versions use
read{l,q} accessor functions, align accesses to machine word size, and
resort to byte accesses when the target memory is not machine word
aligned. So, architectures that were using the old fallback functions
(e.g., arc, mips, riscv, etc.) now have more resilient versions that
take IO memory constraints into account. Moreover, architectures with
similar implementations can use these new fallback versions as well,
without needing to implement their own.

Reviewed-by: Yann Sionneau <ysionneau@...rayinc.com>
Signed-off-by: Julian Vetter <jvetter@...rayinc.com>
---
Changes for v10:
- Removed iomem_copy.c again
- Updated implementations directly in asm-generic/io.h
- Updated commit message to reflect the changes made in the patch
---
 include/asm-generic/io.h | 116 ++++++++++++++++++++++++++++++++-------
 1 file changed, 96 insertions(+), 20 deletions(-)

diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 80de699bf6af..00cbf8587586 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -7,10 +7,12 @@
 #ifndef __ASM_GENERIC_IO_H
 #define __ASM_GENERIC_IO_H
 
+#include <linux/align.h>
 #include <asm/page.h> /* I/O is all done through memory accesses */
 #include <linux/string.h> /* for memset() and memcpy() */
 #include <linux/sizes.h>
 #include <linux/types.h>
+#include <linux/unaligned.h>
 #include <linux/instruction_pointer.h>
 
 #ifdef CONFIG_GENERIC_IOMAP
@@ -1154,16 +1156,40 @@ static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
 #define memset_io memset_io
 /**
  * memset_io	Set a range of I/O memory to a constant value
- * @addr:	The beginning of the I/O-memory range to set
- * @val:	The value to set the memory to
+ * @dst:	The beginning of the I/O-memory range to set
+ * @c:		The value to set the memory to
  * @count:	The number of bytes to set
  *
  * Set a range of I/O memory to a given value.
  */
-static inline void memset_io(volatile void __iomem *addr, int value,
-			     size_t size)
+static inline void memset_io(volatile void __iomem *dst, int c, size_t count)
 {
-	memset(__io_virt(addr), value, size);
+	long qc = (u8)c;
+
+	qc *= ~0UL / 0xff;
+
+	while (count && !IS_ALIGNED((long)dst, sizeof(long))) {
+		__raw_writeb(c, dst);
+		dst++;
+		count--;
+	}
+
+	while (count >= sizeof(long)) {
+#ifdef CONFIG_64BIT
+		__raw_writeq(qc, dst);
+#else
+		__raw_writel(qc, dst);
+#endif
+
+		dst += sizeof(long);
+		count -= sizeof(long);
+	}
+
+	while (count) {
+		__raw_writeb(c, dst);
+		dst++;
+		count--;
+	}
 }
 #endif
 
@@ -1171,34 +1197,84 @@ static inline void memset_io(volatile void __iomem *addr, int value,
 #define memcpy_fromio memcpy_fromio
 /**
  * memcpy_fromio	Copy a block of data from I/O memory
- * @dst:		The (RAM) destination for the copy
- * @src:		The (I/O memory) source for the data
+ * @to:			The (RAM) destination for the copy
+ * @from:		The (I/O memory) source for the data
  * @count:		The number of bytes to copy
  *
  * Copy a block of data from I/O memory.
  */
-static inline void memcpy_fromio(void *buffer,
-				 const volatile void __iomem *addr,
-				 size_t size)
-{
-	memcpy(buffer, __io_virt(addr), size);
+static inline void memcpy_fromio(void *to, const volatile void __iomem *from,
+				 size_t count)
+{
+	while (count && !IS_ALIGNED((long)from, sizeof(long))) {
+		*(u8 *)to = __raw_readb(from);
+		from++;
+		to++;
+		count--;
+	}
+
+	while (count >= sizeof(long)) {
+#ifdef CONFIG_64BIT
+		long val = __raw_readq(from);
+#else
+		long val = __raw_readl(from);
+#endif
+		put_unaligned(val, (long *)to);
+
+
+		from += sizeof(long);
+		to += sizeof(long);
+		count -= sizeof(long);
+	}
+
+	while (count) {
+		*(u8 *)to = __raw_readb(from);
+		from++;
+		to++;
+		count--;
+	}
 }
 #endif
 
 #ifndef memcpy_toio
 #define memcpy_toio memcpy_toio
 /**
- * memcpy_toio		Copy a block of data into I/O memory
- * @dst:		The (I/O memory) destination for the copy
- * @src:		The (RAM) source for the data
- * @count:		The number of bytes to copy
+ * memcpy_toio	Copy a block of data into I/O memory
+ * @to:		The (I/O memory) destination for the copy
+ * @from:	The (RAM) source for the data
+ * @count:	The number of bytes to copy
  *
  * Copy a block of data to I/O memory.
  */
-static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
-			       size_t size)
-{
-	memcpy(__io_virt(addr), buffer, size);
+static inline void memcpy_toio(volatile void __iomem *to, const void *from,
+			       size_t count)
+{
+	while (count && !IS_ALIGNED((long)to, sizeof(long))) {
+		__raw_writeb(*(u8 *)from, to);
+		from++;
+		to++;
+		count--;
+	}
+
+	while (count >= sizeof(long)) {
+		long val = get_unaligned((long *)from);
+#ifdef CONFIG_64BIT
+		__raw_writeq(val, to);
+#else
+		__raw_writel(val, to);
+#endif
+
+		from += sizeof(long);
+		to += sizeof(long);
+		count -= sizeof(long);
+	}
+
+	while (count) {
+		__raw_writeb(*(u8 *)from, to);
+		from++;
+		to++;
+		count--;
+	}
 }
 #endif
 
-- 
2.34.1






Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ