Message-ID: <20240924121432.798655-2-jvetter@kalrayinc.com>
Date: Tue, 24 Sep 2024 14:14:28 +0200
From: Julian Vetter <jvetter@...rayinc.com>
To: Arnd Bergmann <arnd@...db.de>, Catalin Marinas
 <catalin.marinas@....com>, Will Deacon <will@...nel.org>, Guo Ren
 <guoren@...nel.org>, Huacai Chen <chenhuacai@...nel.org>, WANG Xuerui
 <kernel@...0n.name>, Andrew Morton <akpm@...ux-foundation.org>
Cc: linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
 linux-csky@...r.kernel.org, loongarch@...ts.linux.dev, Yann Sionneau
 <ysionneau@...rayinc.com>, Julian Vetter <jvetter@...rayinc.com>
Subject: [PATCH v5 1/5] Consolidate __memcpy_{to,from}io and __memset_io
 into iomap_copy.c

Various architectures have nearly identical implementations of the
__memcpy_{to,from}io and __memset_io functions, so consolidate them
into the existing lib/iomap_copy.c.
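
Both the prototypes in asm-generic/io.h and the definitions in
lib/iomap_copy.c are wrapped in #ifndef guards, so an architecture that
wants to keep its own optimized routines can opt out of the generic
ones by defining the corresponding macro in its asm/io.h before
asm-generic/io.h is included. A minimal sketch of the opt-out (the
arch/foo header below is hypothetical and not part of this patch):

  /* arch/foo/include/asm/io.h */
  #define __memcpy_fromio __memcpy_fromio
  void __memcpy_fromio(void *to, const volatile void __iomem *from,
                       size_t count);

  #include <asm-generic/io.h>

Architectures that define none of these macros simply pick up the
generic byte-wise head/tail plus native-word-sized copy loops added
below.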

Reviewed-by: Yann Sionneau <ysionneau@...rayinc.com>
Signed-off-by: Julian Vetter <jvetter@...rayinc.com>
---
Changes for v5:
- Add function prototypes to asm-generic/io.h
- Instead of having yet another file, we add the functions to
  iomap_copy.c as proposed by Arnd
---
 include/asm-generic/io.h |  12 +++++
 lib/iomap_copy.c         | 107 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 119 insertions(+)

diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 80de699bf6af..9b8e0449da28 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -102,6 +102,18 @@ static inline void log_post_read_mmio(u64 val, u8 width, const volatile void __i
 
 #endif /* CONFIG_TRACE_MMIO_ACCESS */
 
+#ifndef __memcpy_fromio
+void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count);
+#endif
+
+#ifndef __memcpy_toio
+void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count);
+#endif
+
+#ifndef __memset_io
+void __memset_io(volatile void __iomem *dst, int c, size_t count);
+#endif
+
 /*
  * __raw_{read,write}{b,w,l,q}() access memory in native endianness.
  *
diff --git a/lib/iomap_copy.c b/lib/iomap_copy.c
index 2fd5712fb7c0..fabcc1e95668 100644
--- a/lib/iomap_copy.c
+++ b/lib/iomap_copy.c
@@ -3,9 +3,14 @@
  * Copyright 2006 PathScale, Inc.  All Rights Reserved.
  */
 
 #include <linux/export.h>
+#include <linux/types.h>
 #include <linux/io.h>
+
+#include <asm/unaligned.h>
 
+#define NATIVE_STORE_SIZE	(BITS_PER_LONG/8)
+
 /**
  * __iowrite32_copy - copy data to MMIO space, in 32-bit units
  * @to: destination, in MMIO space (must be 32-bit aligned)
@@ -76,3 +81,105 @@ void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
 }
 EXPORT_SYMBOL_GPL(__iowrite64_copy);
 #endif
+
+
+#ifndef __memcpy_fromio
+void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
+{
+	while (count && !IS_ALIGNED((unsigned long)from, NATIVE_STORE_SIZE)) {
+		*(u8 *)to = __raw_readb(from);
+		from++;
+		to++;
+		count--;
+	}
+
+	while (count >= NATIVE_STORE_SIZE) {
+#ifdef CONFIG_64BIT
+		put_unaligned(__raw_readq(from), (uintptr_t *)to);
+#else
+		put_unaligned(__raw_readl(from), (uintptr_t *)to);
+#endif
+
+		from += NATIVE_STORE_SIZE;
+		to += NATIVE_STORE_SIZE;
+		count -= NATIVE_STORE_SIZE;
+	}
+
+	while (count) {
+		*(u8 *)to = __raw_readb(from);
+		from++;
+		to++;
+		count--;
+	}
+}
+EXPORT_SYMBOL(__memcpy_fromio);
+#endif
+
+#ifndef __memcpy_toio
+void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
+{
+	while (count && !IS_ALIGNED((unsigned long)to, NATIVE_STORE_SIZE)) {
+		__raw_writeb(*(u8 *)from, to);
+		from++;
+		to++;
+		count--;
+	}
+
+	while (count >= NATIVE_STORE_SIZE) {
+#ifdef CONFIG_64BIT
+		__raw_writeq(get_unaligned((uintptr_t *)from), to);
+#else
+		__raw_writel(get_unaligned((uintptr_t *)from), to);
+#endif
+
+		from += NATIVE_STORE_SIZE;
+		to += NATIVE_STORE_SIZE;
+		count -= NATIVE_STORE_SIZE;
+	}
+
+	while (count) {
+		__raw_writeb(*(u8 *)from, to);
+		from++;
+		to++;
+		count--;
+	}
+}
+EXPORT_SYMBOL(__memcpy_toio);
+#endif
+
+#ifndef __memset_io
+void __memset_io(volatile void __iomem *dst, int c, size_t count)
+{
+	uintptr_t qc = (u8)c;
+
+	qc |= qc << 8;
+	qc |= qc << 16;
+
+	if (IS_ENABLED(CONFIG_64BIT))
+		qc |= qc << 16 << 16; /* == qc << 32; avoids a shift warning on 32-bit */
+
+	while (count && !IS_ALIGNED((unsigned long)dst, NATIVE_STORE_SIZE)) {
+		__raw_writeb(c, dst);
+		dst++;
+		count--;
+	}
+
+	while (count >= NATIVE_STORE_SIZE) {
+#ifdef CONFIG_64BIT
+		__raw_writeq(qc, dst);
+#else
+		__raw_writel(qc, dst);
+#endif
+
+		dst += NATIVE_STORE_SIZE;
+		count -= NATIVE_STORE_SIZE;
+	}
+
+	while (count) {
+		__raw_writeb(c, dst);
+		dst++;
+		count--;
+	}
+}
+EXPORT_SYMBOL(__memset_io);
+#endif
-- 
2.34.1