lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20230826-optimize_checksum-v1-2-937501b4522a@rivosinc.com>
Date:   Sat, 26 Aug 2023 18:26:07 -0700
From:   Charlie Jenkins <charlie@...osinc.com>
To:     linux-riscv@...ts.infradead.org, linux-kernel@...r.kernel.org
Cc:     Paul Walmsley <paul.walmsley@...ive.com>,
        Palmer Dabbelt <palmer@...belt.com>,
        Albert Ou <aou@...s.berkeley.edu>,
        Charlie Jenkins <charlie@...osinc.com>
Subject: [PATCH 2/5] riscv: Add checksum library

Provide 32-bit and 64-bit versions of do_csum. When compiled for 32-bit,
it will load from the buffer in groups of 32 bits, and when compiled for
64-bit it will load in groups of 64 bits.

Signed-off-by: Charlie Jenkins <charlie@...osinc.com>
---
 arch/riscv/include/asm/checksum.h |   2 -
 arch/riscv/lib/Makefile           |   1 +
 arch/riscv/lib/csum.c             | 118 ++++++++++++++++++++++++++++++++++++++
 3 files changed, 119 insertions(+), 2 deletions(-)

diff --git a/arch/riscv/include/asm/checksum.h b/arch/riscv/include/asm/checksum.h
index cd98f8cde888..af49b3409576 100644
--- a/arch/riscv/include/asm/checksum.h
+++ b/arch/riscv/include/asm/checksum.h
@@ -76,10 +76,8 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 #endif
 #define ip_fast_csum ip_fast_csum
 
-#ifdef CONFIG_64BIT
 extern unsigned int do_csum(const unsigned char *buff, int len);
 #define do_csum do_csum
-#endif
 
 #include <asm-generic/checksum.h>
 
diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile
index 26cb2502ecf8..2aa1a4ad361f 100644
--- a/arch/riscv/lib/Makefile
+++ b/arch/riscv/lib/Makefile
@@ -6,6 +6,7 @@ lib-y			+= memmove.o
 lib-y			+= strcmp.o
 lib-y			+= strlen.o
 lib-y			+= strncmp.o
+lib-y			+= csum.o
 lib-$(CONFIG_MMU)	+= uaccess.o
 lib-$(CONFIG_64BIT)	+= tishift.o
 lib-$(CONFIG_RISCV_ISA_ZICBOZ)	+= clear_page.o
diff --git a/arch/riscv/lib/csum.c b/arch/riscv/lib/csum.c
new file mode 100644
index 000000000000..2037041ce8a0
--- /dev/null
+++ b/arch/riscv/lib/csum.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IP checksum library
+ *
+ * Influenced by arch/arm64/lib/csum.c
+ * Copyright (C) 2023 Rivos Inc.
+ */
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/kasan-checks.h>
+#include <linux/kernel.h>
+
+#include <net/checksum.h>
+
+/* Default version is sufficient for 32 bit */
+#ifdef CONFIG_64BIT
+__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+			const struct in6_addr *daddr,
+			__u32 len, __u8 proto, __wsum csum)
+{
+	unsigned long sum, ulen, uproto;	/* 64-bit accumulator and operands */
+
+	uproto = (unsigned long)htonl(proto);	/* proto in network byte order */
+	ulen = (unsigned long)htonl(len);	/* len in network byte order */
+	sum = (unsigned long)csum;
+
+	sum += *(const unsigned long *)saddr->s6_addr;	/* NOTE(review): assumes saddr/daddr are 8-byte aligned — confirm */
+	sum += sum < csum;	/* end-around carry: the add wrapped iff sum < old value */
+
+	sum += *((const unsigned long *)saddr->s6_addr + 1);
+	sum += sum < *((const unsigned long *)saddr->s6_addr + 1);	/* carry */
+
+	sum += *(const unsigned long *)daddr->s6_addr;
+	sum += sum < *(const unsigned long *)daddr->s6_addr;	/* carry */
+
+	sum += *((const unsigned long *)daddr->s6_addr + 1);
+	sum += sum < *((const unsigned long *)daddr->s6_addr + 1);	/* carry */
+
+	sum += ulen;
+	sum += sum < ulen;	/* carry */
+
+	sum += uproto;
+	sum += sum < uproto;	/* carry */
+
+	sum += (sum >> 32) | (sum << 32);	/* fold 64-bit sum into the upper 32 bits */
+	sum >>= 32;
+	return csum_fold((__force __wsum)sum);	/* fold 32 -> 16 bits and invert */
+}
+EXPORT_SYMBOL(csum_ipv6_magic);
+#endif
+
+#ifdef CONFIG_32BIT
+typedef unsigned int csum_t;
+#define OFFSET_MASK 3
+#else
+typedef unsigned long csum_t;
+#define OFFSET_MASK 7
+#endif
+
+/*
+ * Perform a checksum on an arbitrary memory address.
+ * Algorithm accounts for buff being misaligned.
+ * If not aligned on a word-size boundary, it will read the whole word but not
+ * use the bytes that it shouldn't. The same thing will occur on the tail-end
+ * of the read.
+ */
+unsigned int __no_sanitize_address do_csum(const unsigned char *buff, int len)
+{
+	unsigned int offset, shift;
+	csum_t csum = 0, data;	/* accumulator must start at 0: it is only ever added to below */
+	const csum_t *ptr;
+
+	if (unlikely(len <= 0))
+		return 0;
+	/*
+	 * To align the address, grab the whole first word in buff.
+	 * Since it is inside of a same word, it will never cross pages or cache
+	 * lines.
+	 * Directly call KASAN with the alignment we will be using.
+	 */
+	offset = (csum_t)buff & OFFSET_MASK;
+	kasan_check_read(buff, len);
+	ptr = (const csum_t *)(buff - offset);
+	len = len + offset - sizeof(csum_t);
+
+	/*
+	 * RISC-V is always little endian, so need to clear bits to the right.
+	 */
+	shift = offset * 8;
+	data = *ptr;
+	data = (data >> shift) << shift;
+
+	while (len > 0) {
+		csum += data;
+		csum += csum < data;	/* add carry-out of the previous add */
+		len -= sizeof(csum_t);
+		ptr += 1;
+		data = *ptr;
+	}
+
+	/*
+	 * Perform alignment (and over-read) bytes on the tail if any bytes
+	 * leftover.
+	 */
+	shift = len * -8;
+	data = (data << shift) >> shift;
+	csum += data;
+	csum += csum < data;	/* add carry-out of the tail add */
+
+#ifdef CONFIG_64BIT
+	csum += (csum >> 32) | (csum << 32);
+	csum >>= 32;
+#endif
+	csum = (unsigned int)csum + (((unsigned int)csum >> 16) | ((unsigned int)csum << 16));
+	if (offset & 1)
+		return (unsigned short)swab32(csum);
+	return csum >> 16;
+}

-- 
2.41.0

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ