Message-Id: <20231205022418.1703007-20-viro@zeniv.linux.org.uk>
Date: Tue, 5 Dec 2023 02:24:11 +0000
From: Al Viro <viro@...iv.linux.org.uk>
To: linux-arch@...r.kernel.org
Cc: gus Gusenleitner Klaus <gus@...a.com>,
Al Viro <viro@....linux.org.uk>,
Thomas Gleixner <tglx@...utronix.de>,
lkml <linux-kernel@...r.kernel.org>,
Ingo Molnar <mingo@...hat.com>, "bp@...en8.de" <bp@...en8.de>,
"dave.hansen@...ux.intel.com" <dave.hansen@...ux.intel.com>,
"x86@...nel.org" <x86@...nel.org>,
"David S. Miller" <davem@...emloft.net>,
"dsahern@...nel.org" <dsahern@...nel.org>,
"kuba@...nel.org" <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Eric Dumazet <edumazet@...gle.com>
Subject: [PATCH v2 11/18] x86: merge csum_fold() for 32bit and 64bit

The 32bit and 64bit instances of csum_fold() are identical; merge them
into asm/checksum.h and remove the duplicates.

Signed-off-by: Al Viro <viro@...iv.linux.org.uk>
---
arch/x86/include/asm/checksum.h | 22 ++++++++++++++++++++++
arch/x86/include/asm/checksum_32.h | 14 --------------
arch/x86/include/asm/checksum_64.h | 18 ------------------
3 files changed, 22 insertions(+), 32 deletions(-)

diff --git a/arch/x86/include/asm/checksum.h b/arch/x86/include/asm/checksum.h
index 6df6ece8a28e..eaa5dda09bee 100644
--- a/arch/x86/include/asm/checksum.h
+++ b/arch/x86/include/asm/checksum.h
@@ -1,13 +1,35 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_CHECKSUM_H
+#define _ASM_X86_CHECKSUM_H
#ifdef CONFIG_GENERIC_CSUM
# include <asm-generic/checksum.h>
#else
# define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1
# define HAVE_CSUM_COPY_USER
# define _HAVE_ARCH_CSUM_AND_COPY
+
+/**
+ * csum_fold - Fold and invert a 32bit checksum.
+ * sum: 32bit unfolded sum
+ *
+ * Fold a 32bit running checksum to 16bit and invert it. This is usually
+ * the last step before putting a checksum into a packet.
+ * Make sure not to mix with 64bit checksums.
+ */
+static inline __sum16 csum_fold(__wsum sum)
+{
+ asm(" addl %1,%0\n"
+ " adcl $0xffff,%0"
+ : "=r" (sum)
+ : "r" ((__force u32)sum << 16),
+ "0" ((__force u32)sum & 0xffff0000));
+ return (__force __sum16)(~(__force u32)sum >> 16);
+}
+
# ifdef CONFIG_X86_32
# include <asm/checksum_32.h>
# else
# include <asm/checksum_64.h>
# endif
#endif
+#endif
diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
index 7570bdff7dea..4e96d0473f88 100644
--- a/arch/x86/include/asm/checksum_32.h
+++ b/arch/x86/include/asm/checksum_32.h
@@ -92,20 +92,6 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
return (__force __sum16)sum;
}

-/*
- * Fold a partial checksum
- */
-
-static inline __sum16 csum_fold(__wsum sum)
-{
- asm("addl %1, %0 ;\n"
- "adcl $0xffff, %0 ;\n"
- : "=r" (sum)
- : "r" ((__force u32)sum << 16),
- "0" ((__force u32)sum & 0xffff0000));
- return (__force __sum16)(~(__force u32)sum >> 16);
-}
-
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
__u32 len, __u8 proto,
__wsum sum)
diff --git a/arch/x86/include/asm/checksum_64.h b/arch/x86/include/asm/checksum_64.h
index 2bd75710eea1..d261b4124ca6 100644
--- a/arch/x86/include/asm/checksum_64.h
+++ b/arch/x86/include/asm/checksum_64.h
@@ -11,24 +11,6 @@
#include <linux/compiler.h>
#include <asm/byteorder.h>

-/*
- * csum_fold - Fold and invert a 32bit checksum.
- * sum: 32bit unfolded sum
- *
- * Fold a 32bit running checksum to 16bit and invert it. This is usually
- * the last step before putting a checksum into a packet.
- * Make sure not to mix with 64bit checksums.
- */
-static inline __sum16 csum_fold(__wsum sum)
-{
- asm(" addl %1,%0\n"
- " adcl $0xffff,%0"
- : "=r" (sum)
- : "r" ((__force u32)sum << 16),
- "0" ((__force u32)sum & 0xffff0000));
- return (__force __sum16)(~(__force u32)sum >> 16);
-}
-
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
--
2.39.2
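
For readers following along: below is a minimal userspace sketch of what
the merged asm computes, assuming nothing beyond the C standard library.
The helper name csum_fold_c and the spot-check values in main() are made
up for illustration; uint32_t/uint16_t stand in for the kernel's
__wsum/__sum16 types.

#include <stdint.h>
#include <stdio.h>

/*
 * Plain-C equivalent of csum_fold(): fold a 32bit ones'-complement
 * running sum to 16 bits with end-around carry, then invert it for
 * the wire.
 */
static inline uint16_t csum_fold_c(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold; result fits in 17 bits */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb the end-around carry */
	return (uint16_t)~sum;
}

int main(void)
{
	/* 0xffffffff exercises the carry path: both folds are needed. */
	printf("%#x\n", (unsigned)csum_fold_c(0xffffffff));	/* prints 0 */
	printf("%#x\n", (unsigned)csum_fold_c(0x12345678));
	return 0;
}

The asm variant performs the same fold in the top half of the register:
it adds (sum << 16) into (sum & 0xffff0000) so the folded value lands in
the high 16 bits, lets adcl $0xffff absorb the end-around carry in a
single instruction, and returns the complemented high half.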