Message-Id: <20250325121624.523258-6-guoren@kernel.org>
Date: Tue, 25 Mar 2025 08:15:46 -0400
From: guoren@...nel.org
To: arnd@...db.de,
gregkh@...uxfoundation.org,
torvalds@...ux-foundation.org,
paul.walmsley@...ive.com,
palmer@...belt.com,
anup@...infault.org,
atishp@...shpatra.org,
oleg@...hat.com,
kees@...nel.org,
tglx@...utronix.de,
will@...nel.org,
mark.rutland@....com,
brauner@...nel.org,
akpm@...ux-foundation.org,
rostedt@...dmis.org,
edumazet@...gle.com,
unicorn_wang@...look.com,
inochiama@...look.com,
gaohan@...as.ac.cn,
shihua@...as.ac.cn,
jiawei@...as.ac.cn,
wuwei2016@...as.ac.cn,
drew@...7.com,
prabhakar.mahadev-lad.rj@...renesas.com,
ctsai390@...estech.com,
wefu@...hat.com,
kuba@...nel.org,
pabeni@...hat.com,
josef@...icpanda.com,
dsterba@...e.com,
mingo@...hat.com,
peterz@...radead.org,
boqun.feng@...il.com,
guoren@...nel.org,
xiao.w.wang@...el.com,
qingfang.deng@...lower.com.cn,
leobras@...hat.com,
jszhang@...nel.org,
conor.dooley@...rochip.com,
samuel.holland@...ive.com,
yongxuan.wang@...ive.com,
luxu.kernel@...edance.com,
david@...hat.com,
ruanjinjie@...wei.com,
cuiyunhui@...edance.com,
wangkefeng.wang@...wei.com,
qiaozhe@...as.ac.cn
Cc: ardb@...nel.org,
ast@...nel.org,
linux-kernel@...r.kernel.org,
linux-riscv@...ts.infradead.org,
kvm@...r.kernel.org,
kvm-riscv@...ts.infradead.org,
linux-mm@...ck.org,
linux-crypto@...r.kernel.org,
bpf@...r.kernel.org,
linux-input@...r.kernel.org,
linux-perf-users@...r.kernel.org,
linux-serial@...r.kernel.org,
linux-fsdevel@...r.kernel.org,
linux-arch@...r.kernel.org,
maple-tree@...ts.infradead.org,
linux-trace-kernel@...r.kernel.org,
netdev@...r.kernel.org,
linux-atm-general@...ts.sourceforge.net,
linux-btrfs@...r.kernel.org,
netfilter-devel@...r.kernel.org,
coreteam@...filter.org,
linux-nfs@...r.kernel.org,
linux-sctp@...r.kernel.org,
linux-usb@...r.kernel.org,
linux-media@...r.kernel.org
Subject: [RFC PATCH V3 05/43] rv64ilp32_abi: riscv: crc32: Utilize 64-bit width to improve performance
From: "Guo Ren (Alibaba DAMO Academy)" <guoren@...nel.org>
The RV64ILP32 ABI is derived from a 64-bit ISA but uses a 32-bit
BITS_PER_LONG. The crc32 code can therefore still use the full 64-bit
register width, rather than unsigned long, to improve performance.
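As an aside (illustrative only, not part of the patch): a fold loop
keyed to "unsigned long" consumes only sizeof(long) == 4 bytes per
iteration on an ILP32 ABI, even though RV64 hardware can carry-less
multiply 64-bit operands. Keying the loop to a fixed 64-bit type (u64,
or the xlen_t presumably provided via the new <asm/csr.h> include)
keeps the 8-bytes-per-step fast path. The small stand-alone program
below only compares iteration counts; it does not implement the
kernel's Zbc CRC:

  /* Illustrative sketch only -- not kernel code. */
  #include <stddef.h>
  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          size_t len = 4096;      /* bytes of input to fold */

          /* Each fold step consumes sizeof() of the working type. */
          size_t steps_long = len / sizeof(unsigned long); /* 1024 on ILP32 */
          size_t steps_u64  = len / sizeof(uint64_t);      /* 512 on any ABI */

          printf("unsigned long: %zu bytes -> %zu fold steps\n",
                 sizeof(unsigned long), steps_long);
          printf("uint64_t:      %zu bytes -> %zu fold steps\n",
                 sizeof(uint64_t), steps_u64);
          return 0;
  }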
Signed-off-by: Guo Ren (Alibaba DAMO Academy) <guoren@...nel.org>
---
arch/riscv/lib/crc32-riscv.c | 35 ++++++++++++++++++-----------------
1 file changed, 18 insertions(+), 17 deletions(-)
diff --git a/arch/riscv/lib/crc32-riscv.c b/arch/riscv/lib/crc32-riscv.c
index 53d56ab422c7..68dfb0565696 100644
--- a/arch/riscv/lib/crc32-riscv.c
+++ b/arch/riscv/lib/crc32-riscv.c
@@ -8,6 +8,7 @@
#include <asm/hwcap.h>
#include <asm/alternative-macros.h>
#include <asm/byteorder.h>
+#include <asm/csr.h>
#include <linux/types.h>
#include <linux/minmax.h>
@@ -59,12 +60,12 @@
*/
# define CRC32_POLY_QT_BE 0x04d101df481b4e5a
-static inline u64 crc32_le_prep(u32 crc, unsigned long const *ptr)
+static inline u64 crc32_le_prep(u32 crc, u64 const *ptr)
{
return (u64)crc ^ (__force u64)__cpu_to_le64(*ptr);
}
-static inline u32 crc32_le_zbc(unsigned long s, u32 poly, unsigned long poly_qt)
+static inline u32 crc32_le_zbc(u64 s, u32 poly, u64 poly_qt)
{
u32 crc;
@@ -85,7 +86,7 @@ static inline u32 crc32_le_zbc(unsigned long s, u32 poly, unsigned long poly_qt)
return crc;
}
-static inline u64 crc32_be_prep(u32 crc, unsigned long const *ptr)
+static inline u64 crc32_be_prep(u32 crc, u64 const *ptr)
{
return ((u64)crc << 32) ^ (__force u64)__cpu_to_be64(*ptr);
}
@@ -131,7 +132,7 @@ static inline u32 crc32_be_prep(u32 crc, unsigned long const *ptr)
# error "Unexpected __riscv_xlen"
#endif
-static inline u32 crc32_be_zbc(unsigned long s)
+static inline u32 crc32_be_zbc(xlen_t s)
{
u32 crc;
@@ -156,16 +157,16 @@ typedef u32 (*fallback)(u32 crc, unsigned char const *p, size_t len);
static inline u32 crc32_le_unaligned(u32 crc, unsigned char const *p,
size_t len, u32 poly,
- unsigned long poly_qt)
+ xlen_t poly_qt)
{
size_t bits = len * 8;
- unsigned long s = 0;
+ xlen_t s = 0;
u32 crc_low = 0;
for (int i = 0; i < len; i++)
- s = ((unsigned long)*p++ << (__riscv_xlen - 8)) | (s >> 8);
+ s = ((xlen_t)*p++ << (__riscv_xlen - 8)) | (s >> 8);
- s ^= (unsigned long)crc << (__riscv_xlen - bits);
+ s ^= (xlen_t)crc << (__riscv_xlen - bits);
if (__riscv_xlen == 32 || len < sizeof(u32))
crc_low = crc >> bits;
@@ -177,12 +178,12 @@ static inline u32 crc32_le_unaligned(u32 crc, unsigned char const *p,
static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p,
size_t len, u32 poly,
- unsigned long poly_qt,
+ xlen_t poly_qt,
fallback crc_fb)
{
size_t offset, head_len, tail_len;
- unsigned long const *p_ul;
- unsigned long s;
+ xlen_t const *p_ul;
+ xlen_t s;
asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
RISCV_ISA_EXT_ZBC, 1)
@@ -199,7 +200,7 @@ static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p,
tail_len = len & OFFSET_MASK;
len = len >> STEP_ORDER;
- p_ul = (unsigned long const *)p;
+ p_ul = (xlen_t const *)p;
for (int i = 0; i < len; i++) {
s = crc32_le_prep(crc, p_ul);
@@ -236,7 +237,7 @@ static inline u32 crc32_be_unaligned(u32 crc, unsigned char const *p,
size_t len)
{
size_t bits = len * 8;
- unsigned long s = 0;
+ xlen_t s = 0;
u32 crc_low = 0;
s = 0;
@@ -247,7 +248,7 @@ static inline u32 crc32_be_unaligned(u32 crc, unsigned char const *p,
s ^= crc >> (32 - bits);
crc_low = crc << bits;
} else {
- s ^= (unsigned long)crc << (bits - 32);
+ s ^= (xlen_t)crc << (bits - 32);
}
crc = crc32_be_zbc(s);
@@ -259,8 +260,8 @@ static inline u32 crc32_be_unaligned(u32 crc, unsigned char const *p,
u32 __pure crc32_be_arch(u32 crc, const u8 *p, size_t len)
{
size_t offset, head_len, tail_len;
- unsigned long const *p_ul;
- unsigned long s;
+ xlen_t const *p_ul;
+ xlen_t s;
asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
RISCV_ISA_EXT_ZBC, 1)
@@ -277,7 +278,7 @@ u32 __pure crc32_be_arch(u32 crc, const u8 *p, size_t len)
tail_len = len & OFFSET_MASK;
len = len >> STEP_ORDER;
- p_ul = (unsigned long const *)p;
+ p_ul = (xlen_t const *)p;
for (int i = 0; i < len; i++) {
s = crc32_be_prep(crc, p_ul);
--
2.40.1