Message-Id: <20220921214439.1491510-13-stillson@rivosinc.com>
Date: Wed, 21 Sep 2022 14:43:55 -0700
From: Chris Stillson <stillson@...osinc.com>
To: unlisted-recipients:; (no To-header on input)
Cc: Greentime Hu <greentime.hu@...ive.com>,
Han-Kuan Chen <hankuan.chen@...ive.com>,
Paul Walmsley <paul.walmsley@...ive.com>,
Palmer Dabbelt <palmer@...belt.com>,
Albert Ou <aou@...s.berkeley.edu>,
Eric Biederman <ebiederm@...ssion.com>,
Kees Cook <keescook@...omium.org>,
Anup Patel <anup@...infault.org>,
Atish Patra <atishp@...shpatra.org>,
Oleg Nesterov <oleg@...hat.com>,
Heinrich Schuchardt <heinrich.schuchardt@...onical.com>,
Guo Ren <guoren@...nel.org>,
Conor Dooley <conor.dooley@...rochip.com>,
Arnaud Pouliquen <arnaud.pouliquen@...s.st.com>,
Chris Stillson <stillson@...osinc.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Qinglin Pan <panqinglin2020@...as.ac.cn>,
Alexandre Ghiti <alexandre.ghiti@...onical.com>,
Arnd Bergmann <arnd@...db.de>,
Vincent Chen <vincent.chen@...ive.com>,
Heiko Stuebner <heiko@...ech.de>, Dao Lu <daolu@...osinc.com>,
Jisheng Zhang <jszhang@...nel.org>,
Sunil V L <sunilvl@...tanamicro.com>,
Li Zhengyu <lizhengyu3@...wei.com>,
Alexander Graf <graf@...zon.com>,
Ard Biesheuvel <ardb@...nel.org>,
Tsukasa OI <research_trasio@....a4lg.com>,
Yury Norov <yury.norov@...il.com>,
"Paul E. McKenney" <paulmck@...nel.org>,
Nicolas Saenz Julienne <nsaenzju@...hat.com>,
Mark Rutland <mark.rutland@....com>,
Frederic Weisbecker <frederic@...nel.org>,
Changbin Du <changbin.du@...el.com>,
Vitaly Wool <vitaly.wool@...sulko.com>,
Myrtle Shah <gatecat@....me>,
Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>, Mark Brown <broonie@...nel.org>,
Huacai Chen <chenhuacai@...nel.org>,
Alexey Dobriyan <adobriyan@...il.com>,
Janosch Frank <frankja@...ux.ibm.com>,
Christian Brauner <brauner@...nel.org>,
Colin Cross <ccross@...gle.com>,
Eugene Syromiatnikov <esyr@...hat.com>,
Peter Collingbourne <pcc@...gle.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Suren Baghdasaryan <surenb@...gle.com>,
Barret Rhoden <brho@...gle.com>,
Davidlohr Bueso <dave@...olabs.net>,
linux-riscv@...ts.infradead.org, linux-kernel@...r.kernel.org,
linux-mm@...ck.org, kvm@...r.kernel.org,
kvm-riscv@...ts.infradead.org
Subject: [PATCH v12 13/17] riscv: Add vector extension XOR implementation
From: Greentime Hu <greentime.hu@...ive.com>
This patch adds support for vector-optimized XOR; it has been tested in
QEMU. (An illustrative C sketch of the strip-mined loop the assembly
implements follows the diffstat below.)
Co-developed-by: Han-Kuan Chen <hankuan.chen@...ive.com>
Signed-off-by: Han-Kuan Chen <hankuan.chen@...ive.com>
Signed-off-by: Greentime Hu <greentime.hu@...ive.com>
---
arch/riscv/include/asm/xor.h | 82 ++++++++++++++++++++++++++++++++++++
arch/riscv/lib/Makefile | 1 +
arch/riscv/lib/xor.S | 81 +++++++++++++++++++++++++++++++++++
3 files changed, 164 insertions(+)
create mode 100644 arch/riscv/include/asm/xor.h
create mode 100644 arch/riscv/lib/xor.S
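
Note: all of the assembly routines below share one strip-mining shape. As
an illustration only (not part of the patch), a scalar C equivalent of
xor_regs_2_ might look like the following, where VLMAX_BYTES is a
hypothetical stand-in for the chunk size that vsetvli grants each pass:

	#define VLMAX_BYTES 128	/* e.g. VLEN = 128 bits with e8/m8 */

	static void xor_2_sketch(unsigned long bytes, unsigned char *restrict p1,
				 const unsigned char *restrict p2)
	{
		while (bytes) {
			/* vsetvli: take at most VLMAX_BYTES of what is left */
			unsigned long vl = bytes < VLMAX_BYTES ? bytes : VLMAX_BYTES;

			for (unsigned long i = 0; i < vl; i++)
				p1[i] ^= p2[i];	/* vle8.v / vxor.vv / vse8.v */
			p1 += vl;	/* add a1, a1, a3 */
			p2 += vl;	/* add a2, a2, a3 */
			bytes -= vl;	/* sub a0, a0, a3 */
		}
	}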
diff --git a/arch/riscv/include/asm/xor.h b/arch/riscv/include/asm/xor.h
new file mode 100644
index 000000000000..d1f2eeb14afb
--- /dev/null
+++ b/arch/riscv/include/asm/xor.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2021 SiFive
+ */
+
+#include <linux/hardirq.h>
+#include <asm-generic/xor.h>
+#ifdef CONFIG_VECTOR
+#include <asm/vector.h>
+#include <asm/switch_to.h>
+
+void xor_regs_2_(unsigned long bytes, unsigned long *__restrict p1,
+ const unsigned long *__restrict p2);
+void xor_regs_3_(unsigned long bytes, unsigned long *__restrict p1,
+ const unsigned long *__restrict p2,
+ const unsigned long *__restrict p3);
+void xor_regs_4_(unsigned long bytes, unsigned long *__restrict p1,
+ const unsigned long *__restrict p2,
+ const unsigned long *__restrict p3,
+ const unsigned long *__restrict p4);
+void xor_regs_5_(unsigned long bytes, unsigned long *__restrict p1,
+ const unsigned long *__restrict p2,
+ const unsigned long *__restrict p3,
+ const unsigned long *__restrict p4,
+ const unsigned long *__restrict p5);
+
+static void xor_rvv_2(unsigned long bytes, unsigned long *__restrict p1,
+ const unsigned long *__restrict p2)
+{
+ kernel_rvv_begin(); /* claim the vector unit for kernel use */
+ xor_regs_2_(bytes, p1, p2);
+ kernel_rvv_end();
+}
+
+static void xor_rvv_3(unsigned long bytes, unsigned long *__restrict p1,
+ const unsigned long *__restrict p2,
+ const unsigned long *__restrict p3)
+{
+ kernel_rvv_begin();
+ xor_regs_3_(bytes, p1, p2, p3);
+ kernel_rvv_end();
+}
+
+static void xor_rvv_4(unsigned long bytes, unsigned long *__restrict p1,
+ const unsigned long *__restrict p2,
+ const unsigned long *__restrict p3,
+ const unsigned long *__restrict p4)
+{
+ kernel_rvv_begin();
+ xor_regs_4_(bytes, p1, p2, p3, p4);
+ kernel_rvv_end();
+}
+
+static void xor_rvv_5(unsigned long bytes, unsigned long *__restrict p1,
+ const unsigned long *__restrict p2,
+ const unsigned long *__restrict p3,
+ const unsigned long *__restrict p4,
+ const unsigned long *__restrict p5)
+{
+ kernel_rvv_begin();
+ xor_regs_5_(bytes, p1, p2, p3, p4, p5);
+ kernel_rvv_end();
+}
+
+static struct xor_block_template xor_block_rvv = {
+ .name = "rvv",
+ .do_2 = xor_rvv_2,
+ .do_3 = xor_rvv_3,
+ .do_4 = xor_rvv_4,
+ .do_5 = xor_rvv_5
+};
+
+#undef XOR_TRY_TEMPLATES
+#define XOR_TRY_TEMPLATES \
+ do { \
+ xor_speed(&xor_block_8regs); \
+ xor_speed(&xor_block_32regs); \
+ if (has_vector()) { \
+ xor_speed(&xor_block_rvv);\
+ } \
+ } while (0)
+#endif
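
For context: XOR_TRY_TEMPLATES above is expanded by the generic calibration
code, which times every registered template and keeps the fastest, so "rvv"
is only used when it actually beats the scalar 8regs/32regs loops on the
running hardware. Roughly (a simplified sketch, not the real crypto/xor.c;
measure() is hypothetical):

	static struct xor_block_template *fastest;

	#define xor_speed(templ) do {					\
		measure(templ);		/* fills (templ)->speed */	\
		if (!fastest || (templ)->speed > fastest->speed)	\
			fastest = (templ);				\
	} while (0)

	static void calibrate(void)
	{
		XOR_TRY_TEMPLATES; /* 8regs, 32regs, plus rvv when has_vector() */
	}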
diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile
index 25d5c9664e57..acd87ac86d24 100644
--- a/arch/riscv/lib/Makefile
+++ b/arch/riscv/lib/Makefile
@@ -7,3 +7,4 @@ lib-$(CONFIG_MMU) += uaccess.o
lib-$(CONFIG_64BIT) += tishift.o
obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
+lib-$(CONFIG_VECTOR) += xor.o
diff --git a/arch/riscv/lib/xor.S b/arch/riscv/lib/xor.S
new file mode 100644
index 000000000000..3bc059e18171
--- /dev/null
+++ b/arch/riscv/lib/xor.S
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2021 SiFive
+ */
+#include <linux/linkage.h>
+#include <asm-generic/export.h>
+#include <asm/asm.h>
+
+ENTRY(xor_regs_2_)
+ vsetvli a3, a0, e8, m8, ta, ma  # a3 = vl, the number of bytes this pass
+ vle8.v v0, (a1)                 # load vl bytes from p1
+ vle8.v v8, (a2)                 # load vl bytes from p2
+ sub a0, a0, a3                  # bytes -= vl
+ vxor.vv v16, v0, v8             # v16 = p1 ^ p2
+ add a2, a2, a3                  # p2 += vl
+ vse8.v v16, (a1)                # store the result back to p1
+ add a1, a1, a3                  # p1 += vl
+ bnez a0, xor_regs_2_            # strip-mine until all bytes are done
+ ret
+END(xor_regs_2_)
+EXPORT_SYMBOL(xor_regs_2_)
+
+ENTRY(xor_regs_3_)
+ vsetvli a4, a0, e8, m8, ta, ma  # same strip-mining shape as xor_regs_2_
+ vle8.v v0, (a1)
+ vle8.v v8, (a2)
+ sub a0, a0, a4
+ vxor.vv v0, v0, v8              # accumulate into v0 ...
+ vle8.v v16, (a3)                # ... while the third source loads
+ add a2, a2, a4
+ vxor.vv v16, v0, v16
+ add a3, a3, a4
+ vse8.v v16, (a1)
+ add a1, a1, a4
+ bnez a0, xor_regs_3_
+ ret
+END(xor_regs_3_)
+EXPORT_SYMBOL(xor_regs_3_)
+
+ENTRY(xor_regs_4_)
+ vsetvli a5, a0, e8, m8, ta, ma  # four-source variant of the same loop
+ vle8.v v0, (a1)
+ vle8.v v8, (a2)
+ sub a0, a0, a5
+ vxor.vv v0, v0, v8
+ vle8.v v16, (a3)
+ add a2, a2, a5
+ vxor.vv v0, v0, v16
+ vle8.v v24, (a4)
+ add a3, a3, a5
+ vxor.vv v16, v0, v24
+ add a4, a4, a5
+ vse8.v v16, (a1)
+ add a1, a1, a5
+ bnez a0, xor_regs_4_
+ ret
+END(xor_regs_4_)
+EXPORT_SYMBOL(xor_regs_4_)
+
+ENTRY(xor_regs_5_)
+ vsetvli a6, a0, e8, m8, ta, ma  # five-source variant of the same loop
+ vle8.v v0, (a1)
+ vle8.v v8, (a2)
+ sub a0, a0, a6
+ vxor.vv v0, v0, v8
+ vle8.v v16, (a3)
+ add a2, a2, a6
+ vxor.vv v0, v0, v16
+ vle8.v v24, (a4)
+ add a3, a3, a6
+ vxor.vv v0, v0, v24
+ vle8.v v8, (a5)                 # v8 is dead by now, so reuse it for p5
+ add a4, a4, a6
+ vxor.vv v16, v0, v8
+ add a5, a5, a6
+ vse8.v v16, (a1)
+ add a1, a1, a6
+ bnez a0, xor_regs_5_
+ ret
+END(xor_regs_5_)
+EXPORT_SYMBOL(xor_regs_5_)
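
Since the changelog only says "tested in QEMU", here is a minimal
stand-alone harness along the lines of what one could run under
qemu-riscv64 to check the two-source routine against a scalar reference
(a hypothetical sketch, not part of the patch; it assumes xor_regs_2_
has been extracted into a user-space build):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	void xor_regs_2_(unsigned long bytes, unsigned long *restrict p1,
			 const unsigned long *restrict p2);

	int main(void)
	{
		enum { N = 4096 };	/* bytes, a multiple of sizeof(long) */
		unsigned long *a = malloc(N), *b = malloc(N), *ref = malloc(N);

		for (size_t i = 0; i < N / sizeof(*a); i++) {
			a[i] = random();
			b[i] = random();
			ref[i] = a[i] ^ b[i];	/* scalar reference */
		}
		xor_regs_2_(N, a, b);		/* result is written back to p1 */
		puts(memcmp(a, ref, N) ? "FAIL" : "OK");
		free(a); free(b); free(ref);
		return 0;
	}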
--
2.25.1