Message-ID: <20251025210655.43099-8-geomatsi@gmail.com>
Date: Sun, 26 Oct 2025 00:06:40 +0300
From: Sergey Matyukevich <geomatsi@...il.com>
To: linux-riscv@...ts.infradead.org,
linux-kernel@...r.kernel.org,
linux-kselftest@...r.kernel.org
Cc: Paul Walmsley <pjw@...nel.org>,
Palmer Dabbelt <palmer@...belt.com>,
Albert Ou <aou@...s.berkeley.edu>,
Alexandre Ghiti <alex@...ti.fr>,
Oleg Nesterov <oleg@...hat.com>,
Shuah Khan <shuah@...nel.org>,
Thomas Huth <thuth@...hat.com>,
Charlie Jenkins <charlie@...osinc.com>,
Andy Chiu <andybnac@...il.com>,
Samuel Holland <samuel.holland@...ive.com>,
Joel Granados <joel.granados@...nel.org>,
Conor Dooley <conor.dooley@...rochip.com>,
Yong-Xuan Wang <yongxuan.wang@...ive.com>,
Heiko Stuebner <heiko@...ech.de>,
Sergey Matyukevich <geomatsi@...il.com>
Subject: [PATCH v3 7/9] selftests: riscv: verify ptrace rejects invalid vector csr inputs

Add a test to the v_ptrace test suite to verify that ptrace rejects
invalid input combinations for the vector CSR registers. Use kselftest
fixture variants to provide multiple invalid inputs to a single test.
Signed-off-by: Sergey Matyukevich <geomatsi@...il.com>
---
.../testing/selftests/riscv/vector/v_ptrace.c | 232 ++++++++++++++++++
1 file changed, 232 insertions(+)
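
Note for reviewers (not part of the patch): the test below relies on the
kselftest harness fixture-variant mechanism, where each FIXTURE_VARIANT_ADD()
entry re-runs every TEST_F() of the fixture with its own per-variant data.
A minimal sketch of that pattern follows; the "demo" fixture, the "value"
field and the include path are purely illustrative and assume the harness
header is reachable as kselftest_harness.h:

#include "kselftest_harness.h"	/* actual relative path depends on the test dir */

FIXTURE(demo) {};

FIXTURE_SETUP(demo) {}
FIXTURE_TEARDOWN(demo) {}

/* per-variant parameters, available to the test body via 'variant' */
FIXTURE_VARIANT(demo)
{
	unsigned long value;
};

FIXTURE_VARIANT_ADD(demo, case_a) { .value = 1 };
FIXTURE_VARIANT_ADD(demo, case_b) { .value = 2 };

/* runs once for case_a and once for case_b */
TEST_F(demo, check_value)
{
	ASSERT_NE(0UL, variant->value);
}

TEST_HARNESS_MAIN
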
diff --git a/tools/testing/selftests/riscv/vector/v_ptrace.c b/tools/testing/selftests/riscv/vector/v_ptrace.c
index 9fea29f7b686..6f3f228c0954 100644
--- a/tools/testing/selftests/riscv/vector/v_ptrace.c
+++ b/tools/testing/selftests/riscv/vector/v_ptrace.c
@@ -183,4 +183,236 @@ TEST(ptrace_v_early_debug)
}
}
+FIXTURE(v_csr_invalid)
+{
+};
+
+FIXTURE_SETUP(v_csr_invalid)
+{
+}
+
+FIXTURE_TEARDOWN(v_csr_invalid)
+{
+}
+
+/* modifications of the initial 'vsetvli x0, x0, e8, m8, tu, mu' settings */
+FIXTURE_VARIANT(v_csr_invalid)
+{
+ unsigned long vstart;
+ unsigned long vl;
+ unsigned long vtype;
+ unsigned long vcsr;
+ unsigned long vlenb_mul;
+ unsigned long vlenb_min;
+ unsigned long vlenb_max;
+};
+
+/* unexpected vlenb value */
+FIXTURE_VARIANT_ADD(v_csr_invalid, new_vlenb)
+{
+ .vstart = 0x0,
+ .vl = 0x0,
+ .vtype = 0x3,
+ .vcsr = 0x0,
+ .vlenb_mul = 0x2,
+ .vlenb_min = 0x0,
+ .vlenb_max = 0x0,
+};
+
+/* invalid reserved bits in vcsr */
+FIXTURE_VARIANT_ADD(v_csr_invalid, vcsr_invalid_reserved_bits)
+{
+ .vstart = 0x0,
+ .vl = 0x0,
+ .vtype = 0x3,
+ .vcsr = 0x1UL << 8,
+ .vlenb_mul = 0x1,
+ .vlenb_min = 0x0,
+ .vlenb_max = 0x0,
+};
+
+/* invalid reserved bits in vtype */
+FIXTURE_VARIANT_ADD(v_csr_invalid, vtype_invalid_reserved_bits)
+{
+ .vstart = 0x0,
+ .vl = 0x0,
+ .vtype = (0x1UL << 8) | 0x3,
+ .vcsr = 0x0,
+ .vlenb_mul = 0x1,
+ .vlenb_min = 0x0,
+ .vlenb_max = 0x0,
+};
+
+/* set vill bit */
+FIXTURE_VARIANT_ADD(v_csr_invalid, invalid_vill_bit)
+{
+ .vstart = 0x0,
+ .vl = 0x0,
+ .vtype = (0x1UL << (__riscv_xlen - 1)) | 0x3,
+ .vcsr = 0x0,
+ .vlenb_mul = 0x1,
+ .vlenb_min = 0x0,
+ .vlenb_max = 0x0,
+};
+
+/* reserved vsew value: vsew > 3 */
+FIXTURE_VARIANT_ADD(v_csr_invalid, reserved_vsew)
+{
+ .vstart = 0x0,
+ .vl = 0x0,
+ .vtype = 0x4UL << 3,
+ .vcsr = 0x0,
+ .vlenb_mul = 0x1,
+ .vlenb_min = 0x0,
+ .vlenb_max = 0x0,
+};
+
+/* reserved vlmul value: vlmul == 4 */
+FIXTURE_VARIANT_ADD(v_csr_invalid, reserved_vlmul)
+{
+ .vstart = 0x0,
+ .vl = 0x0,
+ .vtype = 0x4,
+ .vcsr = 0x0,
+ .vlenb_mul = 0x1,
+ .vlenb_min = 0x0,
+ .vlenb_max = 0x0,
+};
+
+/* invalid fractional LMUL for VLEN <= 256: LMUL = 1/8, SEW = 64 */
+FIXTURE_VARIANT_ADD(v_csr_invalid, frac_lmul1)
+{
+ .vstart = 0x0,
+ .vl = 0x0,
+ .vtype = 0x1d,
+ .vcsr = 0x0,
+ .vlenb_mul = 0x1,
+ .vlenb_min = 0x0,
+ .vlenb_max = 0x20,
+};
+
+/* invalid integral LMUL for VLEN <= 16: LMUL = 2, SEW = 64 */
+FIXTURE_VARIANT_ADD(v_csr_invalid, int_lmul1)
+{
+ .vstart = 0x0,
+ .vl = 0x0,
+ .vtype = 0x19,
+ .vcsr = 0x0,
+ .vlenb_mul = 0x1,
+ .vlenb_min = 0x0,
+ .vlenb_max = 0x2,
+};
+
+/* invalid VL for VLEN <= 128: LMUL = 2, SEW = 64, VL = 8 */
+FIXTURE_VARIANT_ADD(v_csr_invalid, vl1)
+{
+ .vstart = 0x0,
+ .vl = 0x8,
+ .vtype = 0x19,
+ .vcsr = 0x0,
+ .vlenb_mul = 0x1,
+ .vlenb_min = 0x0,
+ .vlenb_max = 0x10,
+};
+
+TEST_F(v_csr_invalid, ptrace_v_invalid_values)
+{
+ unsigned long vlenb;
+ pid_t pid;
+
+ if (!is_vector_supported())
+ SKIP(return, "Vector not supported");
+
+ asm volatile("csrr %[vlenb], vlenb" : [vlenb] "=r"(vlenb));
+ if (variant->vlenb_min) {
+ if (vlenb < variant->vlenb_min)
+ SKIP(return, "This test does not support VLEN < %lu\n",
+ variant->vlenb_min * 8);
+ }
+ if (variant->vlenb_max) {
+ if (vlenb > variant->vlenb_max)
+ SKIP(return, "This test does not support VLEN > %lu\n",
+ variant->vlenb_max * 8);
+ }
+
+ chld_lock = 1;
+ pid = fork();
+ ASSERT_LE(0, pid)
+ TH_LOG("fork: %m");
+
+ if (pid == 0) {
+ while (chld_lock == 1)
+ asm volatile("" : : "g"(chld_lock) : "memory");
+
+ asm(".option arch, +zve32x\n");
+ asm(".option arch, +c\n");
+ asm volatile("vsetvli x0, x0, e8, m8, tu, mu\n");
+
+ while (1) {
+ asm volatile("c.ebreak");
+ asm volatile("c.nop");
+ }
+ } else {
+ struct __riscv_v_regset_state *regset_data;
+ size_t regset_size;
+ struct iovec iov;
+ int status;
+ int ret;
+
+ /* attach */
+
+ ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid, NULL, NULL));
+ ASSERT_EQ(pid, waitpid(pid, &status, 0));
+ ASSERT_TRUE(WIFSTOPPED(status));
+
+ /* unlock */
+
+ ASSERT_EQ(0, ptrace(PTRACE_POKEDATA, pid, &chld_lock, 0));
+
+ /* resume and wait for the 1st c.ebreak */
+
+ ASSERT_EQ(0, ptrace(PTRACE_CONT, pid, NULL, NULL));
+ ASSERT_EQ(pid, waitpid(pid, &status, 0));
+ ASSERT_TRUE(WIFSTOPPED(status));
+
+ /* read tracee vector csr regs using ptrace GETREGSET */
+
+ regset_size = sizeof(*regset_data) + vlenb * 32;
+ regset_data = calloc(1, regset_size);
+
+ iov.iov_base = regset_data;
+ iov.iov_len = regset_size;
+
+ ASSERT_EQ(0, ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov));
+
+ /* verify initial vsetvli x0, x0, e8, m8, tu, mu settings */
+
+ EXPECT_EQ(vlenb, regset_data->vlenb);
+ EXPECT_EQ(0UL, regset_data->vstart);
+ EXPECT_EQ(3UL, regset_data->vtype);
+ EXPECT_EQ(0UL, regset_data->vcsr);
+ EXPECT_EQ(0UL, regset_data->vl);
+
+ /* apply invalid settings from fixture variants */
+
+ regset_data->vlenb *= variant->vlenb_mul;
+ regset_data->vstart = variant->vstart;
+ regset_data->vtype = variant->vtype;
+ regset_data->vcsr = variant->vcsr;
+ regset_data->vl = variant->vl;
+
+ iov.iov_base = regset_data;
+ iov.iov_len = regset_size;
+
+ errno = 0;
+ ret = ptrace(PTRACE_SETREGSET, pid, NT_RISCV_VECTOR, &iov);
+ ASSERT_EQ(errno, EINVAL);
+ ASSERT_EQ(ret, -1);
+
+ /* cleanup */
+
+ ASSERT_EQ(0, kill(pid, SIGKILL));
+ }
+}
+
TEST_HARNESS_MAIN
--
2.51.0