[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <b8065a61-f5eb-4ec5-a9af-6d6bcdf1ee9b@roeck-us.net>
Date: Thu, 15 Feb 2024 21:25:00 -0800
From: Guenter Roeck <linux@...ck-us.net>
To: Helge Deller <deller@....de>
Cc: Charlie Jenkins <charlie@...osinc.com>,
David Laight <David.Laight@...lab.com>,
Palmer Dabbelt <palmer@...belt.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Parisc List <linux-parisc@...r.kernel.org>,
Al Viro <viro@...iv.linux.org.uk>, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v8 2/2] lib: checksum: Use aligned accesses for
ip_fast_csum and csum_ipv6_magic tests
On Fri, Feb 16, 2024 at 06:54:55AM +0100, Helge Deller wrote:
>
> Can you please give a pointer to this test code?
> I'm happy to try it on real hardware.
>
See below.
Guenter
---
>From 0478f35f02224994e1d81e614b66219ab7539f7f Mon Sep 17 00:00:00 2001
From: Guenter Roeck <linux@...ck-us.net>
Date: Wed, 14 Feb 2024 11:25:18 -0800
Subject: [PATCH] carry tests
Signed-off-by: Guenter Roeck <linux@...ck-us.net>
---
lib/checksum_kunit.c | 76 ++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 76 insertions(+)
diff --git a/lib/checksum_kunit.c b/lib/checksum_kunit.c
index 72c313ba4c78..8f7925396e53 100644
--- a/lib/checksum_kunit.c
+++ b/lib/checksum_kunit.c
@@ -546,12 +546,88 @@ static void test_csum_ipv6_magic(struct kunit *test)
#endif /* !CONFIG_NET */
}
+#ifdef CONFIG_64BIT
+
+/*
+ * Read the 64-bit word at @addr twice (parisc ldd) and return the state
+ * of the PSW carry bit afterwards.
+ *
+ * The middle add of 0xffffffff to the loaded word sets the carry bit for
+ * an all-0xff buffer and leaves it clear for a zeroed buffer.  The second
+ * ldd sits between generating the carry and reading it back; if @addr is
+ * unaligned that load traps, and the test presumably checks whether the
+ * unaligned-access handler preserves the carry bit — TODO confirm with
+ * the csum_ipv6_magic thread this patch replies to.
+ *
+ * NOTE(review): parisc-only mnemonics (ldd, add,dc) under a plain
+ * CONFIG_64BIT guard — this will not assemble on other 64-bit arches.
+ */
+static __inline__ int get_carry64(void *addr)
+{
+ int carry = 0;
+ unsigned long sum = 0xffffffff;
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+" add %0, %0, %0\n" /* 0 + 0: clears the PSW carry bit */
+" ldd 0(%2), %3\n" /* first 64-bit load of *addr */
+" add %1, %3, %1\n" /* 0xffffffff + tmp: may set carry */
+" ldd 0(%2), %3\n" /* reload; traps here if addr is unaligned */
+" add,dc %0, %0, %0\n" /* 0 + 0 + carry: capture carry bit in %0 */
+ : "=r" (carry), "=r" (sum), "=r" (addr), "=r" (tmp)
+ : "0" (carry), "1" (sum), "2" (addr)
+ : "memory");
+
+ return carry;
+}
+
+/*
+ * 32-bit variant of get_carry64(): read the word at @addr twice (parisc
+ * ldw) and return the state of the PSW carry bit afterwards.
+ *
+ * sum starts at 0xffffffff, so the middle add sets carry for an all-0xff
+ * buffer and leaves it clear for a zeroed one; the second ldw between
+ * carry generation and addc is where an unaligned trap would occur.
+ */
+static __inline__ int get_carry32(void *addr)
+{
+ int carry = 0;
+ unsigned int sum = 0xffffffff;
+ unsigned int tmp;
+
+ __asm__ __volatile__ (
+" add %0, %0, %0\n" /* 0 + 0: clears the PSW carry bit */
+" ldw 0(%2), %3\n" /* first 32-bit load of *addr */
+" add %1, %3, %1\n" /* 0xffffffff + tmp: may set carry */
+" ldw 0(%2), %3\n" /* reload; traps here if addr is unaligned */
+" addc %0, %0, %0\n" /* 0 + 0 + carry: capture carry bit in %0 */
+ : "=r" (carry), "=r" (sum), "=r" (addr), "=r" (tmp)
+ : "0" (carry), "1" (sum), "2" (addr)
+ : "memory");
+
+ return carry;
+}
+
+/*
+ * Probe whether the carry bit survives aligned and unaligned loads at
+ * various offsets.  Results are only printed via pr_info(), never
+ * asserted, so this case reports but cannot fail.
+ *
+ * Fix: the unaligned get_carry32() probe below was mislabelled
+ * "carry64" (copy-paste from the 64-bit cases; compare the zeroed-buffer
+ * half, which correctly says "carry32 unaligned").
+ */
+static void test_bad_carry(struct kunit *test)
+{
+ int carry;
+
+ /* All-ones buffer: every helper add should set carry. */
+ memset(tmp_buf, 0xff, sizeof(tmp_buf));
+ carry = get_carry64(&tmp_buf[0]);
+ pr_info("#### carry64 aligned, expect 1 -> %d\n", carry);
+ carry = get_carry64(&tmp_buf[4]);
+ pr_info("#### carry64 unaligned 4, expect 1 -> %d\n", carry);
+
+ carry = get_carry64(&tmp_buf[2]);
+ pr_info("#### carry64 unaligned 2, expect 1 -> %d\n", carry);
+
+ carry = get_carry32(&tmp_buf[0]);
+ pr_info("#### carry32 aligned, expect 1 -> %d\n", carry);
+ carry = get_carry32(&tmp_buf[2]);
+ pr_info("#### carry32 unaligned, expect 1 -> %d\n", carry);
+
+ /* Zeroed buffer: no helper add should set carry. */
+ memset(tmp_buf, 0, sizeof(tmp_buf));
+ carry = get_carry64(&tmp_buf[0]);
+ pr_info("#### carry64 aligned, expect 0 -> %d\n", carry);
+ carry = get_carry64(&tmp_buf[4]);
+ pr_info("#### carry64 unaligned 4, expect 0 -> %d\n", carry);
+ carry = get_carry64(&tmp_buf[2]);
+ pr_info("#### carry64 unaligned 2, expect 0 -> %d\n", carry);
+
+ carry = get_carry32(&tmp_buf[0]);
+ pr_info("#### carry32 aligned, expect 0 -> %d\n", carry);
+ carry = get_carry32(&tmp_buf[2]);
+ pr_info("#### carry32 unaligned, expect 0 -> %d\n", carry);
+}
+#else
+/* !CONFIG_64BIT: the (parisc) carry helpers are not built; empty stub. */
+static void test_bad_carry(struct kunit *test) {}
+#endif /* CONFIG_64BIT */
+
static struct kunit_case __refdata checksum_test_cases[] = {
KUNIT_CASE(test_csum_fixed_random_inputs),
KUNIT_CASE(test_csum_all_carry_inputs),
KUNIT_CASE(test_csum_no_carry_inputs),
KUNIT_CASE(test_ip_fast_csum),
KUNIT_CASE(test_csum_ipv6_magic),
+ KUNIT_CASE(test_bad_carry), /* diagnostic only: prints results, never asserts */
{}
};
--
2.39.2
Powered by blists - more mailing lists