lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <7b207808-10e7-44b0-9a9f-253e2349011d@gmx.de>
Date: Fri, 16 Feb 2024 08:31:22 +0100
From: Helge Deller <deller@....de>
To: Guenter Roeck <linux@...ck-us.net>
Cc: Charlie Jenkins <charlie@...osinc.com>,
 David Laight <David.Laight@...lab.com>, Palmer Dabbelt <palmer@...belt.com>,
 Andrew Morton <akpm@...ux-foundation.org>,
 Parisc List <linux-parisc@...r.kernel.org>, Al Viro
 <viro@...iv.linux.org.uk>, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v8 2/2] lib: checksum: Use aligned accesses for
 ip_fast_csum and csum_ipv6_magic tests

On 2/16/24 06:25, Guenter Roeck wrote:
> On Fri, Feb 16, 2024 at 06:54:55AM +0100, Helge Deller wrote:
>>
>> Can you please give a pointer to this test code?
>> I'm happy to try it on real hardware.
>>
> See below.

Testcase runs OK on physical machine:

#### carry64 aligned, expect 1 -> 1
#### carry64 unaligned 4, expect 1 -> 1
#### carry64 unaligned 2, expect 1 -> 1
#### carry32 aligned, expect 1 -> 1
#### carry32 unaligned, expect 1 -> 1
#### carry64 aligned, expect 0 -> 0
#### carry64 unaligned 4, expect 0 -> 0
#### carry64 unaligned 2, expect 0 -> 0
#### carry32 aligned, expect 0 -> 0
#### carry32 unaligned, expect 0 -> 0
     ok 6 test_bad_carry

Helge

> ---
>  From 0478f35f02224994e1d81e614b66219ab7539f7f Mon Sep 17 00:00:00 2001
> From: Guenter Roeck <linux@...ck-us.net>
> Date: Wed, 14 Feb 2024 11:25:18 -0800
> Subject: [PATCH] carry tests
>
> Signed-off-by: Guenter Roeck <linux@...ck-us.net>
> ---
>   lib/checksum_kunit.c | 76 ++++++++++++++++++++++++++++++++++++++++++++
>   1 file changed, 76 insertions(+)
>
> diff --git a/lib/checksum_kunit.c b/lib/checksum_kunit.c
> index 72c313ba4c78..8f7925396e53 100644
> --- a/lib/checksum_kunit.c
> +++ b/lib/checksum_kunit.c
> @@ -546,12 +546,88 @@ static void test_csum_ipv6_magic(struct kunit *test)
>   #endif /* !CONFIG_NET */
>   }
>
> +#ifdef CONFIG_64BIT
> +
> +static __inline__ int get_carry64(void *addr)
> +{
> +	int carry = 0;
> +	unsigned long sum = 0xffffffff;
> +	unsigned long tmp;
> +
> +	__asm__ __volatile__ (
> +"	add	%0, %0, %0\n"	/* clear carry			*/
> +"	ldd	0(%2), %3\n"	/* load from memory		*/
> +"	add	%1, %3, %1\n"	/* optionally generate carry	*/
> +"	ldd	0(%2), %3\n"	/* load from memory again	*/
> +"	add,dc	%0, %0, %0\n"	/* return carry			*/
> +	: "=r" (carry), "=r" (sum), "=r" (addr), "=r" (tmp)
> +	: "0" (carry), "1" (sum), "2" (addr)
> +	: "memory");
> +
> +	return carry;
> +}
> +
> +static __inline__ int get_carry32(void *addr)
> +{
> +	int carry = 0;
> +	unsigned int sum = 0xffffffff;
> +	unsigned int tmp;
> +
> +	__asm__ __volatile__ (
> +"	add	%0, %0, %0\n"	/* clear carry			*/
> +"	ldw	0(%2), %3\n"	/* load from memory		*/
> +"	add	%1, %3, %1\n"	/* optionally generate carry	*/
> +"	ldw	0(%2), %3\n"	/* load from memory again	*/
> +"	addc	%0, %0, %0\n"	/* return carry			*/
> +	: "=r" (carry), "=r" (sum), "=r" (addr), "=r" (tmp)
> +	: "0" (carry), "1" (sum), "2" (addr)
> +	: "memory");
> +
> +	return carry;
> +}
> +
> +static void test_bad_carry(struct kunit *test)
> +{
> +	int carry;
> +
> +	memset(tmp_buf, 0xff, sizeof(tmp_buf));
> +	carry = get_carry64(&tmp_buf[0]);
> +	pr_info("#### carry64 aligned, expect 1 -> %d\n", carry);
> +	carry = get_carry64(&tmp_buf[4]);
> +	pr_info("#### carry64 unaligned 4, expect 1 -> %d\n", carry);
> +
> +	carry = get_carry64(&tmp_buf[2]);
> +	pr_info("#### carry64 unaligned 2, expect 1 -> %d\n", carry);
> +
> +	carry = get_carry32(&tmp_buf[0]);
> +	pr_info("#### carry32 aligned, expect 1 -> %d\n", carry);
> +	carry = get_carry32(&tmp_buf[2]);
> +	pr_info("#### carry32 unaligned, expect 1 -> %d\n", carry);
> +
> +	memset(tmp_buf, 0, sizeof(tmp_buf));
> +	carry = get_carry64(&tmp_buf[0]);
> +	pr_info("#### carry64 aligned, expect 0 -> %d\n", carry);
> +	carry = get_carry64(&tmp_buf[4]);
> +	pr_info("#### carry64 unaligned 4, expect 0 -> %d\n", carry);
> +	carry = get_carry64(&tmp_buf[2]);
> +	pr_info("#### carry64 unaligned 2, expect 0 -> %d\n", carry);
> +
> +	carry = get_carry32(&tmp_buf[0]);
> +	pr_info("#### carry32 aligned, expect 0 -> %d\n", carry);
> +	carry = get_carry32(&tmp_buf[2]);
> +	pr_info("#### carry32 unaligned, expect 0 -> %d\n", carry);
> +}
> +#else
> +static void test_bad_carry(struct kunit *test) {}
> +#endif /* CONFIG_64BIT */
> +
>   static struct kunit_case __refdata checksum_test_cases[] = {
>   	KUNIT_CASE(test_csum_fixed_random_inputs),
>   	KUNIT_CASE(test_csum_all_carry_inputs),
>   	KUNIT_CASE(test_csum_no_carry_inputs),
>   	KUNIT_CASE(test_ip_fast_csum),
>   	KUNIT_CASE(test_csum_ipv6_magic),
> +	KUNIT_CASE(test_bad_carry),
>   	{}
>   };
>


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ