Message-ID: <20230628091241.GAZJv5ie0xVGvnMKIM@fat_crate.local>
Date: Wed, 28 Jun 2023 11:12:41 +0200
From: Borislav Petkov <bp@...en8.de>
To: Noah Goldstein <goldstein.w.n@...il.com>,
Linus Torvalds <torvalds@...ux-foundation.org>
Cc: x86@...nel.org, edumazet@...gle.com, tglx@...utronix.de,
mingo@...hat.com, dave.hansen@...ux.intel.com, hpa@...or.com,
lkml <linux-kernel@...r.kernel.org>
Subject: Re: x86/csum: Remove unnecessary odd handling
+ Linus, who was poking at this yesterday.
+ lkml. Please always CC lkml when sending patches.
On Tue, Jun 27, 2023 at 09:06:57PM -0500, Noah Goldstein wrote:
> The special case for odd-aligned buffers is unnecessary and mostly
> just adds overhead. Aligned buffers are the expectation, and even for
> unaligned buffers, the only case that is helped is when the buffer is
> 1 byte from word aligned, which is ~1/7 of the cases. Overall it
> seems highly unlikely to be worth the extra branch.
>
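
(Worked out, assuming "word" here means the 8-byte word-at-a-time
granularity this file uses: the seven misaligned residues of buff % 8
are roughly equally likely, and only one of them is a single byte
short of word alignment, so the odd-byte fixup fully aligns the
pointer in about 1 of 7 unaligned cases, hence the ~1/7.)
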
> It was left in during the previous perf improvement patch because I
> was erroneously comparing the exact output of `csum_partial(...)`,
> but really we only need `csum_fold(csum_partial(...))` to match, so
> it's safe to remove.
>
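
For archive readers wondering why only the folded value has to match:
a ones'-complement sum commutes with a 16-bit byte swap (swab16(x) is
congruent to 256*x mod 0xffff), so the removed rotate-then-swab dance
and a direct summation can disagree in the 32-bit intermediate while
still folding to the same 16 bits. A stand-alone user-space sketch of
that property - fold(), sum_direct() and sum_odd_trick() are made-up
local helpers for illustration, not kernel interfaces, and a little-
endian host like x86 is assumed:

/* demo.c - why csum_fold(csum_partial(...)) is what has to match. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t ror32(uint32_t v, int n)
{
	return (v >> n) | (v << (32 - n));
}

static uint16_t swab16(uint16_t v)
{
	return (uint16_t)((v >> 8) | (v << 8));
}

/* Reduce a 64-bit accumulator to a 16-bit ones'-complement sum. */
static uint16_t fold(uint64_t acc)
{
	while (acc >> 16)
		acc = (acc & 0xffff) + (acc >> 16);
	return (uint16_t)acc;
}

/* Sum the buffer as little-endian 16-bit words, as-is. */
static uint64_t sum_direct(const uint8_t *p, int len, uint64_t acc)
{
	int i;

	for (i = 0; i + 1 < len; i += 2)
		acc += p[i] | (uint32_t)p[i + 1] << 8;
	if (len & 1)
		acc += p[len - 1];
	return acc;
}

/* What the removed odd path did (assumes len >= 1): rotate the seed
 * by 8, account for the first byte in the high lane, sum the rest
 * (whose byte lanes are now swapped), then byte-swap the folded
 * result back. */
static uint16_t sum_odd_trick(const uint8_t *p, int len, uint32_t seed)
{
	uint64_t acc = ror32(seed, 8) + ((uint32_t)p[0] << 8);

	acc = sum_direct(p + 1, len - 1, acc);
	return swab16(fold(acc));
}

int main(void)
{
	static const uint8_t buf[] = {	/* arbitrary test bytes */
		0x45, 0x00, 0x00, 0x28, 0x9d, 0x31, 0x40,
		0x00, 0x40, 0x06, 0xde, 0xad, 0xbe,
	};
	uint32_t seed = 0x12345678;
	uint16_t direct = fold(sum_direct(buf, (int)sizeof(buf), seed));
	uint16_t trick = sum_odd_trick(buf, (int)sizeof(buf), seed);

	printf("direct %#06x, odd trick %#06x\n",
	       (unsigned)direct, (unsigned)trick);
	assert(direct == trick);	/* folded values agree */
	return 0;
}

The assert holds for any seed and length, while the full 32-bit sums
generally differ - which is what the earlier exact-output comparison
tripped over.
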
> All csum kunit tests pass.
>
> Signed-off-by: Noah Goldstein <goldstein.w.n@...il.com>
> ---
> arch/x86/lib/csum-partial_64.c | 37 ++--------------------------------
> 1 file changed, 2 insertions(+), 35 deletions(-)
>
> diff --git a/arch/x86/lib/csum-partial_64.c b/arch/x86/lib/csum-partial_64.c
> index cea25ca8b8cf..d06112e98893 100644
> --- a/arch/x86/lib/csum-partial_64.c
> +++ b/arch/x86/lib/csum-partial_64.c
> @@ -11,28 +11,6 @@
> #include <asm/checksum.h>
> #include <asm/word-at-a-time.h>
>
> -static inline unsigned short from32to16(unsigned a)
> -{
> - unsigned short b = a >> 16;
> - asm("addw %w2,%w0\n\t"
> - "adcw $0,%w0\n"
> - : "=r" (b)
> - : "0" (b), "r" (a));
> - return b;
> -}
> -
> -static inline __wsum csum_tail(u64 temp64, int odd)
> -{
> - unsigned int result;
> -
> - result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
> - if (unlikely(odd)) {
> - result = from32to16(result);
> - result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
> - }
> - return (__force __wsum)result;
> -}
> -
> /*
> * Do a checksum on an arbitrary memory area.
> * Returns a 32bit checksum.
> @@ -47,17 +25,6 @@ static inline __wsum csum_tail(u64 temp64, int odd)
> __wsum csum_partial(const void *buff, int len, __wsum sum)
> {
> u64 temp64 = (__force u64)sum;
> - unsigned odd;
> -
> - odd = 1 & (unsigned long) buff;
> - if (unlikely(odd)) {
> - if (unlikely(len == 0))
> - return sum;
> - temp64 = ror32((__force u32)sum, 8);
> - temp64 += (*(unsigned char *)buff << 8);
> - len--;
> - buff++;
> - }
>
> /*
> * len == 40 is the hot case due to IPv6 headers, but annotating it likely()
> @@ -73,7 +40,7 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
> "adcq $0,%[res]"
> : [res] "+r"(temp64)
> : [src] "r"(buff), "m"(*(const char(*)[40])buff));
> - return csum_tail(temp64, odd);
> + return add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
> }
> if (unlikely(len >= 64)) {
> /*
> @@ -143,7 +110,7 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
> : [res] "+r"(temp64)
> : [trail] "r"(trail));
> }
> - return csum_tail(temp64, odd);
> + return add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
> }
> EXPORT_SYMBOL(csum_partial);
>
> --
> 2.34.1
>
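For reference, the new common tail,
add32_with_carry(temp64 >> 32, temp64 & 0xffffffff), is the x86
addl/adcl end-around-carry helper; a portable sketch of the same
reduction (fold64to32 is a made-up name for illustration):

/* Fold 64 bits down to 32 with end-around carry, matching what
 * addl/adcl compute modulo 2^32. */
static uint32_t fold64to32(uint64_t t)
{
	uint64_t s = (t & 0xffffffff) + (t >> 32);

	return (uint32_t)(s + (s >> 32));	/* re-add the carry-out */
}

csum_fold() then takes that 32-bit value down to the final 16-bit
checksum at the call sites.
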
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette