Message-Id: <20211125193852.3617-1-goldstein.w.n@gmail.com>
Date: Thu, 25 Nov 2021 13:38:52 -0600
From: Noah Goldstein <goldstein.w.n@...il.com>
To: unlisted-recipients:; (no To-header on input)
Cc: tglx@...utronix.de, mingo@...hat.com, bp@...en8.de,
dave.hansen@...ux.intel.com, x86@...nel.org, hpa@...or.com,
peterz@...radead.org, alexanderduyck@...com,
goldstein.w.n@...il.com, edumazet@...gle.com,
linux-kernel@...r.kernel.org
Subject: [PATCH v1] x86/lib: Optimize 8x loop and memory clobbers in csum_partial.c
Modify the 8x loop so that it uses two independent accumulators.
Despite adding more instructions, the latency and throughput of the
loop are improved because the `adc` chains can now take advantage of
multiple execution units.
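As a standalone sketch (the wrapper function and its name are
illustrative, not part of the patch itself), splitting one 8-long
`adc` chain into two 4-long chains that are merged at the end looks
like:

    static unsigned long sum64_two_chains(const void *buff,
                                          unsigned long sum)
    {
            unsigned long tmp;

            asm("movq 0*8(%[src]),%[tmp]\n\t"  /* chain 1: words 0-3 */
                "addq 1*8(%[src]),%[tmp]\n\t"
                "adcq 2*8(%[src]),%[tmp]\n\t"
                "adcq 3*8(%[src]),%[tmp]\n\t"
                "adcq $0,%[tmp]\n\t"           /* fold chain-1 carry */
                "addq 4*8(%[src]),%[res]\n\t"  /* chain 2: words 4-7 */
                "adcq 5*8(%[src]),%[res]\n\t"
                "adcq 6*8(%[src]),%[res]\n\t"
                "adcq 7*8(%[src]),%[res]\n\t"
                "adcq %[tmp],%[res]\n\t"       /* merge the two chains */
                "adcq $0,%[res]"
                : [res] "+r" (sum), [tmp] "=&r" (tmp)
                : [src] "r" (buff), "m" (*(const char (*)[64])buff));
            return sum;
    }

The two chains carry no data dependency on each other until the final
`adcq %[tmp],%[res]`, so an out-of-order core can issue them to
separate execution ports concurrently instead of serializing all
eight additions through one flags dependency.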
Make the memory clobbers more precise. 'buff' is read-only and the
exact range of bytes accessed is known, so there is no reason to
write-clobber all of memory.
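For example, taking the 8-byte tail case from the diff below, the old
form told the compiler the asm might write anywhere:

    asm("addq 0*8(%[src]),%[res]\n\t"
        "adcq $0,%[res]"
        : [res] "+r" (temp64)
        : [src] "r" (buff)
        : "memory");

whereas passing the read range as a dummy "m" input says that exactly
8 bytes at 'buff' are read and nothing is written, leaving the
compiler free to reorder unrelated loads and stores around the asm:

    asm("addq 0*8(%[src]),%[res]\n\t"
        "adcq $0,%[res]"
        : [res] "+r" (temp64)
        : [src] "r" (buff), "m" (*(const char (*)[8])buff));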
Relative performance changes on Tigerlake:

Time Unit: Ref Cycles
Size Unit: Bytes

   size,  lat old,  lat new,  tput old,  tput new
      0,    4.972,    5.054,     4.864,     4.870
    100,   14.218,   12.476,     9.429,     9.441
    200,   22.115,   16.937,    13.088,    12.852
    300,   31.826,   24.640,    19.383,    18.230
    400,   39.016,   28.133,    23.223,    21.304
    500,   48.815,   36.186,    30.331,    27.104
    600,   56.732,   40.120,    35.899,    30.363
    700,   66.623,   48.178,    43.044,    36.400
    800,   73.259,   51.171,    48.564,    39.173
    900,   82.821,   56.635,    58.592,    45.162
   1000,   90.780,   63.703,    65.658,    48.718
Signed-off-by: Noah Goldstein <goldstein.w.n@...il.com>
---
arch/x86/lib/csum-partial_64.c | 36 +++++++++++++++++-----------------
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/arch/x86/lib/csum-partial_64.c b/arch/x86/lib/csum-partial_64.c
index ded842cd1020..76e2f540587e 100644
--- a/arch/x86/lib/csum-partial_64.c
+++ b/arch/x86/lib/csum-partial_64.c
@@ -48,18 +48,21 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
}
while (unlikely(len >= 64)) {
- asm("addq 0*8(%[src]),%[res]\n\t"
- "adcq 1*8(%[src]),%[res]\n\t"
- "adcq 2*8(%[src]),%[res]\n\t"
- "adcq 3*8(%[src]),%[res]\n\t"
- "adcq 4*8(%[src]),%[res]\n\t"
+ u64 temp_accum;
+
+ asm("movq 0*8(%[src]),%[res_tmp]\n\t"
+ "addq 1*8(%[src]),%[res_tmp]\n\t"
+ "adcq 2*8(%[src]),%[res_tmp]\n\t"
+ "adcq 3*8(%[src]),%[res_tmp]\n\t"
+ "adcq $0,%[res_tmp]\n\t"
+ "addq 4*8(%[src]),%[res]\n\t"
"adcq 5*8(%[src]),%[res]\n\t"
"adcq 6*8(%[src]),%[res]\n\t"
"adcq 7*8(%[src]),%[res]\n\t"
- "adcq $0,%[res]"
- : [res] "+r" (temp64)
- : [src] "r" (buff)
- : "memory");
+ "adcq %[res_tmp], %[res]\n\t"
+ "adcq $0,%[res]\n\t"
+ : [res] "+r"(temp64), [res_tmp] "=&r"(temp_accum)
+ : [src] "r"(buff), "m"(*(const char(*)[64])buff));
buff += 64;
len -= 64;
}
@@ -70,26 +73,23 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
"adcq 2*8(%[src]),%[res]\n\t"
"adcq 3*8(%[src]),%[res]\n\t"
"adcq $0,%[res]"
- : [res] "+r" (temp64)
- : [src] "r" (buff)
- : "memory");
+ : [res] "+r"(temp64)
+ : [src] "r"(buff), "m"(*(const char(*)[32])buff));
buff += 32;
}
if (len & 16) {
asm("addq 0*8(%[src]),%[res]\n\t"
"adcq 1*8(%[src]),%[res]\n\t"
"adcq $0,%[res]"
- : [res] "+r" (temp64)
- : [src] "r" (buff)
- : "memory");
+ : [res] "+r"(temp64)
+ : [src] "r"(buff), "m"(*(const char(*)[16])buff));
buff += 16;
}
if (len & 8) {
asm("addq 0*8(%[src]),%[res]\n\t"
"adcq $0,%[res]"
- : [res] "+r" (temp64)
- : [src] "r" (buff)
- : "memory");
+ : [res] "+r"(temp64)
+ : [src] "r"(buff), "m"(*(const char(*)[8])buff));
buff += 8;
}
if (len & 7) {
--
2.25.1