Message-ID: <CABk29Ntt88BfqGz=Rjd5Mwyj+N0YLv0Z9QsZp8DgB-n+FV1Zgw@mail.gmail.com>
Date: Tue, 2 Mar 2021 12:57:37 -0800
From: Josh Don <joshdon@...gle.com>
To: Peter Zijlstra <peterz@...radead.org>
Cc: Ingo Molnar <mingo@...hat.com>, Juri Lelli <juri.lelli@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>, Mel Gorman <mgorman@...e.de>,
Daniel Bristot de Oliveira <bristot@...hat.com>,
linux-kernel <linux-kernel@...r.kernel.org>,
Clement Courbet <courbet@...gle.com>,
Oleg Rombakh <olegrom@...gle.com>
Subject: Re: [PATCH] sched: Optimize __calc_delta.
From: Clement Courbet <courbet@...gle.com>
A significant portion of __calc_delta() time is spent in the loop that
normalizes a u64 down to 32 bits by shifting it right one bit at a time.
Use `fls` to compute the required shift in one step instead of iterating.

This is ~7x faster on benchmarks.

The generic `fls` implementation (`generic_fls`) is still ~4x faster than
the loop. Architectures that provide a better `fls` automatically benefit
from it: on x86, for example, the architecture-specific version buys an
additional factor of ~2 without any dedicated work in this patch.
On gcc, the asm versions of `fls` are about the same speed as the
builtin. On clang, the versions that use fls (fls, fls64) are more than
twice as slow as the builtin. This is because of the way the `fls`
function is written: clang puts the value in memory
(https://godbolt.org/z/EfMbYe). This can be fixed in a separate patch.
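
To illustrate the clang issue, here is a reduced x86-only sketch that
paraphrases (rather than quotes) the fls() from
arch/x86/include/asm/bitops.h:
```
/*
 * Reduced x86-only illustration, paraphrased from the kernel's fls();
 * not the exact kernel code.
 */
static inline int fls_asm(unsigned int x)
{
	int r = -1;

	/*
	 * "rm" lets the compiler pass x either in a register or in memory.
	 * gcc picks a register; clang tends to pick the memory alternative,
	 * forcing a store/reload.  Like the kernel version, this relies on
	 * bsr leaving the destination unchanged when x == 0.
	 */
	asm("bsrl %1,%0" : "+r" (r) : "rm" (x));
	return r + 1;
}

static inline int fls_builtin(unsigned int x)
{
	/* The builtin form keeps x in a register under both compilers. */
	return x ? 32 - __builtin_clz(x) : 0;
}
```
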
```
name                                   cpu/op
BM_Calc<__calc_delta_loop>             9.57ms ±12%
BM_Calc<__calc_delta_generic_fls>      2.36ms ±13%
BM_Calc<__calc_delta_asm_fls>          2.45ms ±13%
BM_Calc<__calc_delta_asm_fls_nomem>    1.66ms ±12%
BM_Calc<__calc_delta_asm_fls64>        2.46ms ±13%
BM_Calc<__calc_delta_asm_fls64_nomem>  1.34ms ±15%
BM_Calc<__calc_delta_builtin>          1.32ms ±11%
```
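
Separately from the benchmark (which is not reproduced here), the two paths
can be sanity-checked against each other in userspace; calc_loop(),
calc_fls() and the sample values below are made up for illustration:
```
/*
 * Userspace-only sanity check that the loop and the fls-based path compute
 * the same delta; this is not the benchmark used for the table above.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define WMULT_CONST	0xffffffffULL
#define WMULT_SHIFT	32

/* fls() stand-in, valid for nonzero 32-bit values. */
static int fls32(uint32_t v) { return 32 - __builtin_clz(v); }

static uint64_t calc_loop(uint64_t delta_exec, uint64_t fact, uint32_t inv)
{
	int shift = WMULT_SHIFT;

	while (fact >> 32) { fact >>= 1; shift--; }
	fact = (uint32_t)fact * (uint64_t)inv;
	while (fact >> 32) { fact >>= 1; shift--; }
	/* Plain 64-bit multiply is fine for these small samples; the kernel
	 * uses mul_u64_u32_shr() with a wider intermediate. */
	return (delta_exec * fact) >> shift;
}

static uint64_t calc_fls(uint64_t delta_exec, uint64_t fact, uint32_t inv)
{
	int shift = WMULT_SHIFT;
	uint32_t hi = (uint32_t)(fact >> 32);

	if (hi) { int fs = fls32(hi); shift -= fs; fact >>= fs; }
	fact = (uint32_t)fact * (uint64_t)inv;
	hi = (uint32_t)(fact >> 32);
	if (hi) { int fs = fls32(hi); shift -= fs; fact >>= fs; }
	return (delta_exec * fact) >> shift;
}

int main(void)
{
	/* Toy (weight, lw->weight) pairs, roughly in the scheduler's range. */
	const uint64_t w[]  = { 1024, 15, 90891264, 1ULL << 36 };
	const uint64_t lw[] = { 1024, 1024, 1048576, 1ULL << 34 };

	for (int i = 0; i < 4; i++) {
		uint32_t inv = lw[i] >= WMULT_CONST ?
			1 : (uint32_t)(WMULT_CONST / lw[i]);

		assert(calc_loop(4000000, w[i], inv) ==
		       calc_fls(4000000, w[i], inv));
	}
	puts("loop and fls paths agree on all samples");
	return 0;
}
```
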
Signed-off-by: Clement Courbet <courbet@...gle.com>
Signed-off-by: Josh Don <joshdon@...gle.com>
---
kernel/sched/fair.c | 30 ++++++++++++++++++++++--------
kernel/sched/sched.h | 1 +
2 files changed, 23 insertions(+), 8 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8a8bd7b13634..67e5a1d536ad 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -214,6 +214,16 @@ static void __update_inv_weight(struct load_weight *lw)
lw->inv_weight = WMULT_CONST / w;
}
+/*
+ * An fls() that handles a u32 value even on architectures where
+ * unsigned int is narrower than 32 bits.
+ */
+#if (__SIZEOF_INT__ * 8 >= 32)
+# define FLS_AT_LEAST_32(v) fls(v)
+#else
+# define FLS_AT_LEAST_32(v) fls64(v)
+#endif
+
/*
* delta_exec * weight / lw.weight
* OR
@@ -229,27 +239,31 @@ static void __update_inv_weight(struct load_weight *lw)
static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
{
u64 fact = scale_load_down(weight);
+ u32 fact_hi = (u32)(fact >> 32);
int shift = WMULT_SHIFT;
+ int fs;
__update_inv_weight(lw);
- if (unlikely(fact >> 32)) {
- while (fact >> 32) {
- fact >>= 1;
- shift--;
- }
+ if (unlikely(fact_hi)) {
+ fs = FLS_AT_LEAST_32(fact_hi);
+ shift -= fs;
+ fact >>= fs;
}
fact = mul_u32_u32(fact, lw->inv_weight);
- while (fact >> 32) {
- fact >>= 1;
- shift--;
+ fact_hi = (u32)(fact >> 32);
+ if (fact_hi) {
+ fs = FLS_AT_LEAST_32(fact_hi);
+ shift -= fs;
+ fact >>= fs;
}
return mul_u64_u32_shr(delta_exec, fact, shift);
}
+#undef FLS_AT_LEAST_32
const struct sched_class fair_sched_class;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 10a1522b1e30..714af71cf983 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -36,6 +36,7 @@
#include <uapi/linux/sched/types.h>
#include <linux/binfmts.h>
+#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/context_tracking.h>
--
2.30.1.766.gb4fecdf3b7-goog