Message-ID: <20170719133940.uytsixvfgpmo3ane@hirez.programming.kicks-ass.net>
Date: Wed, 19 Jul 2017 15:39:41 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: hare@...e.com, bhelgaas@...gle.com, mingo@...nel.org,
axboe@...nel.dk, vincent.guittot@...aro.org, hpa@...or.com,
nicolas.pitre@...aro.org, rafael@...nel.org,
daniel.lezcano@...aro.org, linux-kernel@...r.kernel.org,
tglx@...utronix.de
Cc: linux-tip-commits@...r.kernel.org
Subject: Re: [tip:irq/core] genirq/timings: Add infrastructure for estimating
the next interrupt arrival time
On Sat, Jun 24, 2017 at 03:02:30AM -0700, tip-bot for Daniel Lezcano wrote:
> +	/*
> +	 * The interrupt is considered stable enough to try to predict
> +	 * the next event on it.
> +	 */
> +	irqs->valid = 1;
> +
> +	/*
> +	 * Online average algorithm:
> +	 *
> +	 * new_average = average + ((value - average) / count)
> +	 *
> +	 * The variance computation depends on the new average
> +	 * to be computed here first.
> +	 *
> +	 */
> +	irqs->avg = irqs->avg + (diff >> IRQ_TIMINGS_SHIFT);
> +
> +	/*
> +	 * Online variance algorithm:
> +	 *
> +	 * new_variance = variance + (value - average) x (value - new_average)
> +	 *
> +	 * Warning: irqs->avg is updated with the line above, hence
> +	 * 'interval - irqs->avg' is no longer equal to 'diff'
> +	 */
> +	irqs->variance = irqs->variance + (diff * (interval - irqs->avg));
> +
> +	/*
> +	 * Update the next event
> +	 */
> +	irqs->next_evt = ts + irqs->avg;
> +}
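For reference, the update above is Welford's online algorithm, with the
1/count term approximated by a fixed shift. A minimal userspace sketch of
the textbook form (illustrative only; not the kernel code):

struct stats {
	double		avg;	/* running mean */
	double		m2;	/* sum of squared deviations from the mean */
	unsigned long	count;	/* number of samples so far */
};

static void stats_update(struct stats *s, double value)
{
	double diff = value - s->avg;

	s->count++;
	/* new_average = average + (value - average) / count */
	s->avg += diff / s->count;
	/* new_variance = variance + (value - average) * (value - new_average) */
	s->m2 += diff * (value - s->avg);
}

/* variance = m2 / count; stdev = sqrt(variance) */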
The patch below implements the CDF bias I spoke of in that other thread.
Very much RFC; it has not in fact been near a compiler.
---
Subject: irq/timings: Add estimation bias
From: Peter Zijlstra <peterz@...radead.org>
Date: Wed Jul 19 14:59:17 CEST 2017
When estimating a normal random variable, the average is, by definition,
too long 50% of the time.

When we use this estimate to select idle states, picking too deep an idle
state results in increased exit latency, and hence decreased performance,
whenever the estimate turns out to be wrong.

Hence we'd like to be able to bias our estimate such that it is too short
a specific percentage of the time. We can readily compute this using the
Cumulative Distribution Function (CDF), since CDF(x) gives P(X <= x).

Implement this using an inverse Z table for the normal CDF.
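For example, for p = 75%:

	Z = sqrt(2) * erf^-1(2*0.75 - 1) ~= 0.6745

which in 16-bit fixed point is:

	round(0.6745 * 2^16) = 44203 = 0x0acab

i.e. the 75% entry in the table below.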
======8<====[ cdf.cc ]====>8======
#include <boost/math/special_functions/erf.hpp>
#include <math.h>
#include <stdio.h>

#define CDF_BITS	16

int main(void)
{
	double z;
	int i;

	printf("#define NORMAL_CDF_BITS\t%d\n\n", CDF_BITS);
	printf("static const int normal_inv_cdf_Z[] = {\n");

	for (i = 50; i < 100; i++) {
		z = sqrt(2.0) * boost::math::erf_inv((2*(double)i)/100.0 - 1.0);
		printf("\t/* %02d%% */ 0x%05x,\n", i, (int)round((1 << CDF_BITS) * z));
	}

	printf("};\n");

	return 0;
}
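(Assuming the Boost.Math headers are available, this should build with
something like "g++ -O2 cdf.cc -o cdf"; erf_inv is header-only, so there
is nothing extra to link. Its output is the Z table used below.)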
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
include/linux/normal-cdf.h | 74 +++++++++++++++++++++++++++++++++++++++++++++
kernel/irq/timings.c | 23 +++++++++++++
2 files changed, 97 insertions(+)
--- /dev/null
+++ b/include/linux/normal-cdf.h
@@ -0,0 +1,74 @@
+#ifndef _LINUX_NORMAL_CDF_H
+#define _LINUX_NORMAL_CDF_H
+
+#define NORMAL_CDF_BITS 16
+
+/*
+ *     x - mu
+ * Z = ------
+ *     sigma
+ *
+ *
+ *           1             Z
+ * CDF(x) =  - [1 + erf(-------)] = p
+ *           2          sqrt(2)
+ *
+ *                   -1
+ * Z = sqrt(2) * erf  (2p - 1)
+ */
+
+static const int normal_inv_cdf_Z[] = {
+ /* 50% */ 0x00000,
+ /* 51% */ 0x0066b,
+ /* 52% */ 0x00cd7,
+ /* 53% */ 0x01345,
+ /* 54% */ 0x019b6,
+ /* 55% */ 0x0202b,
+ /* 56% */ 0x026a6,
+ /* 57% */ 0x02d27,
+ /* 58% */ 0x033af,
+ /* 59% */ 0x03a40,
+ /* 60% */ 0x040db,
+ /* 61% */ 0x04781,
+ /* 62% */ 0x04e34,
+ /* 63% */ 0x054f4,
+ /* 64% */ 0x05bc4,
+ /* 65% */ 0x062a4,
+ /* 66% */ 0x06997,
+ /* 67% */ 0x0709e,
+ /* 68% */ 0x077bb,
+ /* 69% */ 0x07ef0,
+ /* 70% */ 0x0863f,
+ /* 71% */ 0x08dab,
+ /* 72% */ 0x09535,
+ /* 73% */ 0x09ce1,
+ /* 74% */ 0x0a4b2,
+ /* 75% */ 0x0acab,
+ /* 76% */ 0x0b4d0,
+ /* 77% */ 0x0bd25,
+ /* 78% */ 0x0c5ae,
+ /* 79% */ 0x0ce72,
+ /* 80% */ 0x0d774,
+ /* 81% */ 0x0e0be,
+ /* 82% */ 0x0ea55,
+ /* 83% */ 0x0f444,
+ /* 84% */ 0x0fe95,
+ /* 85% */ 0x10954,
+ /* 86% */ 0x11490,
+ /* 87% */ 0x1205b,
+ /* 88% */ 0x12ccc,
+ /* 89% */ 0x139fe,
+ /* 90% */ 0x14814,
+ /* 91% */ 0x1573c,
+ /* 92% */ 0x167b3,
+ /* 93% */ 0x179cd,
+ /* 94% */ 0x18e06,
+ /* 95% */ 0x1a515,
+ /* 96% */ 0x1c02d,
+ /* 97% */ 0x1e17c,
+ /* 98% */ 0x20dc2,
+ /* 99% */ 0x2538c,
+};
+
+#endif /* _LINUX_NORMAL_CDF_H */
+
--- a/kernel/irq/timings.c
+++ b/kernel/irq/timings.c
@@ -16,6 +16,7 @@
 #include <linux/idr.h>
 #include <linux/irq.h>
 #include <linux/math64.h>
+#include <linux/normal-cdf.h>
 
 #include <trace/events/irq.h>
 
@@ -26,6 +27,21 @@ DEFINE_STATIC_KEY_FALSE(irq_timing_enabl
 DEFINE_PER_CPU(struct irq_timings, irq_timings);
 static DEFINE_PER_CPU(struct list_head, irqt_list);
 
+/*
+ * Used to reduce estimation error; while the average of the distribution
+ * is too long 50% of the time, we can use the CDF to compute the value
+ * that is too long no more than @pct percent of the time.
+ *
+ * This matters for performance; picking too deep a C state results in
+ * increased exit latency when the estimate was wrong, and hence hurts
+ * performance. Biasing the estimate to be shorter alleviates this.
+ *
+ * Default to 50% for a decent power / performance ratio.
+ *
+ * 1 <= irq_timings_pct <= 50
+ */
+int irq_timings_pct = 50;
+
 struct irqt_stat {
 	u64	next_evt;
 	u64	last_ts;
@@ -226,6 +242,13 @@ static void irqs_update(struct irqt_stat
 	 * Update the next event
 	 */
 	irqs->next_evt = ts + irqs->avg;
+
+	if (irq_timings_pct != 50) {
+		int Z = normal_inv_cdf_Z[50 - irq_timings_pct];
+		u64 stdev = int_sqrt(irqs->variance);
+
+		irqs->next_evt -= (Z * stdev) >> NORMAL_CDF_BITS;
+	}
 }
 
 /**
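To make the bias concrete, a worked example (hypothetical numbers):

	irq_timings_pct = 5 selects normal_inv_cdf_Z[50 - 5], the 95% entry:

		Z = 0x1a515 ~= 1.6449

	and, with avg = 1000us and stdev = 100us:

		next_evt = ts + 1000us - ((0x1a515 * 100) >> 16)us
		         = ts + 1000us - 164us
		         = ts + 836us

	so the estimate is now too long only ~5% of the time instead of 50%.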