Message-ID: <20170719113804.chumtldxje4u6dui@hirez.programming.kicks-ass.net>
Date:   Wed, 19 Jul 2017 13:38:04 +0200
From:   Peter Zijlstra <peterz@...radead.org>
To:     hare@...e.com, bhelgaas@...gle.com, mingo@...nel.org,
        axboe@...nel.dk, vincent.guittot@...aro.org, hpa@...or.com,
        nicolas.pitre@...aro.org, rafael@...nel.org,
        daniel.lezcano@...aro.org, linux-kernel@...r.kernel.org,
        tglx@...utronix.de
Cc:     linux-tip-commits@...r.kernel.org
Subject: Re: [tip:irq/core] genirq/timings: Add infrastructure for estimating
 the next interrupt arrival time

On Sat, Jun 24, 2017 at 03:02:30AM -0700, tip-bot for Daniel Lezcano wrote:
> +	/*
> +	 * Look in the list of interrupts' statistics for the earliest
> +	 * next event.
> +	 */
> +	idr_for_each_entry(&irqt_stats, s, i) {
> +
> +		irqs = this_cpu_ptr(s);
> +
> +		if (!irqs->valid)
> +			continue;
> +
> +		if (irqs->next_evt <= now) {
> +			irq = i;
> +			next_evt = now;
> +
> +			/*
> +		 * This interrupt must not be used in the
> +		 * future until new events occur and update the
> +			 * statistics.
> +			 */
> +			irqs->valid = 0;
> +			break;
> +		}
> +
> +		if (irqs->next_evt < next_evt) {
> +			irq = i;
> +			next_evt = irqs->next_evt;
> +		}
> +	}

That is a fairly expensive thing to do... sure, the 'continue' avoids
the very worst of it, but it's still painful.
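
For reference, idr_for_each_entry() redoes a tree lookup on every
step; it expands to roughly this (paraphrasing the idr.h macro of
that era):

  #define idr_for_each_entry(idr, entry, id)			\
	for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id)

So the loop above pays an idr_get_next() walk plus a this_cpu_ptr()
dereference for every allocated irq on every invocation, even when
only a handful of entries actually have valid stats.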

How about something like so on top? (compile tested only)

---
 kernel/irq/internals.h |  4 +++-
 kernel/irq/irqdesc.c   |  2 ++
 kernel/irq/timings.c   | 35 ++++++++++++++++++++++-------------
 3 files changed, 27 insertions(+), 14 deletions(-)

diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index dbfba9933ed2..b96511ca7cd3 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -275,6 +275,7 @@ struct irq_timings {
 
 DECLARE_PER_CPU(struct irq_timings, irq_timings);
 
+extern void irq_timings_init(void);
 extern void irq_timings_free(int irq);
 extern int irq_timings_alloc(int irq);
 
@@ -357,9 +358,10 @@ static __always_inline void record_irq_time(struct irq_desc *desc)
 	}
 }
 #else
+static inline void irq_timings_init(void) {}
 static inline void irq_remove_timings(struct irq_desc *desc) {}
 static inline void irq_setup_timings(struct irq_desc *desc,
-				     struct irqaction *act) {};
+				     struct irqaction *act) {}
 static inline void record_irq_time(struct irq_desc *desc) {}
 #endif /* CONFIG_IRQ_TIMINGS */
 
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 73be2b3909bd..17e95592f291 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -493,6 +493,7 @@ int __init early_irq_init(void)
 	struct irq_desc *desc;
 
 	init_irq_default_affinity();
+	irq_timings_init();
 
 	/* Let arch update nr_irqs and return the nr of preallocated irqs */
 	initcnt = arch_probe_nr_irqs();
@@ -532,6 +533,7 @@ int __init early_irq_init(void)
 	struct irq_desc *desc;
 
 	init_irq_default_affinity();
+	irq_timings_init();
 
 	printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS);
 
diff --git a/kernel/irq/timings.c b/kernel/irq/timings.c
index c8c1d073fbf1..6bddfbb8d419 100644
--- a/kernel/irq/timings.c
+++ b/kernel/irq/timings.c
@@ -24,15 +24,17 @@
 DEFINE_STATIC_KEY_FALSE(irq_timing_enabled);
 
 DEFINE_PER_CPU(struct irq_timings, irq_timings);
+static DEFINE_PER_CPU(struct list_head, irqt_list);
 
 struct irqt_stat {
-	u64	next_evt;
-	u64	last_ts;
-	u64	variance;
-	u32	avg;
-	u32	nr_samples;
-	int	anomalies;
-	int	valid;
+	u64			next_evt;
+	u64			last_ts;
+	u64			variance;
+	u32			avg;
+	u32			nr_samples;
+	int			anomalies;
+	int			valid;
+	struct list_head	list;
 };
 
 static DEFINE_IDR(irqt_stats);
@@ -197,6 +199,7 @@ static void irqs_update(struct irqt_stat *irqs, u64 ts)
 	 * the next event on it.
 	 */
 	irqs->valid = 1;
+	list_add(&irqs->list, this_cpu_ptr(&irqt_list));
 
 	/*
 	 * Online average algorithm:
@@ -254,8 +257,8 @@ static void irqs_update(struct irqt_stat *irqs, u64 ts)
 u64 irq_timings_next_event(u64 now)
 {
 	struct irq_timings *irqts = this_cpu_ptr(&irq_timings);
-	struct irqt_stat *irqs;
 	struct irqt_stat __percpu *s;
+	struct irqt_stat *irqs, *tmp;
 	u64 ts, next_evt = U64_MAX;
 	int i, irq = 0;
 
@@ -297,11 +300,8 @@ u64 irq_timings_next_event(u64 now)
 	 * Look in the list of interrupts' statistics for the earliest
 	 * next event.
 	 */
-	idr_for_each_entry(&irqt_stats, s, i) {
-
-		irqs = this_cpu_ptr(s);
-
-		if (!irqs->valid)
+	list_for_each_entry_safe(irqs, tmp, this_cpu_ptr(&irqt_list), list) {
+		if (WARN_ON_ONCE(!irqs->valid))
 			continue;
 
 		if (irqs->next_evt <= now) {
@@ -314,6 +314,7 @@ u64 irq_timings_next_event(u64 now)
 			 * statistics.
 			 */
 			irqs->valid = 0;
+			list_del(&irqs->list);
 			break;
 		}
 
@@ -367,3 +368,11 @@ int irq_timings_alloc(int irq)
 
 	return 0;
 }
+
+void __init irq_timings_init(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		INIT_LIST_HEAD(&per_cpu(irqt_list, cpu));
+}
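
FWIW, the reason this can be a plain list_head with no locking is
that, as far as I can see, the list is only ever touched from the
local CPU: irqs_update() (called while draining the per-cpu timings
buffer) does the list_add(), and irq_timings_next_event() itself does
the list_del(), both on the idle path. A minimal sketch of the
pattern, with made-up names rather than anything from the patch
above:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/percpu.h>

/* One list head per CPU; only ever accessed CPU-locally. */
static DEFINE_PER_CPU(struct list_head, pending);

struct item {
	struct list_head	node;
	u64			next_evt;
};

static void __init pending_init(void)
{
	int cpu;

	/* Init every possible CPU so a hotplugged CPU finds a valid head. */
	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(pending, cpu));
}

/* Publish an item with a known next event on this CPU's list. */
static void item_publish(struct item *it)
{
	list_add(&it->node, this_cpu_ptr(&pending));
}

static u64 earliest_event(u64 now)
{
	struct item *it, *tmp;
	u64 next = U64_MAX;

	/* _safe caches the next pointer, so list_del(it) below is fine. */
	list_for_each_entry_safe(it, tmp, this_cpu_ptr(&pending), node) {
		if (it->next_evt <= now) {
			/* Stale prediction: unpublish until it is refreshed. */
			list_del(&it->node);
			return now;
		}
		next = min(next, it->next_evt);
	}

	return next;
}

The walk is then proportional to the number of published (valid)
entries instead of the number of allocated irqs, which is the whole
point.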
