Message-ID: <tip-74bdf7815dfb3805a37b0bba615814063a227bf5@git.kernel.org>
Date: Fri, 22 Jun 2018 05:30:47 -0700
From: tip-bot for Eric Dumazet <tipbot@...or.com>
To: linux-tip-commits@...r.kernel.org
Cc: mingo@...nel.org, edumazet@...gle.com, tglx@...utronix.de,
linux-kernel@...r.kernel.org, eric.dumazet@...il.com, hpa@...or.com
Subject: [tip:irq/core] genirq: Speedup show_interrupts()
Commit-ID: 74bdf7815dfb3805a37b0bba615814063a227bf5
Gitweb: https://git.kernel.org/tip/74bdf7815dfb3805a37b0bba615814063a227bf5
Author: Eric Dumazet <edumazet@...gle.com>
AuthorDate: Wed, 20 Jun 2018 08:03:32 -0700
Committer: Thomas Gleixner <tglx@...utronix.de>
CommitDate: Fri, 22 Jun 2018 14:22:58 +0200
genirq: Speedup show_interrupts()
Since commit 425a5072dcd1 ("genirq: Free irq_desc with rcu"),
show_interrupts() can be switched to rcu locking, which removes possible
contention on sparse_irq_lock.
The per-CPU count scan and print can be done without holding the desc spinlock.
There is also no need to call kstat_irqs_cpu() and abuse irq_to_desc() while
holding the RCU read lock, since desc and desc->kstat_irqs won't disappear or
change.
Signed-off-by: Eric Dumazet <edumazet@...gle.com>
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Cc: Eric Dumazet <eric.dumazet@...il.com>
Link: https://lkml.kernel.org/r/20180620150332.163320-1-edumazet@google.com
---
kernel/irq/proc.c | 22 ++++++++++++----------
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 37eda10f5c36..da9addb8d655 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -475,22 +475,24 @@ int show_interrupts(struct seq_file *p, void *v)
                 seq_putc(p, '\n');
         }
 
-        irq_lock_sparse();
+        rcu_read_lock();
         desc = irq_to_desc(i);
         if (!desc)
                 goto outsparse;
 
-        raw_spin_lock_irqsave(&desc->lock, flags);
-        for_each_online_cpu(j)
-                any_count |= kstat_irqs_cpu(i, j);
-        action = desc->action;
-        if ((!action || irq_desc_is_chained(desc)) && !any_count)
-                goto out;
+        if (desc->kstat_irqs)
+                for_each_online_cpu(j)
+                        any_count |= *per_cpu_ptr(desc->kstat_irqs, j);
+
+        if ((!desc->action || irq_desc_is_chained(desc)) && !any_count)
+                goto outsparse;
 
         seq_printf(p, "%*d: ", prec, i);
         for_each_online_cpu(j)
-                seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
+                seq_printf(p, "%10u ", desc->kstat_irqs ?
+                                        *per_cpu_ptr(desc->kstat_irqs, j) : 0);
+        raw_spin_lock_irqsave(&desc->lock, flags);
 
         if (desc->irq_data.chip) {
                 if (desc->irq_data.chip->irq_print_chip)
                         desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
@@ -511,6 +513,7 @@ int show_interrupts(struct seq_file *p, void *v)
         if (desc->name)
                 seq_printf(p, "-%-8s", desc->name);
 
+        action = desc->action;
         if (action) {
                 seq_printf(p, " %s", action->name);
                 while ((action = action->next) != NULL)
@@ -518,10 +521,9 @@ int show_interrupts(struct seq_file *p, void *v)
         }
 
         seq_putc(p, '\n');
-out:
         raw_spin_unlock_irqrestore(&desc->lock, flags);
 outsparse:
-        irq_unlock_sparse();
+        rcu_read_unlock();
         return 0;
 }
 #endif
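
[Editor's note] For readers who want the post-patch control flow at a glance, below is
a condensed, hand-written sketch of show_interrupts() as it reads with this change
applied. It is not the exact kernel source: the function name and parameters are
simplified for illustration, and the CPU header row, the prec width computation and
the hwirq/chip/name printing are left out. It shows the locking scheme described in
the changelog: rcu_read_lock() replaces irq_lock_sparse() and keeps desc and
desc->kstat_irqs alive, the per-CPU counters are scanned and printed without any
spinlock, and desc->lock is only taken afterwards for the action list walk.

        /*
         * Condensed sketch only -- kernel-internal context assumed
         * (kernel/irq/proc.c with kernel/irq/internals.h, <linux/irqdesc.h>,
         * <linux/seq_file.h>). Name and signature simplified for illustration.
         */
        static int show_one_irq_sketch(struct seq_file *p, int i, int prec)
        {
                unsigned long flags, any_count = 0;
                struct irqaction *action;
                struct irq_desc *desc;
                int j;

                rcu_read_lock();                /* replaces irq_lock_sparse() */
                desc = irq_to_desc(i);
                if (!desc)
                        goto outsparse;

                /* desc->kstat_irqs is stable under RCU: no desc->lock needed. */
                if (desc->kstat_irqs)
                        for_each_online_cpu(j)
                                any_count |= *per_cpu_ptr(desc->kstat_irqs, j);

                if ((!desc->action || irq_desc_is_chained(desc)) && !any_count)
                        goto outsparse;

                seq_printf(p, "%*d: ", prec, i);
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", desc->kstat_irqs ?
                                   *per_cpu_ptr(desc->kstat_irqs, j) : 0);

                /* The lock is only needed for the action list walk. */
                raw_spin_lock_irqsave(&desc->lock, flags);
                action = desc->action;
                if (action) {
                        seq_printf(p, " %s", action->name);
                        while ((action = action->next) != NULL)
                                seq_printf(p, ", %s", action->name);
                }
                seq_putc(p, '\n');
                raw_spin_unlock_irqrestore(&desc->lock, flags);
        outsparse:
                rcu_read_unlock();
                return 0;
        }

The OR-accumulation into any_count keeps the existing early exit: descriptors with no
action (or chained descriptors) and no recorded interrupts are skipped without
printing a line, now without taking any lock at all.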