Message-ID: <171293487060.10875.15677909235587319984.tip-bot2@tip-bot2>
Date: Fri, 12 Apr 2024 15:14:30 -0000
From: "tip-bot2 for Bitao Hu" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: Thomas Gleixner <tglx@...utronix.de>, Bitao Hu <yaoma@...ux.alibaba.com>,
Liu Song <liusong@...ux.alibaba.com>,
Douglas Anderson <dianders@...omium.org>, x86@...nel.org,
linux-kernel@...r.kernel.org, maz@...nel.org
Subject: [tip: irq/core] genirq: Avoid summation loops for /proc/interrupts
The following commit has been merged into the irq/core branch of tip:
Commit-ID: 25a4a015118037809c97d089d69e927737e589e1
Gitweb: https://git.kernel.org/tip/25a4a015118037809c97d089d69e927737e589e1
Author: Bitao Hu <yaoma@...ux.alibaba.com>
AuthorDate: Thu, 11 Apr 2024 15:41:32 +08:00
Committer: Thomas Gleixner <tglx@...utronix.de>
CommitterDate: Fri, 12 Apr 2024 17:08:05 +02:00
genirq: Avoid summation loops for /proc/interrupts
show_interrupts() unconditionally accumulates the per CPU interrupt
statistics to determine whether an interrupt was ever raised.
This can be avoided for all interrupts which are not strictly per CPU
and not of type NMI because those interrupts already provide an
accumulated counter. The required logic is already implemented in
kstat_irqs().
Split the inner access logic out of kstat_irqs() and use it from both
kstat_irqs() and show_interrupts() to avoid the accumulation loop when
possible.
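As a rough illustration of the idea, here is a standalone userspace C
sketch (not kernel code and not part of this patch; the names fake_desc,
count_events() and NR_FAKE_CPUS are invented for the example, while the
kernel side uses irq_desc, desc->tot_count and the per-CPU kstat_irqs
counters shown in the diff below):

#include <stdbool.h>
#include <stdio.h>

#define NR_FAKE_CPUS 4

struct fake_desc {
	unsigned int per_cpu_cnt[NR_FAKE_CPUS];	/* models desc->kstat_irqs->cnt */
	unsigned int tot_count;			/* models desc->tot_count */
	bool per_cpu_or_nmi;			/* models the per CPU / NMI checks */
};

/* Models kstat_irqs_desc(): loop over CPUs only when no accumulated counter applies. */
static unsigned int count_events(const struct fake_desc *desc,
				 const bool online[NR_FAKE_CPUS])
{
	unsigned int sum = 0;
	int cpu;

	if (!desc->per_cpu_or_nmi)
		return desc->tot_count;		/* fast path, no summation loop */

	for (cpu = 0; cpu < NR_FAKE_CPUS; cpu++)
		if (online[cpu])
			sum += desc->per_cpu_cnt[cpu];
	return sum;
}

int main(void)
{
	struct fake_desc d = {
		.per_cpu_cnt	= { 3, 0, 5, 1 },
		.tot_count	= 9,
		.per_cpu_or_nmi	= false,
	};
	bool online[NR_FAKE_CPUS] = { true, true, true, false };

	printf("events: %u\n", count_events(&d, online));
	return 0;
}

In the patch itself the helper takes the cpumask as an argument because
kstat_irqs() sums over cpu_possible_mask while show_interrupts() only
needs cpu_online_mask.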
Originally-by: Thomas Gleixner <tglx@...utronix.de>
Signed-off-by: Bitao Hu <yaoma@...ux.alibaba.com>
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Reviewed-by: Liu Song <liusong@...ux.alibaba.com>
Reviewed-by: Douglas Anderson <dianders@...omium.org>
Link: https://lore.kernel.org/r/20240411074134.30922-4-yaoma@linux.alibaba.com
---
 kernel/irq/internals.h |  2 ++
 kernel/irq/irqdesc.c   | 16 +++++++++++-----
 kernel/irq/proc.c      |  6 ++----
 3 files changed, 15 insertions(+), 9 deletions(-)
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 1d92532..6c43ef3 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -98,6 +98,8 @@ extern void mask_irq(struct irq_desc *desc);
 extern void unmask_irq(struct irq_desc *desc);
 extern void unmask_threaded_irq(struct irq_desc *desc);
 
+extern unsigned int kstat_irqs_desc(struct irq_desc *desc, const struct cpumask *cpumask);
+
 #ifdef CONFIG_SPARSE_IRQ
 static inline void irq_mark_irq(unsigned int irq) { }
 #else
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index f348faf..3820931 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -976,24 +976,30 @@ static bool irq_is_nmi(struct irq_desc *desc)
 	return desc->istate & IRQS_NMI;
 }
 
-static unsigned int kstat_irqs(unsigned int irq)
+unsigned int kstat_irqs_desc(struct irq_desc *desc, const struct cpumask *cpumask)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned int sum = 0;
 	int cpu;
 
-	if (!desc || !desc->kstat_irqs)
-		return 0;
 	if (!irq_settings_is_per_cpu_devid(desc) &&
 	    !irq_settings_is_per_cpu(desc) &&
 	    !irq_is_nmi(desc))
 		return data_race(desc->tot_count);
 
-	for_each_possible_cpu(cpu)
+	for_each_cpu(cpu, cpumask)
 		sum += data_race(per_cpu(desc->kstat_irqs->cnt, cpu));
 
 	return sum;
 }
 
+static unsigned int kstat_irqs(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (!desc || !desc->kstat_irqs)
+		return 0;
+	return kstat_irqs_desc(desc, cpu_possible_mask);
+}
+
 #ifdef CONFIG_GENERIC_IRQ_STAT_SNAPSHOT
 void kstat_snapshot_irqs(void)
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 6954e0a..5c320c3 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -488,10 +488,8 @@ int show_interrupts(struct seq_file *p, void *v)
 	if (!desc || irq_settings_is_hidden(desc))
 		goto outsparse;
 
-	if (desc->kstat_irqs) {
-		for_each_online_cpu(j)
-			any_count |= data_race(per_cpu(desc->kstat_irqs->cnt, j));
-	}
+	if (desc->kstat_irqs)
+		any_count = kstat_irqs_desc(desc, cpu_online_mask);
 
 	if ((!desc->action || irq_desc_is_chained(desc)) && !any_count)
 		goto outsparse;