Message-ID: <176158267154.2601451.17024825684483443214.tip-bot2@tip-bot2>
Date: Mon, 27 Oct 2025 16:31:11 -0000
From: "tip-bot2 for Marc Zyngier" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: Will Deacon <will@...nel.org>, Marc Zyngier <maz@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>, x86@...nel.org,
linux-kernel@...r.kernel.org
Subject: [tip: irq/core] irqchip/gic-v3: Switch high priority PPIs over to
 handle_percpu_devid_irq()

The following commit has been merged into the irq/core branch of tip:
Commit-ID: 21bbbc50f398f5d58a14669ee18b10a2bd30e916
Gitweb: https://git.kernel.org/tip/21bbbc50f398f5d58a14669ee18b10a2bd30e916
Author: Marc Zyngier <maz@...nel.org>
AuthorDate: Mon, 20 Oct 2025 13:29:27 +01:00
Committer: Thomas Gleixner <tglx@...utronix.de>
CommitterDate: Mon, 27 Oct 2025 17:16:34 +01:00

irqchip/gic-v3: Switch high priority PPIs over to handle_percpu_devid_irq()

It so appears that handle_percpu_devid_irq() is extremely similar to
handle_percpu_devid_fasteoi_nmi(), and that the differences do not justify
the horrid machinery in the GICv3 driver to handle the flow handler switch.
Stick with the standard flow handler, even for NMIs.
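
For reference, here is a minimal, self-contained C sketch of the two flow
handlers being compared. The structs and helper names below
(model_percpu_devid_irq(), model_percpu_devid_fasteoi_nmi(), eoi(), action())
are simplified stand-ins made up for illustration, not the kernel's actual
types or code; only the overall flow (optional ack, run the per-CPU action,
then EOI) is intended to mirror what handle_percpu_devid_irq() and
handle_percpu_devid_fasteoi_nmi() roughly do.

/*
 * Minimal stand-in model (not kernel code) of the two flow handlers:
 * both run the per-CPU action and then EOI; the only structural
 * difference modelled here is the optional ack step in the non-NMI flow.
 */
#include <stdio.h>

struct irq_data { int hwirq; };

struct irq_chip {
	void (*irq_ack)(struct irq_data *d);	/* optional */
	void (*irq_eoi)(struct irq_data *d);
};

struct irq_desc {
	struct irq_chip *chip;
	struct irq_data data;
	void (*handler)(int irq, void *dev_id);	/* per-CPU action */
	void *percpu_dev_id;
	int irq;
};

/* Roughly the shape of handle_percpu_devid_irq(): ack (if any), action, EOI */
static void model_percpu_devid_irq(struct irq_desc *desc)
{
	if (desc->chip->irq_ack)
		desc->chip->irq_ack(&desc->data);
	desc->handler(desc->irq, desc->percpu_dev_id);
	if (desc->chip->irq_eoi)
		desc->chip->irq_eoi(&desc->data);
}

/* Roughly the shape of handle_percpu_devid_fasteoi_nmi(): action, EOI */
static void model_percpu_devid_fasteoi_nmi(struct irq_desc *desc)
{
	desc->handler(desc->irq, desc->percpu_dev_id);
	if (desc->chip->irq_eoi)
		desc->chip->irq_eoi(&desc->data);
}

static void eoi(struct irq_data *d)
{
	printf("eoi hwirq %d\n", d->hwirq);
}

static void action(int irq, void *dev_id)
{
	(void)dev_id;
	printf("per-cpu action for irq %d\n", irq);
}

int main(void)
{
	/* No irq_ack callback, as on GICv3: both flows do the same thing. */
	struct irq_chip chip = { .irq_eoi = eoi };
	struct irq_desc desc = {
		.chip = &chip,
		.data = { .hwirq = 30 },
		.handler = action,
		.percpu_dev_id = NULL,
		.irq = 30,
	};

	model_percpu_devid_irq(&desc);
	model_percpu_devid_fasteoi_nmi(&desc);
	return 0;
}

As the GICv3 chip does not provide an irq_ack callback, the two flows end up
performing essentially the same sequence for PPIs, which is what makes the
handler-switching machinery unnecessary.
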
Suggested-by: Will Deacon <will@...nel.org>
Signed-off-by: Marc Zyngier <maz@...nel.org>
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Tested-by: Will Deacon <will@...nel.org>
Link: https://patch.msgid.link/20251020122944.3074811-11-maz@kernel.org
---
drivers/irqchip/irq-gic-v3.c | 54 +----------------------------------
 1 file changed, 2 insertions(+), 52 deletions(-)

diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index cf0ba83..dd2d6d7 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -230,9 +230,6 @@ static void __init gic_prio_init(void)
!cpus_have_group0);
}
-/* rdist_nmi_refs[n] == number of cpus having the rdist interrupt n set as NMI */
-static refcount_t *rdist_nmi_refs;
-
static struct gic_kvm_info gic_v3_kvm_info __initdata;
static DEFINE_PER_CPU(bool, has_rss);
@@ -608,24 +605,6 @@ static u32 __gic_get_ppi_index(irq_hw_number_t hwirq)
}
}
-static u32 __gic_get_rdist_index(irq_hw_number_t hwirq)
-{
- switch (__get_intid_range(hwirq)) {
- case SGI_RANGE:
- case PPI_RANGE:
- return hwirq;
- case EPPI_RANGE:
- return hwirq - EPPI_BASE_INTID + 32;
- default:
- unreachable();
- }
-}
-
-static u32 gic_get_rdist_index(struct irq_data *d)
-{
- return __gic_get_rdist_index(d->hwirq);
-}
-
static int gic_irq_nmi_setup(struct irq_data *d)
{
struct irq_desc *desc = irq_to_desc(d->irq);
@@ -646,20 +625,8 @@ static int gic_irq_nmi_setup(struct irq_data *d)
return -EINVAL;
/* desc lock should already be held */
- if (gic_irq_in_rdist(d)) {
- u32 idx = gic_get_rdist_index(d);
-
- /*
- * Setting up a percpu interrupt as NMI, only switch handler
- * for first NMI
- */
- if (!refcount_inc_not_zero(&rdist_nmi_refs[idx])) {
- refcount_set(&rdist_nmi_refs[idx], 1);
- desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
- }
- } else {
+ if (!gic_irq_in_rdist(d))
desc->handle_irq = handle_fasteoi_nmi;
- }
gic_irq_set_prio(d, dist_prio_nmi);
@@ -686,15 +653,8 @@ static void gic_irq_nmi_teardown(struct irq_data *d)
return;
/* desc lock should already be held */
- if (gic_irq_in_rdist(d)) {
- u32 idx = gic_get_rdist_index(d);
-
- /* Tearing down NMI, only switch handler for last NMI */
- if (refcount_dec_and_test(&rdist_nmi_refs[idx]))
- desc->handle_irq = handle_percpu_devid_irq;
- } else {
+ if (!gic_irq_in_rdist(d))
desc->handle_irq = handle_fasteoi_irq;
- }
gic_irq_set_prio(d, dist_prio_irq);
}
@@ -2080,19 +2040,9 @@ static const struct gic_quirk gic_quirks[] = {
static void gic_enable_nmi_support(void)
{
- int i;
-
if (!gic_prio_masking_enabled() || nmi_support_forbidden)
return;
- rdist_nmi_refs = kcalloc(gic_data.ppi_nr + SGI_NR,
- sizeof(*rdist_nmi_refs), GFP_KERNEL);
- if (!rdist_nmi_refs)
- return;
-
- for (i = 0; i < gic_data.ppi_nr + SGI_NR; i++)
- refcount_set(&rdist_nmi_refs[i], 0);
-
pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n",
gic_has_relaxed_pmr_sync() ? "relaxed" : "forced");