Message-ID: <20230601143109.v9.1.I1223c11c88937bd0cbd9b086d4ef216985797302@changeid>
Date: Thu, 1 Jun 2023 14:31:45 -0700
From: Douglas Anderson <dianders@...omium.org>
To: Mark Rutland <mark.rutland@....com>,
Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>,
Sumit Garg <sumit.garg@...aro.org>,
Daniel Thompson <daniel.thompson@...aro.org>,
Marc Zyngier <maz@...nel.org>
Cc: linux-perf-users@...r.kernel.org, ito-yuichi@...itsu.com,
Chen-Yu Tsai <wens@...e.org>, Ard Biesheuvel <ardb@...nel.org>,
Stephen Boyd <swboyd@...omium.org>,
Peter Zijlstra <peterz@...radead.org>,
Thomas Gleixner <tglx@...utronix.de>,
linux-arm-kernel@...ts.infradead.org,
kgdb-bugreport@...ts.sourceforge.net,
Masayoshi Mizuma <msys.mizuma@...il.com>,
"Rafael J . Wysocki" <rafael.j.wysocki@...el.com>,
Lecopzer Chen <lecopzer.chen@...iatek.com>,
Masayoshi Mizuma <m.mizuma@...fujitsu.com>,
Douglas Anderson <dianders@...omium.org>,
linux-kernel@...r.kernel.org
Subject: [PATCH v9 1/7] irqchip/gic-v3: Enable support for SGIs to act as NMIs
From: Sumit Garg <sumit.garg@...aro.org>
Add support to handle SGIs as pseudo-NMIs. Since SGIs (IPIs) already
default to a special flow handler, handle_percpu_devid_fasteoi_ipi(),
skip the NMI handler update for SGIs.
Also, enable NMI support prior to gic_smp_init(), as the allocation of
SGIs as IRQs/NMIs happens as part of that routine.
Signed-off-by: Sumit Garg <sumit.garg@...aro.org>
Reviewed-by: Masayoshi Mizuma <m.mizuma@...fujitsu.com>
Tested-by: Chen-Yu Tsai <wens@...e.org>
Signed-off-by: Douglas Anderson <dianders@...omium.org>
---
(no changes since v1)
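
For reviewers who want to see how this path gets exercised: below is a
minimal, hypothetical sketch of the consumer side, using the existing
per-CPU NMI API (request_percpu_nmi(), prepare_percpu_nmi(),
enable_percpu_nmi()). It is illustrative only and not part of this
patch; the IRQ number, handler, and dev_id names are made up. The point
is that request_percpu_nmi() reaches gic_irq_nmi_setup() through the
irqchip's .irq_nmi_setup callback, which with this patch leaves the
SGI's handle_percpu_devid_fasteoi_ipi flow handler in place.

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>

/* Hypothetical per-CPU cookie passed as dev_id. */
static DEFINE_PER_CPU(int, example_nmi_cpu);

static irqreturn_t example_nmi_handler(int irq, void *dev_id)
{
	/* NMI context: keep it short; no locks, no sleeping. */
	return IRQ_HANDLED;
}

/* "sgi_irq" is assumed to be a Linux IRQ mapped onto an SGI. */
static int example_request_sgi_nmi(unsigned int sgi_irq)
{
	int ret;

	ret = request_percpu_nmi(sgi_irq, example_nmi_handler,
				 "example_sgi_nmi", &example_nmi_cpu);
	if (ret)
		return ret;

	/*
	 * Arming must then happen on each CPU that should receive the
	 * NMI, with preemption disabled (e.g. from a CPU-hotplug
	 * callback).
	 */
	ret = prepare_percpu_nmi(sgi_irq);
	if (!ret)
		enable_percpu_nmi(sgi_irq, IRQ_TYPE_NONE);

	return ret;
}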
drivers/irqchip/irq-gic-v3.c | 29 +++++++++++++++++++++--------
1 file changed, 21 insertions(+), 8 deletions(-)
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 0c6c1af9a5b7..ed37e02d4c5f 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -525,6 +525,7 @@ static u32 gic_get_ppi_index(struct irq_data *d)
 static int gic_irq_nmi_setup(struct irq_data *d)
 {
 	struct irq_desc *desc = irq_to_desc(d->irq);
+	u32 idx;
 
 	if (!gic_supports_nmi())
 		return -EINVAL;
@@ -542,16 +543,22 @@ static int gic_irq_nmi_setup(struct irq_data *d)
 		return -EINVAL;
 
 	/* desc lock should already be held */
-	if (gic_irq_in_rdist(d)) {
-		u32 idx = gic_get_ppi_index(d);
+	switch (get_intid_range(d)) {
+	case SGI_RANGE:
+		break;
+	case PPI_RANGE:
+	case EPPI_RANGE:
+		idx = gic_get_ppi_index(d);
 
 		/* Setting up PPI as NMI, only switch handler for first NMI */
 		if (!refcount_inc_not_zero(&ppi_nmi_refs[idx])) {
 			refcount_set(&ppi_nmi_refs[idx], 1);
 			desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
 		}
-	} else {
+		break;
+	default:
 		desc->handle_irq = handle_fasteoi_nmi;
+		break;
 	}
 
 	gic_irq_set_prio(d, GICD_INT_NMI_PRI);
@@ -562,6 +569,7 @@ static int gic_irq_nmi_setup(struct irq_data *d)
 static void gic_irq_nmi_teardown(struct irq_data *d)
 {
 	struct irq_desc *desc = irq_to_desc(d->irq);
+	u32 idx;
 
 	if (WARN_ON(!gic_supports_nmi()))
 		return;
@@ -579,14 +587,20 @@ static void gic_irq_nmi_teardown(struct irq_data *d)
 		return;
 
 	/* desc lock should already be held */
-	if (gic_irq_in_rdist(d)) {
-		u32 idx = gic_get_ppi_index(d);
+	switch (get_intid_range(d)) {
+	case SGI_RANGE:
+		break;
+	case PPI_RANGE:
+	case EPPI_RANGE:
+		idx = gic_get_ppi_index(d);
 
 		/* Tearing down NMI, only switch handler for last NMI */
 		if (refcount_dec_and_test(&ppi_nmi_refs[idx]))
 			desc->handle_irq = handle_percpu_devid_irq;
-	} else {
+		break;
+	default:
 		desc->handle_irq = handle_fasteoi_irq;
+		break;
 	}
 
 	gic_irq_set_prio(d, GICD_INT_DEF_PRI);
@@ -2001,6 +2015,7 @@ static int __init gic_init_bases(phys_addr_t dist_phys_base,
 
 	gic_dist_init();
 	gic_cpu_init();
+	gic_enable_nmi_support();
 	gic_smp_init();
 	gic_cpu_pm_init();
 
@@ -2013,8 +2028,6 @@ static int __init gic_init_bases(phys_addr_t dist_phys_base,
 		gicv2m_init(handle, gic_data.domain);
 	}
 
-	gic_enable_nmi_support();
-
 	return 0;
 
 out_free:
--
2.41.0.rc2.161.g9c6817b8e7-goog