Message-ID: <20230419155341.v8.2.I1223c11c88937bd0cbd9b086d4ef216985797302@changeid>
Date: Wed, 19 Apr 2023 15:55:56 -0700
From: Douglas Anderson <dianders@...omium.org>
To: Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>,
Sumit Garg <sumit.garg@...aro.org>,
Daniel Thompson <daniel.thompson@...aro.org>,
Marc Zyngier <maz@...nel.org>,
Mark Rutland <mark.rutland@....com>
Cc: ito-yuichi@...itsu.com, kgdb-bugreport@...ts.sourceforge.net,
Chen-Yu Tsai <wens@...e.org>,
Masayoshi Mizuma <msys.mizuma@...il.com>,
Peter Zijlstra <peterz@...radead.org>,
Ard Biesheuvel <ardb@...nel.org>,
"Rafael J . Wysocki" <rafael.j.wysocki@...el.com>,
linux-arm-kernel@...ts.infradead.org,
Stephen Boyd <swboyd@...omium.org>,
Lecopzer Chen <lecopzer.chen@...iatek.com>,
Thomas Gleixner <tglx@...utronix.de>,
linux-perf-users@...r.kernel.org,
Masayoshi Mizuma <m.mizuma@...fujitsu.com>,
Douglas Anderson <dianders@...omium.org>,
linux-kernel@...r.kernel.org
Subject: [PATCH v8 02/10] irqchip/gic-v3: Enable support for SGIs to act as NMIs

From: Sumit Garg <sumit.garg@...aro.org>

Add support to handle SGIs as pseudo-NMIs. Since SGIs (IPIs) default to
the special flow handler handle_percpu_devid_fasteoi_ipi(), skip the
NMI handler update for SGIs.
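
For context (not part of this patch), a consumer would drive such an
SGI-as-NMI through the kernel's generic percpu NMI API. A minimal
sketch, assuming a virq that is already mapped to an SGI; the names
ipi_nmi_handler, ipi_virq and dev here are illustrative only:

	/* Runs in NMI context: keep it short, lock-free and re-entrant. */
	static irqreturn_t ipi_nmi_handler(int irq, void *data)
	{
		nmi_cpu_backtrace(get_irq_regs());	/* example payload */
		return IRQ_HANDLED;
	}

	/* Once, globally: bind the NMI handler to the SGI's virq. */
	err = request_percpu_nmi(ipi_virq, ipi_nmi_handler, "ipi_nmi", dev);

	/* Then, on each CPU that should take the NMI: */
	err = prepare_percpu_nmi(ipi_virq);
	enable_percpu_nmi(ipi_virq, IRQ_TYPE_NONE);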

Also, enable NMI support prior to gic_smp_init(), since the allocation
of SGIs as IRQs/NMIs happens as part of that routine.
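
To make the ordering concrete, gic_init_bases() ends up doing (the
calls are the driver's; the annotations are mine):

	gic_dist_init();
	gic_cpu_init();
	gic_enable_nmi_support();	/* flips the static key behind gic_supports_nmi() */
	gic_smp_init();			/* allocates SGIs; arch code may request one as an NMI right away */
	gic_cpu_pm_init();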

Signed-off-by: Sumit Garg <sumit.garg@...aro.org>
Reviewed-by: Masayoshi Mizuma <m.mizuma@...fujitsu.com>
Tested-by: Chen-Yu Tsai <wens@...e.org>
Signed-off-by: Douglas Anderson <dianders@...omium.org>
---
(no changes since v1)

 drivers/irqchip/irq-gic-v3.c | 29 +++++++++++++++++++++--------
 1 file changed, 21 insertions(+), 8 deletions(-)

diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index fd134e1f481a..b402a81fea59 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -482,6 +482,7 @@ static u32 gic_get_ppi_index(struct irq_data *d)
 static int gic_irq_nmi_setup(struct irq_data *d)
 {
 	struct irq_desc *desc = irq_to_desc(d->irq);
+	u32 idx;
 
 	if (!gic_supports_nmi())
 		return -EINVAL;
@@ -499,16 +500,22 @@ static int gic_irq_nmi_setup(struct irq_data *d)
 		return -EINVAL;
 
 	/* desc lock should already be held */
-	if (gic_irq_in_rdist(d)) {
-		u32 idx = gic_get_ppi_index(d);
+	switch (get_intid_range(d)) {
+	case SGI_RANGE:
+		break;
+	case PPI_RANGE:
+	case EPPI_RANGE:
+		idx = gic_get_ppi_index(d);
 
 		/* Setting up PPI as NMI, only switch handler for first NMI */
 		if (!refcount_inc_not_zero(&ppi_nmi_refs[idx])) {
 			refcount_set(&ppi_nmi_refs[idx], 1);
 			desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
 		}
-	} else {
+		break;
+	default:
 		desc->handle_irq = handle_fasteoi_nmi;
+		break;
 	}
 
 	gic_irq_set_prio(d, GICD_INT_NMI_PRI);
@@ -519,6 +526,7 @@ static int gic_irq_nmi_setup(struct irq_data *d)
 static void gic_irq_nmi_teardown(struct irq_data *d)
 {
 	struct irq_desc *desc = irq_to_desc(d->irq);
+	u32 idx;
 
 	if (WARN_ON(!gic_supports_nmi()))
 		return;
@@ -536,14 +544,20 @@ static void gic_irq_nmi_teardown(struct irq_data *d)
 		return;
 
 	/* desc lock should already be held */
-	if (gic_irq_in_rdist(d)) {
-		u32 idx = gic_get_ppi_index(d);
+	switch (get_intid_range(d)) {
+	case SGI_RANGE:
+		break;
+	case PPI_RANGE:
+	case EPPI_RANGE:
+		idx = gic_get_ppi_index(d);
 
 		/* Tearing down NMI, only switch handler for last NMI */
 		if (refcount_dec_and_test(&ppi_nmi_refs[idx]))
 			desc->handle_irq = handle_percpu_devid_irq;
-	} else {
+		break;
+	default:
 		desc->handle_irq = handle_fasteoi_irq;
+		break;
 	}
 
 	gic_irq_set_prio(d, GICD_INT_DEF_PRI);
@@ -1867,6 +1881,7 @@ static int __init gic_init_bases(void __iomem *dist_base,
 
 	gic_dist_init();
 	gic_cpu_init();
+	gic_enable_nmi_support();
 	gic_smp_init();
 	gic_cpu_pm_init();
@@ -1879,8 +1894,6 @@ static int __init gic_init_bases(void __iomem *dist_base,
 		gicv2m_init(handle, gic_data.domain);
 	}
 
-	gic_enable_nmi_support();
-
 	return 0;
 
 out_free:
--
2.40.0.634.g4ca3ef3211-goog