Message-ID: <20221121140050.450294627@linutronix.de>
Date: Mon, 21 Nov 2022 15:40:11 +0100 (CET)
From: Thomas Gleixner <tglx@...utronix.de>
To: LKML <linux-kernel@...r.kernel.org>
Cc: Will Deacon <will@...nel.org>, linux-pci@...r.kernel.org,
Bjorn Helgaas <bhelgaas@...gle.com>,
Lorenzo Pieralisi <lorenzo.pieralisi@....com>,
Marc Zyngier <maz@...nel.org>,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
Jason Gunthorpe <jgg@...lanox.com>,
Andrew Lunn <andrew@...n.ch>,
Gregory Clement <gregory.clement@...tlin.com>,
Sebastian Hesselbarth <sebastian.hesselbarth@...il.com>,
Ammar Faizi <ammarfaizi2@...weeb.org>,
Robin Murphy <robin.murphy@....com>,
Lorenzo Pieralisi <lpieralisi@...nel.org>,
Nishanth Menon <nm@...com>, Tero Kristo <kristo@...nel.org>,
Santosh Shilimkar <ssantosh@...nel.org>,
linux-arm-kernel@...ts.infradead.org,
Vinod Koul <vkoul@...nel.org>, Sinan Kaya <okaya@...nel.org>,
Andy Gross <agross@...nel.org>,
Bjorn Andersson <andersson@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Shameerali Kolothum Thodi
<shameerali.kolothum.thodi@...wei.com>,
Zenghui Yu <yuzenghui@...wei.com>,
Shawn Guo <shawnguo@...nel.org>,
Sascha Hauer <s.hauer@...gutronix.de>,
Fabio Estevam <festevam@...il.com>
Subject: [patch V2 34/40] irqchip/irq-mvebu-icu: Prepare for real per device MSI

The core infrastructure has everything in place to switch ICU to per
device MSI domains and avoid the convoluted construct of the existing
platform-MSI layering violation.

The new infrastructure provides a wired interrupt specific interface in
the MSI core, which converts the 'hardware interrupt number + trigger
type' allocation required for wired interrupts in the regular irqdomain
code into a normal MSI allocation.

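A rough sketch of what that conversion boils down to on the core side
(illustration only: the helper name is made up, and the call to
msi_domain_alloc_irq_at() merely stands in for whatever the MSI core
changes earlier in this series actually do):

#include <linux/msi.h>

/*
 * Sketch only, not the real core implementation: turn a 'hwirq + type'
 * pair coming from the wired side into a regular per device MSI
 * allocation whose instance cookie carries both values.
 */
static int wired_to_msi_alloc_sketch(struct device *dev, unsigned int hwirq,
				     unsigned int type)
{
	union msi_instance_cookie icookie = {
		/* hwirq in the low 32 bits, trigger type in the high 32 bits */
		.value	= ((u64)type << 32) | hwirq,
	};
	struct msi_map map;

	/* Regular MSI allocation; the cookie ends up in desc->data.icookie */
	map = msi_domain_alloc_irq_at(dev, MSI_DEFAULT_DOMAIN, hwirq, NULL,
				      &icookie);
	return map.index < 0 ? map.index : map.virq;
}
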
The hardware interrupt number and the trigger type are stored in the
MSI descriptor device cookie by the core code so that the ICU specific
code can retrieve them.

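For reference, a minimal sketch of that cookie layout as the ICU
callbacks further down consume it (the helper names are made up for
illustration; the layout itself is taken from the driver code below):

/*
 * Device cookie layout as expected by the new callbacks: wired hwirq in
 * the low 32 bits, trigger type in the high 32 bits of
 * desc->data.icookie.value.
 */
static inline u32 icu_cookie_to_hwirq(u64 cookie)
{
	return (u32)cookie;		/* see mvebu_icu_set_desc() */
}

static inline u32 icu_cookie_to_type(u64 cookie)
{
	return (u32)(cookie >> 32);	/* see mvebu_icu_write_msi_msg() */
}
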
The new per device domain is only instantiated when the irqdomain which
is associated with the ICU device provides MSI parent functionality. Up
to that point the driver invokes the existing code. Once the parent is
converted, the code for the current platform-MSI mechanism will be
removed.

The new domain shares the interrupt chip callbacks and the translation
function. The only new functionality aside from filling out the
msi_domain_templates is a domain-specific set_desc() callback, which
will go away once all platform-MSI code has been converted.

Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Cc: Andrew Lunn <andrew@...n.ch>
Cc: Gregory Clement <gregory.clement@...tlin.com>
Cc: Sebastian Hesselbarth <sebastian.hesselbarth@...il.com>
---
drivers/irqchip/irq-mvebu-icu.c | 181 +++++++++++++++++++++++++++++++++++++---
1 file changed, 170 insertions(+), 11 deletions(-)
--- a/drivers/irqchip/irq-mvebu-icu.c
+++ b/drivers/irqchip/irq-mvebu-icu.c
@@ -20,6 +20,8 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include "irq-gic-msi-lib.h"
+
#include <dt-bindings/interrupt-controller/mvebu-icu.h>
/* ICU registers */
@@ -60,14 +62,52 @@ struct mvebu_icu_msi_data {
const struct mvebu_icu_subset_data *subset_data;
};
-struct mvebu_icu_irq_data {
- struct mvebu_icu *icu;
- unsigned int icu_group;
- unsigned int type;
-};
-
static DEFINE_STATIC_KEY_FALSE(legacy_bindings);
+static int mvebu_icu_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *hwirq, unsigned int *type)
+{
+ unsigned int param_count = static_branch_unlikely(&legacy_bindings) ? 3 : 2;
+ struct mvebu_icu_msi_data *msi_data = d->host_data;
+ struct mvebu_icu *icu = msi_data->icu;
+
+ /* Check the count of the parameters in dt */
+ if (WARN_ON(fwspec->param_count != param_count)) {
+ dev_err(icu->dev, "wrong ICU parameter count %d\n",
+ fwspec->param_count);
+ return -EINVAL;
+ }
+
+ if (static_branch_unlikely(&legacy_bindings)) {
+ *hwirq = fwspec->param[1];
+ *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+ if (fwspec->param[0] != ICU_GRP_NSR) {
+ dev_err(icu->dev, "wrong ICU group type %x\n",
+ fwspec->param[0]);
+ return -EINVAL;
+ }
+ } else {
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
+
+ /*
+ * The ICU receives level interrupts. While the NSR are also
+ * level interrupts, SEI are edge interrupts. Force the type
+ * here in this case. Please note that this makes the interrupt
+ * handling unreliable.
+ */
+ if (msi_data->subset_data->icu_group == ICU_GRP_SEI)
+ *type = IRQ_TYPE_EDGE_RISING;
+ }
+
+ if (*hwirq >= ICU_MAX_IRQS) {
+ dev_err(icu->dev, "invalid interrupt number %ld\n", *hwirq);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static void mvebu_icu_init(struct mvebu_icu *icu,
struct mvebu_icu_msi_data *msi_data,
struct msi_msg *msg)
@@ -89,6 +129,14 @@ static void mvebu_icu_init(struct mvebu_
writel_relaxed(msg[1].address_lo, icu->base + subset->offset_clr_al);
}
+/* Start of area to be removed once all parent chips provide MSI parent */
+
+struct mvebu_icu_irq_data {
+ struct mvebu_icu *icu;
+ unsigned int icu_group;
+ unsigned int type;
+};
+
static void mvebu_icu_write_msg(struct msi_desc *desc, struct msi_msg *msg)
{
struct irq_data *d = irq_get_irq_data(desc->irq);
@@ -269,6 +317,109 @@ static const struct irq_domain_ops mvebu
.free = mvebu_icu_irq_domain_free,
};
+/* End of removal area */
+
+static int mvebu_icu_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
+ unsigned int virq, irq_hw_number_t hwirq, msi_alloc_info_t *arg)
+{
+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip, info->chip_data);
+ return irq_set_irqchip_state(virq, IRQCHIP_STATE_PENDING, false);
+}
+
+static void mvebu_icu_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
+{
+ arg->desc = desc;
+ arg->hwirq = (u32)desc->data.icookie.value;
+}
+
+static void mvebu_icu_write_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ struct mvebu_icu_msi_data *msi_data = d->chip_data;
+ unsigned int icu_group = msi_data->subset_data->icu_group;
+ struct msi_desc *desc = irq_data_get_msi_desc(d);
+ struct mvebu_icu *icu = msi_data->icu;
+ unsigned int type;
+ u32 icu_int;
+
+ if (msg->address_lo || msg->address_hi) {
+ /* One off initialization per domain */
+ mvebu_icu_init(icu, msi_data, msg);
+ /* Configure the ICU with irq number & type */
+ icu_int = msg->data | ICU_INT_ENABLE;
+ type = (unsigned int)(desc->data.icookie.value >> 32);
+ if (type & IRQ_TYPE_EDGE_RISING)
+ icu_int |= ICU_IS_EDGE;
+ icu_int |= icu_group << ICU_GROUP_SHIFT;
+ } else {
+ /* De-configure the ICU */
+ icu_int = 0;
+ }
+
+ writel_relaxed(icu_int, icu->base + ICU_INT_CFG(d->hwirq));
+
+ /*
+ * The SATA unit has 2 ports, and a dedicated ICU entry per
+ * port. The ahci sata driver supports only one irq interrupt
+ * per SATA unit. To solve this conflict, we configure the 2
+ * SATA wired interrupts in the south bridge into 1 GIC
+ * interrupt in the north bridge. Even if only a single port
+ * is enabled, if sata node is enabled, both interrupts are
+ * configured (regardless of which port is actually in use).
+ */
+ if (d->hwirq == ICU_SATA0_ICU_ID || d->hwirq == ICU_SATA1_ICU_ID) {
+ writel_relaxed(icu_int, icu->base + ICU_INT_CFG(ICU_SATA0_ICU_ID));
+ writel_relaxed(icu_int, icu->base + ICU_INT_CFG(ICU_SATA1_ICU_ID));
+ }
+}
+
+static const struct msi_domain_template mvebu_icu_nsr_msi_template = {
+ .chip = {
+ .name = "ICU-NSR",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_write_msi_msg = mvebu_icu_write_msi_msg,
+ .flags = IRQCHIP_SUPPORTS_LEVEL_MSI,
+ },
+
+ .ops = {
+ .msi_translate = mvebu_icu_translate,
+ .msi_init = mvebu_icu_msi_init,
+ .set_desc = mvebu_icu_set_desc,
+ },
+
+ .info = {
+ .bus_token = DOMAIN_BUS_WIRED_TO_MSI,
+ .flags = MSI_FLAG_LEVEL_CAPABLE |
+ MSI_FLAG_USE_DEV_FWNODE,
+ },
+};
+
+static const struct msi_domain_template mvebu_icu_sei_msi_template = {
+ .chip = {
+ .name = "ICU-SEI",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_ack = irq_chip_ack_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_write_msi_msg = mvebu_icu_write_msi_msg,
+ .flags = IRQCHIP_SUPPORTS_LEVEL_MSI,
+ },
+
+ .ops = {
+ .msi_translate = mvebu_icu_translate,
+ .msi_init = mvebu_icu_msi_init,
+ .set_desc = mvebu_icu_set_desc,
+ },
+
+ .info = {
+ .bus_token = DOMAIN_BUS_WIRED_TO_MSI,
+ .flags = MSI_FLAG_LEVEL_CAPABLE |
+ MSI_FLAG_USE_DEV_FWNODE,
+ },
+};
+
static const struct mvebu_icu_subset_data mvebu_icu_nsr_subset_data = {
.icu_group = ICU_GRP_NSR,
.offset_set_ah = ICU_SETSPI_NSR_AH,
@@ -298,7 +449,6 @@ static const struct of_device_id mvebu_i
static int mvebu_icu_subset_probe(struct platform_device *pdev)
{
struct mvebu_icu_msi_data *msi_data;
- struct device_node *msi_parent_dn;
struct device *dev = &pdev->dev;
struct irq_domain *irq_domain;
@@ -314,15 +464,24 @@ static int mvebu_icu_subset_probe(struct
msi_data->subset_data = of_device_get_match_data(dev);
}
- dev->msi.domain = of_msi_get_domain(dev, dev->of_node,
- DOMAIN_BUS_PLATFORM_MSI);
+ dev->msi.domain = of_msi_get_domain(dev, dev->of_node, DOMAIN_BUS_PLATFORM_MSI);
if (!dev->msi.domain)
return -EPROBE_DEFER;
- msi_parent_dn = irq_domain_get_of_node(dev->msi.domain);
- if (!msi_parent_dn)
+ if (!irq_domain_get_of_node(dev->msi.domain))
return -ENODEV;
+ if (irq_domain_is_msi_parent(dev->msi.domain)) {
+ bool sei = msi_data->subset_data->icu_group == ICU_GRP_SEI;
+ const struct msi_domain_template *tmpl;
+
+ tmpl = sei ? &mvebu_icu_sei_msi_template : &mvebu_icu_nsr_msi_template;
+
+ if (!msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN, tmpl,
+ ICU_MAX_IRQS, NULL, msi_data))
+ return -ENOMEM;
+ }
+
irq_domain = platform_msi_create_device_tree_domain(dev, ICU_MAX_IRQS,
mvebu_icu_write_msg,
&mvebu_icu_domain_ops,