Message-Id: <de3f1d737831b251e9cd2cbf9e4c732a5bbba13a.1750858083.git.namcao@linutronix.de>
Date: Thu, 26 Jun 2025 16:48:06 +0200
From: Nam Cao <namcao@...utronix.de>
To: Marc Zyngier <maz@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>,
Lorenzo Pieralisi <lpieralisi@...nel.org>,
Krzysztof Wilczyński <kwilczynski@...nel.org>,
Manivannan Sadhasivam <mani@...nel.org>,
Rob Herring <robh@...nel.org>,
Bjorn Helgaas <bhelgaas@...gle.com>,
linux-pci@...r.kernel.org,
linux-kernel@...r.kernel.org,
Karthikeyan Mitran <m.karthikeyan@...iveil.co.in>,
Hou Zhiqiang <Zhiqiang.Hou@....com>,
Thomas Petazzoni <thomas.petazzoni@...tlin.com>,
Pali Rohár <pali@...nel.org>,
"K . Y . Srinivasan" <kys@...rosoft.com>,
Haiyang Zhang <haiyangz@...rosoft.com>,
Wei Liu <wei.liu@...nel.org>,
Dexuan Cui <decui@...rosoft.com>,
Joyce Ooi <joyce.ooi@...el.com>,
Jim Quinlan <jim2101024@...il.com>,
Nicolas Saenz Julienne <nsaenz@...nel.org>,
Florian Fainelli <florian.fainelli@...adcom.com>,
Broadcom internal kernel review list <bcm-kernel-feedback-list@...adcom.com>,
Ray Jui <rjui@...adcom.com>,
Scott Branden <sbranden@...adcom.com>,
Ryder Lee <ryder.lee@...iatek.com>,
Jianjun Wang <jianjun.wang@...iatek.com>,
Marek Vasut <marek.vasut+renesas@...il.com>,
Yoshihiro Shimoda <yoshihiro.shimoda.uh@...esas.com>,
Michal Simek <michal.simek@....com>,
Daire McNamara <daire.mcnamara@...rochip.com>,
Nirmal Patel <nirmal.patel@...ux.intel.com>,
Jonathan Derrick <jonathan.derrick@...ux.dev>,
Matthias Brugger <matthias.bgg@...il.com>,
AngeloGioacchino Del Regno <angelogioacchino.delregno@...labora.com>,
linux-arm-kernel@...ts.infradead.org,
linux-hyperv@...r.kernel.org,
linux-rpi-kernel@...ts.infradead.org,
linux-mediatek@...ts.infradead.org,
linux-renesas-soc@...r.kernel.org
Cc: Nam Cao <namcao@...utronix.de>
Subject: [PATCH 16/16] PCI: vmd: Switch to msi_create_parent_irq_domain()
Move away from the legacy MSI domain setup and switch to
msi_create_parent_irq_domain().
Signed-off-by: Nam Cao <namcao@...utronix.de>
---
Cc: Nirmal Patel <nirmal.patel@...ux.intel.com>
Cc: Jonathan Derrick <jonathan.derrick@...ux.dev>
---
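Note for reviewers: a minimal sketch of the domain hierarchy this patch
creates (condensed from the hunks below; not compilable on its own):

	/*
	 * PCI/MSI-X per-device domain
	 *   - created on demand by the generic MSI library
	 *   - its chip's irq_enable/irq_disable are overridden in
	 *     vmd_init_dev_msi_info()
	 *        |
	 *        v
	 * VMD parent domain
	 *   - registered with msi_create_parent_irq_domain()
	 *   - vmd_msi_alloc() maps each allocated virq onto one of the
	 *     VMD's own pre-allocated MSI-X vectors (vmd->irqs[])
	 */

The explicit irq_domain_update_bus_token() call goes away because the
parent ops already carry .bus_select_token = DOMAIN_BUS_VMD_MSI.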
drivers/pci/controller/Kconfig | 1 +
drivers/pci/controller/vmd.c | 160 +++++++++++++++++----------------
2 files changed, 82 insertions(+), 79 deletions(-)
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 8f56ffd029ba2..41748d083b933 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -156,6 +156,7 @@ config PCI_IXP4XX
config VMD
depends on PCI_MSI && X86_64 && !UML
tristate "Intel Volume Management Device Driver"
+ select IRQ_MSI_LIB
help
Adds support for the Intel Volume Management Device (VMD). VMD is a
secondary PCI host bridge that allows PCI Express root ports,
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index d9b893bf4e456..38693a9487d9b 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -7,6 +7,7 @@
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
@@ -174,9 +175,6 @@ static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg->arch_addr_lo.destid_0_7 = index_from_irqs(vmd, irq);
}
-/*
- * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops.
- */
static void vmd_irq_enable(struct irq_data *data)
{
struct vmd_irq *vmdirq = data->chip_data;
@@ -186,7 +184,11 @@ static void vmd_irq_enable(struct irq_data *data)
list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
vmdirq->enabled = true;
}
+}
+static void vmd_pci_msi_enable(struct irq_data *data)
+{
+ vmd_irq_enable(data->parent_data);
data->chip->irq_unmask(data);
}
@@ -194,8 +196,6 @@ static void vmd_irq_disable(struct irq_data *data)
{
struct vmd_irq *vmdirq = data->chip_data;
- data->chip->irq_mask(data);
-
scoped_guard(raw_spinlock_irqsave, &list_lock) {
if (vmdirq->enabled) {
list_del_rcu(&vmdirq->node);
@@ -204,19 +204,17 @@ static void vmd_irq_disable(struct irq_data *data)
}
}
+static void vmd_pci_msi_disable(struct irq_data *data)
+{
+ data->chip->irq_mask(data);
+ vmd_irq_disable(data->parent_data);
+}
+
static struct irq_chip vmd_msi_controller = {
.name = "VMD-MSI",
- .irq_enable = vmd_irq_enable,
- .irq_disable = vmd_irq_disable,
.irq_compose_msi_msg = vmd_compose_msi_msg,
};
-static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
- msi_alloc_info_t *arg)
-{
- return 0;
-}
-
/*
* XXX: We can be even smarter selecting the best IRQ once we solve the
* affinity problem.
@@ -250,100 +248,110 @@ static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *d
return &vmd->irqs[best];
}
-static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
- unsigned int virq, irq_hw_number_t hwirq,
- msi_alloc_info_t *arg)
+static void vmd_msi_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs);
+
+static int vmd_msi_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs,
+ void *arg)
{
- struct msi_desc *desc = arg->desc;
- struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus);
- struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
+ struct msi_desc *desc = ((msi_alloc_info_t *)arg)->desc;
+ struct vmd_dev *vmd = domain->host_data;
+ struct vmd_irq *vmdirq;
- if (!vmdirq)
- return -ENOMEM;
+ for (int i = 0; i < nr_irqs; ++i) {
+ vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
+ if (!vmdirq) {
+ vmd_msi_free(domain, virq, i);
+ return -ENOMEM;
+ }
- INIT_LIST_HEAD(&vmdirq->node);
- vmdirq->irq = vmd_next_irq(vmd, desc);
- vmdirq->virq = virq;
+ INIT_LIST_HEAD(&vmdirq->node);
+ vmdirq->irq = vmd_next_irq(vmd, desc);
+ vmdirq->virq = virq + i;
+
+ irq_domain_set_info(domain, virq + i, vmdirq->irq->virq, &vmd_msi_controller,
+ vmdirq, handle_untracked_irq, vmd, NULL);
+ }
- irq_domain_set_info(domain, virq, vmdirq->irq->virq, info->chip, vmdirq,
- handle_untracked_irq, vmd, NULL);
return 0;
}
-static void vmd_msi_free(struct irq_domain *domain,
- struct msi_domain_info *info, unsigned int virq)
+static void vmd_msi_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs)
{
struct vmd_irq *vmdirq = irq_get_chip_data(virq);
- synchronize_srcu(&vmdirq->irq->srcu);
+ for (int i = 0; i < nr_irqs; ++i) {
+ synchronize_srcu(&vmdirq->irq->srcu);
- /* XXX: Potential optimization to rebalance */
- scoped_guard(raw_spinlock_irq, &list_lock)
- vmdirq->irq->count--;
+ /* XXX: Potential optimization to rebalance */
+ scoped_guard(raw_spinlock_irq, &list_lock)
+ vmdirq->irq->count--;
- kfree(vmdirq);
+ kfree(vmdirq);
+ }
}
-static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev,
- int nvec, msi_alloc_info_t *arg)
+static const struct irq_domain_ops vmd_msi_domain_ops = {
+ .alloc = vmd_msi_alloc,
+ .free = vmd_msi_free,
+};
+
+static bool vmd_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *real_parent, struct msi_domain_info *info)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct vmd_dev *vmd = vmd_from_bus(pdev->bus);
+ if (WARN_ON_ONCE(info->bus_token != DOMAIN_BUS_PCI_DEVICE_MSIX))
+ return false;
- if (nvec > vmd->msix_count)
- return vmd->msix_count;
+ if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
+ return false;
- memset(arg, 0, sizeof(*arg));
- return 0;
+ info->chip->irq_enable = vmd_pci_msi_enable;
+ info->chip->irq_disable = vmd_pci_msi_disable;
+ return true;
}
-static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
-{
- arg->desc = desc;
-}
+#define VMD_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | MSI_FLAG_PCI_MSIX)
+#define VMD_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_NO_AFFINITY)
-static struct msi_domain_ops vmd_msi_domain_ops = {
- .get_hwirq = vmd_get_hwirq,
- .msi_init = vmd_msi_init,
- .msi_free = vmd_msi_free,
- .msi_prepare = vmd_msi_prepare,
- .set_desc = vmd_set_desc,
+static const struct msi_parent_ops vmd_msi_parent_ops = {
+ .supported_flags = VMD_MSI_FLAGS_SUPPORTED,
+ .required_flags = VMD_MSI_FLAGS_REQUIRED,
+ .bus_select_token = DOMAIN_BUS_VMD_MSI,
+ .bus_select_mask = MATCH_PCI_MSI,
+ .prefix = "VMD-",
+ .init_dev_msi_info = vmd_init_dev_msi_info,
};
-static struct msi_domain_info vmd_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
- .ops = &vmd_msi_domain_ops,
- .chip = &vmd_msi_controller,
-};
-
-static void vmd_set_msi_remapping(struct vmd_dev *vmd, bool enable)
-{
- u16 reg;
-
- pci_read_config_word(vmd->dev, PCI_REG_VMCONFIG, &reg);
- reg = enable ? (reg & ~VMCONFIG_MSI_REMAP) :
- (reg | VMCONFIG_MSI_REMAP);
- pci_write_config_word(vmd->dev, PCI_REG_VMCONFIG, reg);
-}
-
static int vmd_create_irq_domain(struct vmd_dev *vmd)
{
- struct fwnode_handle *fn;
+ struct irq_domain_info info = {
+ .size = vmd->msix_count,
+ .ops = &vmd_msi_domain_ops,
+ .host_data = vmd,
+ };
- fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain);
- if (!fn)
+ info.fwnode = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain);
+ if (!info.fwnode)
return -ENODEV;
- vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info, NULL);
+ vmd->irq_domain = msi_create_parent_irq_domain(&info, &vmd_msi_parent_ops);
if (!vmd->irq_domain) {
- irq_domain_free_fwnode(fn);
+ irq_domain_free_fwnode(info.fwnode);
return -ENODEV;
}
return 0;
}
+static void vmd_set_msi_remapping(struct vmd_dev *vmd, bool enable)
+{
+ u16 reg;
+
+ pci_read_config_word(vmd->dev, PCI_REG_VMCONFIG, &reg);
+ reg = enable ? (reg & ~VMCONFIG_MSI_REMAP) :
+ (reg | VMCONFIG_MSI_REMAP);
+ pci_write_config_word(vmd->dev, PCI_REG_VMCONFIG, reg);
+}
+
static void vmd_remove_irq_domain(struct vmd_dev *vmd)
{
/*
@@ -874,12 +882,6 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
ret = vmd_create_irq_domain(vmd);
if (ret)
return ret;
-
- /*
- * Override the IRQ domain bus token so the domain can be
- * distinguished from a regular PCI/MSI domain.
- */
- irq_domain_update_bus_token(vmd->irq_domain, DOMAIN_BUS_VMD_MSI);
} else {
vmd_set_msi_remapping(vmd, false);
}
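A note on ordering in the new per-device chip ops, since the mask/unmask
calls moved out of vmd_irq_enable()/vmd_irq_disable() (sketch copied from
the hunks above, comments added here):

	static void vmd_pci_msi_enable(struct irq_data *data)
	{
		/* Insert into the VMD vector's dispatch list first ... */
		vmd_irq_enable(data->parent_data);
		/* ... then unmask, so the entry is reachable from
		 * vmd_irq() before an interrupt can fire */
		data->chip->irq_unmask(data);
	}

	static void vmd_pci_msi_disable(struct irq_data *data)
	{
		/* Mask first so no new interrupt targets this entry ... */
		data->chip->irq_mask(data);
		/* ... then unlink; vmd_msi_free() later does
		 * synchronize_srcu() before freeing the chip data */
		vmd_irq_disable(data->parent_data);
	}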
--
2.39.5