Message-ID: <163689642456.3249160.13397023971040961440.tglx@xen13>
Date: Sun, 14 Nov 2021 14:30:56 +0100 (CET)
From: Thomas Gleixner <tglx@...utronix.de>
To: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: linux-kernel@...r.kernel.org, x86@...nel.org
Subject: [GIT pull] irq/urgent for v5.16-rc1
Linus,
please pull the latest irq/urgent branch from:
git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq-urgent-2021-11-14
up to: 979292af5b51: Merge tag 'irqchip-fixes-5.16-1' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into irq/urgent
A set of fixes for the interrupt subsystem:
- Core code:
A regression fix for the Open Firmware interrupt mapping code where a
failing interrupt-map property in a node caused the interrupt-controller
property in the same node to be ignored.
- Interrupt chip drivers:
- Work around a limitation of the SiFive PLIC interrupt chip, which
silently ignores an EOI when the interrupt line is masked.
- Provide the missing mask/unmask implementation for the CSKY MP
interrupt controller.
- PCI/MSI:
- Prevent a use-after-free when PCI/MSI interrupts are released by
destroying the sysfs entries before freeing the memory which is
accessed by the sysfs show() function.
- Implement a mask quirk for the Nvidia ION AHCI chip, which does not
advertise MSI masking capability despite implementing it. Even worse,
the chip comes out of reset with all MSI entries masked, and due to
the missing masking capability they never get unmasked.
- Move the check which prevents accessing the MSI[X] masking for XEN
back into the low level accessors. The recent consolidation missed
that these accessors can be invoked from places which do not have
that check, which broke XEN. Move it back to the original place
instead of sprinkling tons of these checks all over the code. The
resulting accessor logic is sketched below.
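All "can this vector be masked at all" conditions (the MSI MASKBIT
capability, MSI-X virtual vectors and the global XEN
pci_msi_ignore_mask switch) are now evaluated once at setup time and
folded into a single per descriptor can_mask bit, which the low level
accessors check. A minimal, self contained sketch of that logic
(simplified stand-ins, not the actual drivers/pci/msi.c structures or
functions):

#include <stdbool.h>
#include <stdint.h>

#define PCI_MSI_FLAGS_MASKBIT	0x0100	/* per-vector masking capable */

struct msi_desc_sketch {
	bool		is_msix;
	bool		can_mask;	/* replaces maskbit/is_virtual/XEN checks */
	uint32_t	msi_mask;
};

/* Setup time: fold every "cannot mask" condition into can_mask once */
static void setup_can_mask(struct msi_desc_sketch *desc, uint16_t msi_control,
			   bool is_virtual, bool ignore_mask /* e.g. XEN */)
{
	if (desc->is_msix)
		desc->can_mask = !ignore_mask && !is_virtual;
	else
		desc->can_mask = !ignore_mask &&
				 (msi_control & PCI_MSI_FLAGS_MASKBIT);
}

/* Low level accessor: one early return instead of scattered checks */
static void update_mask(struct msi_desc_sketch *desc, uint32_t clear, uint32_t set)
{
	if (!desc->can_mask)
		return;
	desc->msi_mask &= ~clear;
	desc->msi_mask |= set;
	/* the real accessor would also write the hardware mask register */
}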
Thanks,
tglx
------------------>
Guo Ren (2):
irqchip/csky-mpintc: Fixup mask/unmask implementation
irqchip/sifive-plic: Fixup EOI failed when masked
Marc Zyngier (3):
PCI/MSI: Deal with devices lying about their MSI mask capability
PCI: Add MSI masking quirk for Nvidia ION AHCI
of/irq: Don't ignore interrupt-controller when interrupt-map failed
Thomas Gleixner (2):
PCI/MSI: Move non-mask check back into low level accessors
PCI/MSI: Destroy sysfs before freeing entries
drivers/irqchip/irq-csky-mpintc.c | 8 ++++----
drivers/irqchip/irq-sifive-plic.c | 8 +++++++-
drivers/of/irq.c | 19 ++++++++++++++++---
drivers/pci/msi.c | 39 ++++++++++++++++++++++-----------------
drivers/pci/quirks.c | 6 ++++++
include/linux/msi.h | 2 +-
include/linux/pci.h | 2 ++
kernel/irq/msi.c | 4 ++--
8 files changed, 60 insertions(+), 28 deletions(-)
diff --git a/drivers/irqchip/irq-csky-mpintc.c b/drivers/irqchip/irq-csky-mpintc.c
index cb403c960ac0..4aebd67d4f8f 100644
--- a/drivers/irqchip/irq-csky-mpintc.c
+++ b/drivers/irqchip/irq-csky-mpintc.c
@@ -78,7 +78,7 @@ static void csky_mpintc_handler(struct pt_regs *regs)
readl_relaxed(reg_base + INTCL_RDYIR));
}
-static void csky_mpintc_enable(struct irq_data *d)
+static void csky_mpintc_unmask(struct irq_data *d)
{
void __iomem *reg_base = this_cpu_read(intcl_reg);
@@ -87,7 +87,7 @@ static void csky_mpintc_enable(struct irq_data *d)
writel_relaxed(d->hwirq, reg_base + INTCL_SENR);
}
-static void csky_mpintc_disable(struct irq_data *d)
+static void csky_mpintc_mask(struct irq_data *d)
{
void __iomem *reg_base = this_cpu_read(intcl_reg);
@@ -164,8 +164,8 @@ static int csky_irq_set_affinity(struct irq_data *d,
static struct irq_chip csky_irq_chip = {
.name = "C-SKY SMP Intc",
.irq_eoi = csky_mpintc_eoi,
- .irq_enable = csky_mpintc_enable,
- .irq_disable = csky_mpintc_disable,
+ .irq_unmask = csky_mpintc_unmask,
+ .irq_mask = csky_mpintc_mask,
.irq_set_type = csky_mpintc_set_type,
#ifdef CONFIG_SMP
.irq_set_affinity = csky_irq_set_affinity,
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index cf74cfa82045..259065d271ef 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -163,7 +163,13 @@ static void plic_irq_eoi(struct irq_data *d)
{
struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
- writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+ if (irqd_irq_masked(d)) {
+ plic_irq_unmask(d);
+ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+ plic_irq_mask(d);
+ } else {
+ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+ }
}
static struct irq_chip plic_chip = {
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 32be5a03951f..b10f015b2e37 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -161,9 +161,10 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
* if it is then we are done, unless there is an
* interrupt-map which takes precedence.
*/
+ bool intc = of_property_read_bool(ipar, "interrupt-controller");
+
imap = of_get_property(ipar, "interrupt-map", &imaplen);
- if (imap == NULL &&
- of_property_read_bool(ipar, "interrupt-controller")) {
+ if (imap == NULL && intc) {
pr_debug(" -> got it !\n");
return 0;
}
@@ -244,8 +245,20 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
pr_debug(" -> imaplen=%d\n", imaplen);
}
- if (!match)
+ if (!match) {
+ if (intc) {
+ /*
+ * The PASEMI Nemo is a known offender, so
+ * let's only warn for anyone else.
+ */
+ WARN(!IS_ENABLED(CONFIG_PPC_PASEMI),
+ "%pOF interrupt-map failed, using interrupt-controller\n",
+ ipar);
+ return 0;
+ }
+
goto fail;
+ }
/*
* Successfully parsed an interrrupt-map translation; copy new
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 12e296d634eb..48e3f4e47b29 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -148,6 +148,9 @@ static noinline void pci_msi_update_mask(struct msi_desc *desc, u32 clear, u32 s
raw_spinlock_t *lock = &desc->dev->msi_lock;
unsigned long flags;
+ if (!desc->msi_attrib.can_mask)
+ return;
+
raw_spin_lock_irqsave(lock, flags);
desc->msi_mask &= ~clear;
desc->msi_mask |= set;
@@ -181,7 +184,8 @@ static void pci_msix_write_vector_ctrl(struct msi_desc *desc, u32 ctrl)
{
void __iomem *desc_addr = pci_msix_desc_addr(desc);
- writel(ctrl, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
+ if (desc->msi_attrib.can_mask)
+ writel(ctrl, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
static inline void pci_msix_mask(struct msi_desc *desc)
@@ -200,23 +204,17 @@ static inline void pci_msix_unmask(struct msi_desc *desc)
static void __pci_msi_mask_desc(struct msi_desc *desc, u32 mask)
{
- if (pci_msi_ignore_mask || desc->msi_attrib.is_virtual)
- return;
-
if (desc->msi_attrib.is_msix)
pci_msix_mask(desc);
- else if (desc->msi_attrib.maskbit)
+ else
pci_msi_mask(desc, mask);
}
static void __pci_msi_unmask_desc(struct msi_desc *desc, u32 mask)
{
- if (pci_msi_ignore_mask || desc->msi_attrib.is_virtual)
- return;
-
if (desc->msi_attrib.is_msix)
pci_msix_unmask(desc);
- else if (desc->msi_attrib.maskbit)
+ else
pci_msi_unmask(desc, mask);
}
@@ -370,6 +368,11 @@ static void free_msi_irqs(struct pci_dev *dev)
for (i = 0; i < entry->nvec_used; i++)
BUG_ON(irq_has_action(entry->irq + i));
+ if (dev->msi_irq_groups) {
+ msi_destroy_sysfs(&dev->dev, dev->msi_irq_groups);
+ dev->msi_irq_groups = NULL;
+ }
+
pci_msi_teardown_msi_irqs(dev);
list_for_each_entry_safe(entry, tmp, msi_list, list) {
@@ -381,11 +384,6 @@ static void free_msi_irqs(struct pci_dev *dev)
list_del(&entry->list);
free_msi_entry(entry);
}
-
- if (dev->msi_irq_groups) {
- msi_destroy_sysfs(&dev->dev, dev->msi_irq_groups);
- dev->msi_irq_groups = NULL;
- }
}
static void pci_intx_for_msi(struct pci_dev *dev, int enable)
@@ -479,12 +477,16 @@ msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity *affd)
goto out;
pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
+ /* Lies, damned lies, and MSIs */
+ if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING)
+ control |= PCI_MSI_FLAGS_MASKBIT;
entry->msi_attrib.is_msix = 0;
entry->msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT);
entry->msi_attrib.is_virtual = 0;
entry->msi_attrib.entry_nr = 0;
- entry->msi_attrib.maskbit = !!(control & PCI_MSI_FLAGS_MASKBIT);
+ entry->msi_attrib.can_mask = !pci_msi_ignore_mask &&
+ !!(control & PCI_MSI_FLAGS_MASKBIT);
entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1;
entry->msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec));
@@ -495,7 +497,7 @@ msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity *affd)
entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_32;
/* Save the initial mask status */
- if (entry->msi_attrib.maskbit)
+ if (entry->msi_attrib.can_mask)
pci_read_config_dword(dev, entry->mask_pos, &entry->msi_mask);
out:
@@ -639,10 +641,13 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
entry->msi_attrib.is_virtual =
entry->msi_attrib.entry_nr >= vec_count;
+ entry->msi_attrib.can_mask = !pci_msi_ignore_mask &&
+ !entry->msi_attrib.is_virtual;
+
entry->msi_attrib.default_irq = dev->irq;
entry->mask_base = base;
- if (!entry->msi_attrib.is_virtual) {
+ if (entry->msi_attrib.can_mask) {
addr = pci_msix_desc_addr(entry);
entry->msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index aedb78c86ddc..003950c738d2 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -5851,3 +5851,9 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_PERICOM, 0x2303,
pci_fixup_pericom_acs_store_forward);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_PERICOM, 0x2303,
pci_fixup_pericom_acs_store_forward);
+
+static void nvidia_ion_ahci_fixup(struct pci_dev *pdev)
+{
+ pdev->dev_flags |= PCI_DEV_FLAGS_HAS_MSI_MASKING;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0ab8, nvidia_ion_ahci_fixup);
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 49cf6eb222e7..e616f94c7c58 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -148,7 +148,7 @@ struct msi_desc {
u8 is_msix : 1;
u8 multiple : 3;
u8 multi_cap : 3;
- u8 maskbit : 1;
+ u8 can_mask : 1;
u8 is_64 : 1;
u8 is_virtual : 1;
u16 entry_nr;
diff --git a/include/linux/pci.h b/include/linux/pci.h
index c8afbee5da4b..d0dba7fd9848 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -233,6 +233,8 @@ enum pci_dev_flags {
PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
/* Don't use Relaxed Ordering for TLPs directed at this device */
PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11),
+ /* Device does honor MSI masking despite saying otherwise */
+ PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12),
};
enum pci_irq_reroute_variant {
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index 6a5ecee6e567..7f350ae59c5f 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -529,10 +529,10 @@ static bool msi_check_reservation_mode(struct irq_domain *domain,
/*
* Checking the first MSI descriptor is sufficient. MSIX supports
- * masking and MSI does so when the maskbit is set.
+ * masking and MSI does so when the can_mask attribute is set.
*/
desc = first_msi_entry(dev);
- return desc->msi_attrib.is_msix || desc->msi_attrib.maskbit;
+ return desc->msi_attrib.is_msix || desc->msi_attrib.can_mask;
}
int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,