Message-ID: <20240621080201.305471-20-mkl@pengutronix.de>
Date: Fri, 21 Jun 2024 09:48:39 +0200
From: Marc Kleine-Budde <mkl@...gutronix.de>
To: netdev@...r.kernel.org
Cc: davem@...emloft.net,
kuba@...nel.org,
linux-can@...r.kernel.org,
kernel@...gutronix.de,
Martin Jocic <martin.jocic@...ser.com>,
Marc Kleine-Budde <mkl@...gutronix.de>
Subject: [PATCH net-next 19/24] can: kvaser_pciefd: Move reset of DMA RX buffers to the end of the ISR
From: Martin Jocic <martin.jocic@...ser.com>
A new interrupt is triggered by resetting the DMA RX buffers.
Since MSI interrupts are faster than legacy interrupts, the reset
of the DMA buffers must be moved to the very end of the ISR;
otherwise a new MSI interrupt will be masked by the current one.
Signed-off-by: Martin Jocic <martin.jocic@...ser.com>
Link: https://lore.kernel.org/all/20240620181320.235465-2-martin.jocic@kvaser.com
Signed-off-by: Marc Kleine-Budde <mkl@...gutronix.de>
---
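Not part of the patch: below is a minimal, self-contained C sketch of the
ordering described in the commit message, under stated assumptions. The
numeric bit values, the reg_*() helpers, and handle_tx() are placeholders
invented for illustration; the real driver uses ioread32()/iowrite32() and
the KVASER_PCIEFD_* constants shown in the diff. Only the ordering, drain
the buffers, ack the interrupt register, service TX, and issue the buffer
reset commands last, mirrors the change.

/* Standalone sketch, not driver code. Compile with any C compiler. */
#include <stdint.h>
#include <stdio.h>

/* Placeholder bit values; the real ones live in kvaser_pciefd.c. */
#define SRB_IRQ_DPD0	0x01u	/* DMA packet done, buffer 0 */
#define SRB_IRQ_DPD1	0x02u	/* DMA packet done, buffer 1 */
#define SRB_CMD_RDB0	0x10u	/* reset DMA buffer 0 */
#define SRB_CMD_RDB1	0x20u	/* reset DMA buffer 1 */

/* Pretend both RX buffers have completed packets pending. */
static uint32_t srb_irq_reg = SRB_IRQ_DPD0 | SRB_IRQ_DPD1;

static uint32_t reg_read(void)		{ return srb_irq_reg; }
static void reg_ack(uint32_t bits)	{ srb_irq_reg &= ~bits; }
static void reg_cmd(uint32_t cmd)	{ printf("issued reset command 0x%02x\n", (unsigned int)cmd); }
static void read_buffer(int idx)	{ printf("drained DMA buffer %d\n", idx); }
static void handle_tx(int ch)		{ printf("handled TX irq, channel %d\n", ch); }

/* Mirrors the reworked ISR: drain, ack, service TX, reset buffers last. */
static void isr(void)
{
	uint32_t srb_irq = reg_read();
	int i;

	if (srb_irq & SRB_IRQ_DPD0)
		read_buffer(0);
	if (srb_irq & SRB_IRQ_DPD1)
		read_buffer(1);

	/* Ack the handled bits before anything can raise a new interrupt. */
	reg_ack(srb_irq);

	for (i = 0; i < 2; i++)
		handle_tx(i);

	/*
	 * Only now issue the reset commands. Each reset raises a fresh
	 * "packet done" interrupt; with MSI that new interrupt can fire
	 * while the handler is still running and would be masked by the
	 * one currently being serviced, so the resets must come last.
	 */
	if (srb_irq & SRB_IRQ_DPD0)
		reg_cmd(SRB_CMD_RDB0);
	if (srb_irq & SRB_IRQ_DPD1)
		reg_cmd(SRB_CMD_RDB1);
}

int main(void)
{
	isr();
	return 0;
}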
drivers/net/can/kvaser_pciefd.c | 30 ++++++++++++++++++------------
1 file changed, 18 insertions(+), 12 deletions(-)
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index 24871c276b31..b4ffd56fdeff 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -1640,23 +1640,15 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
return res;
}
-static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
+static u32 kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
{
u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
- if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
+ if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
kvaser_pciefd_read_buffer(pcie, 0);
- /* Reset DMA buffer 0 */
- iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
- KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
- }
- if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
+ if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
kvaser_pciefd_read_buffer(pcie, 1);
- /* Reset DMA buffer 1 */
- iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
- KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
- }
if (unlikely(irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
@@ -1665,6 +1657,7 @@ static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);
iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
+ return irq;
}
static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
@@ -1692,19 +1685,32 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask;
u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
+ u32 srb_irq = 0;
int i;
if (!(pci_irq & irq_mask->all))
return IRQ_NONE;
if (pci_irq & irq_mask->kcan_rx0)
- kvaser_pciefd_receive_irq(pcie);
+ srb_irq = kvaser_pciefd_receive_irq(pcie);
for (i = 0; i < pcie->nr_channels; i++) {
if (pci_irq & irq_mask->kcan_tx[i])
kvaser_pciefd_transmit_irq(pcie->can[i]);
}
+ if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
+ /* Reset DMA buffer 0, may trigger new interrupt */
+ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
+ KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
+ }
+
+ if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
+ /* Reset DMA buffer 1, may trigger new interrupt */
+ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
+ KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
+ }
+
return IRQ_HANDLED;
}
--
2.43.0