Message-ID: <20241113121941.3372131-5-agaur@marvell.com>
Date: Wed, 13 Nov 2024 17:49:41 +0530
From: Anshumali Gaur <agaur@...vell.com>
To: <quic_bjorande@...cinc.com>, <christophe.leroy@...roup.eu>,
<angelogioacchino.delregno@...labora.com>, <herve.codina@...tlin.com>,
<dmitry.baryshkov@...aro.org>, <linux-kernel@...r.kernel.org>,
<arnd@...db.de>, <sgoutham@...vell.com>
CC: Anshumali Gaur <agaur@...vell.com>
Subject: [PATCH v5 4/4] soc: marvell: rvu-pf: Handle function level reset (FLR) IRQs for VFs
Add a PCIe FLR interrupt handler for VFs. When an FLR is triggered for
a VF, the parent PF gets an interrupt. The PF creates a mbox message and
sends it to the RVU Admin Function (AF). The AF cleans up all the
resources attached to that specific VF and acks the PF that the FLR has
been handled.
Signed-off-by: Anshumali Gaur <agaur@...vell.com>
---
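A note on the VF indexing used throughout the handlers below: each
64-bit VFFLR/VFME register covers 64 VFs, so two registers handle up
to 128 VFs. The handlers open-code the mapping from a VF number to a
(register, bit) pair; a minimal sketch of that mapping (the helper
name is illustrative only, not part of this patch):

    static inline void vf_to_reg_bit(int vf, int *reg, int *bit)
    {
            *reg = vf / 64; /* VFs 0-63 -> reg 0, VFs 64-127 -> reg 1 */
            *bit = vf % 64; /* bit position within that register */
    }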
drivers/soc/marvell/rvu_gen_pf/gen_pf.c | 232 +++++++++++++++++++++++-
drivers/soc/marvell/rvu_gen_pf/gen_pf.h | 7 +
2 files changed, 238 insertions(+), 1 deletion(-)
diff --git a/drivers/soc/marvell/rvu_gen_pf/gen_pf.c b/drivers/soc/marvell/rvu_gen_pf/gen_pf.c
index 027d54c182a5..d99f0064aaf8 100644
--- a/drivers/soc/marvell/rvu_gen_pf/gen_pf.c
+++ b/drivers/soc/marvell/rvu_gen_pf/gen_pf.c
@@ -626,6 +626,15 @@ static void rvu_gen_pf_queue_vf_work(struct mbox *mw, struct workqueue_struct *m
}
}
+static void rvu_gen_pf_flr_wq_destroy(struct gen_pf_dev *pfdev)
+{
+ if (!pfdev->flr_wq)
+ return;
+ destroy_workqueue(pfdev->flr_wq);
+ pfdev->flr_wq = NULL;
+ devm_kfree(pfdev->dev, pfdev->flr_wrk);
+}
+
static irqreturn_t rvu_gen_pf_pfvf_mbox_intr_handler(int irq, void *pf_irq)
{
struct gen_pf_dev *pfdev = (struct gen_pf_dev *)(pf_irq);
@@ -699,6 +708,211 @@ static int rvu_gen_pf_register_pfvf_mbox_intr(struct gen_pf_dev *pfdev, int numv
return 0;
}
+static void rvu_gen_pf_flr_handler(struct work_struct *work)
+{
+ struct flr_work *flrwork = container_of(work, struct flr_work, work);
+ struct gen_pf_dev *pfdev = flrwork->pfdev;
+ struct mbox *mbox = &pfdev->mbox;
+ struct msg_req *req;
+ int vf, reg = 0;
+
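+ /* The index of this work item in the flr_wrk array is the VF number */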
+ vf = flrwork - pfdev->flr_wrk;
+
+ mutex_lock(&mbox->lock);
+ req = gen_pf_mbox_alloc_msg_vf_flr(mbox);
+ if (!req) {
+ mutex_unlock(&mbox->lock);
+ return;
+ }
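+ /* The pcifunc FUNC field is 1-based for VFs; FUNC 0 is the PF itself */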
+ req->hdr.pcifunc &= ~RVU_PFVF_FUNC_MASK;
+ req->hdr.pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;
+
+ if (!rvu_gen_pf_sync_mbox_msg(&pfdev->mbox)) {
+ if (vf >= 64) {
+ reg = 1;
+ vf = vf - 64;
+ }
+ /* Clear the transaction pending bit */
+ writeq(BIT_ULL(vf), pfdev->reg_base + RVU_PF_VFTRPENDX(reg));
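+ /* Re-enable the FLR interrupt that the IRQ handler masked off */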
+ writeq(BIT_ULL(vf), pfdev->reg_base + RVU_PF_VFFLR_INT_ENA_W1SX(reg));
+ }
+
+ mutex_unlock(&mbox->lock);
+}
+
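+/* VF ME (master enable) interrupts only need to be acknowledged; no cleanup work is queued */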
+static irqreturn_t rvu_gen_pf_me_intr_handler(int irq, void *pf_irq)
+{
+ struct gen_pf_dev *pfdev = (struct gen_pf_dev *)pf_irq;
+ int vf, reg, num_reg = 1;
+ u64 intr;
+
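+ /* Each 64-bit register covers 64 VFs; a second register is needed beyond 64 */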
+ if (pfdev->total_vfs > 64)
+ num_reg = 2;
+
+ for (reg = 0; reg < num_reg; reg++) {
+ intr = readq(pfdev->reg_base + RVU_PF_VFME_INTX(reg));
+ if (!intr)
+ continue;
+ for (vf = 0; vf < 64; vf++) {
+ if (!(intr & BIT_ULL(vf)))
+ continue;
+ /* clear trpend bit */
+ writeq(BIT_ULL(vf), pfdev->reg_base + RVU_PF_VFTRPENDX(reg));
+ /* clear interrupt */
+ writeq(BIT_ULL(vf), pfdev->reg_base + RVU_PF_VFME_INTX(reg));
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_gen_pf_flr_intr_handler(int irq, void *pf_irq)
+{
+ struct gen_pf_dev *pfdev = (struct gen_pf_dev *)pf_irq;
+ int reg, dev, vf, start_vf, num_reg = 1;
+ u64 intr;
+
+ if (pfdev->total_vfs > 64)
+ num_reg = 2;
+
+ for (reg = 0; reg < num_reg; reg++) {
+ intr = readq(pfdev->reg_base + RVU_PF_VFFLR_INTX(reg));
+ if (!intr)
+ continue;
+ start_vf = 64 * reg;
+ for (vf = 0; vf < 64; vf++) {
+ if (!(intr & BIT_ULL(vf)))
+ continue;
+ dev = vf + start_vf;
+ queue_work(pfdev->flr_wq, &pfdev->flr_wrk[dev].work);
+ /* Clear interrupt */
+ writeq(BIT_ULL(vf), pfdev->reg_base + RVU_PF_VFFLR_INTX(reg));
+ /* Disable the interrupt */
+ writeq(BIT_ULL(vf), pfdev->reg_base + RVU_PF_VFFLR_INT_ENA_W1CX(reg));
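+ /* The FLR work handler re-enables it once the AF acks the cleanup */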
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static int rvu_gen_pf_register_flr_me_intr(struct gen_pf_dev *pfdev, int numvfs)
+{
+ char *irq_name;
+ int ret;
+
+ /* Register ME interrupt handler */
+ irq_name = &pfdev->irq_name[RVU_PF_INT_VEC_VFME0 * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "Generic RVUPF%d_ME0", rvu_get_pf(pfdev->pcifunc));
+ ret = request_irq(pci_irq_vector(pfdev->pdev, RVU_PF_INT_VEC_VFME0),
+ rvu_gen_pf_me_intr_handler, 0, irq_name, pfdev);
+
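+ /* ME IRQ registration failure is logged but not treated as fatal */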
+ if (ret) {
+ dev_err(pfdev->dev,
+ "Generic RVUPF: IRQ registration failed for ME0\n");
+ }
+
+ /* Register FLR interrupt handler */
+ irq_name = &pfdev->irq_name[RVU_PF_INT_VEC_VFFLR0 * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "Generic RVUPF%d_FLR0", rvu_get_pf(pfdev->pcifunc));
+ ret = request_irq(pci_irq_vector(pfdev->pdev, RVU_PF_INT_VEC_VFFLR0),
+ rvu_gen_pf_flr_intr_handler, 0, irq_name, pfdev);
+ if (ret) {
+ dev_err(pfdev->dev,
+ "Generic RVUPF: IRQ registration failed for FLR0\n");
+ return ret;
+ }
+
+ if (numvfs > 64) {
+ irq_name = &pfdev->irq_name[RVU_PF_INT_VEC_VFME1 * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "Generic RVUPF%d_ME1",
+ rvu_get_pf(pfdev->pcifunc));
+ ret = request_irq(pci_irq_vector(pfdev->pdev, RVU_PF_INT_VEC_VFME1),
+ rvu_gen_pf_me_intr_handler, 0, irq_name, pfdev);
+ if (ret) {
+ dev_err(pfdev->dev,
+ "Generic RVUPF: IRQ registration failed for ME1\n");
+ }
+ irq_name = &pfdev->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "Generic RVUPF%d_FLR1",
+ rvu_get_pf(pfdev->pcifunc));
+ ret = request_irq(pci_irq_vector(pfdev->pdev, RVU_PF_INT_VEC_VFFLR1),
+ rvu_gen_pf_flr_intr_handler, 0, irq_name, pfdev);
+ if (ret) {
+ dev_err(pfdev->dev,
+ "Generic RVUPF: IRQ registration failed for FLR1\n");
+ return ret;
+ }
+ }
+
+ /* Enable ME interrupts for all VFs */
+ writeq(INTR_MASK(numvfs), pfdev->reg_base + RVU_PF_VFME_INTX(0));
+ writeq(INTR_MASK(numvfs), pfdev->reg_base + RVU_PF_VFME_INT_ENA_W1SX(0));
+
+ /* Enable FLR interrupts for all VFs */
+ writeq(INTR_MASK(numvfs), pfdev->reg_base + RVU_PF_VFFLR_INTX(0));
+ writeq(INTR_MASK(numvfs), pfdev->reg_base + RVU_PF_VFFLR_INT_ENA_W1SX(0));
+
+ if (numvfs > 64) {
+ numvfs -= 64;
+
+ writeq(INTR_MASK(numvfs), pfdev->reg_base + RVU_PF_VFME_INTX(1));
+ writeq(INTR_MASK(numvfs), pfdev->reg_base + RVU_PF_VFME_INT_ENA_W1SX(1));
+
+ writeq(INTR_MASK(numvfs), pfdev->reg_base + RVU_PF_VFFLR_INTX(1));
+ writeq(INTR_MASK(numvfs), pfdev->reg_base + RVU_PF_VFFLR_INT_ENA_W1SX(1));
+ }
+ return 0;
+}
+
+static void rvu_gen_pf_disable_flr_me_intr(struct gen_pf_dev *pfdev)
+{
+ int irq, vfs = pfdev->total_vfs;
+
+ /* Disable ME interrupts for all VFs */
+ writeq(INTR_MASK(vfs), pfdev->reg_base + RVU_PF_VFME_INT_ENA_W1CX(0));
+ irq = pci_irq_vector(pfdev->pdev, RVU_PF_INT_VEC_VFME0);
+ free_irq(irq, pfdev);
+
+ /* Disable FLR interrupts for all VFs */
+ writeq(INTR_MASK(vfs), pfdev->reg_base + RVU_PF_VFFLR_INT_ENA_W1CX(0));
+ irq = pci_irq_vector(pfdev->pdev, RVU_PF_INT_VEC_VFFLR0);
+ free_irq(irq, pfdev);
+
+ if (vfs <= 64)
+ return;
+
+ writeq(INTR_MASK(vfs - 64), pfdev->reg_base + RVU_PF_VFME_INT_ENA_W1CX(1));
+ irq = pci_irq_vector(pfdev->pdev, RVU_PF_INT_VEC_VFME1);
+ free_irq(irq, pfdev);
+
+ writeq(INTR_MASK(vfs - 64), pfdev->reg_base + RVU_PF_VFFLR_INT_ENA_W1CX(1));
+ irq = pci_irq_vector(pfdev->pdev, RVU_PF_INT_VEC_VFFLR1);
+ free_irq(irq, pfdev);
+}
+
+static int rvu_gen_pf_flr_init(struct gen_pf_dev *pfdev, int num_vfs)
+{
+ int vf;
+
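+ /* Ordered workqueue: VF FLR requests are processed one at a time */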
+ pfdev->flr_wq = alloc_ordered_workqueue("rvu_gen_pf_flr_wq", WQ_HIGHPRI);
+ if (!pfdev->flr_wq)
+ return -ENOMEM;
+
+ pfdev->flr_wrk = devm_kcalloc(pfdev->dev, num_vfs,
+ sizeof(struct flr_work), GFP_KERNEL);
+ if (!pfdev->flr_wrk) {
+ destroy_workqueue(pfdev->flr_wq);
+ return -ENOMEM;
+ }
+
+ for (vf = 0; vf < num_vfs; vf++) {
+ pfdev->flr_wrk[vf].pfdev = pfdev;
+ INIT_WORK(&pfdev->flr_wrk[vf].work, rvu_gen_pf_flr_handler);
+ }
+
+ return 0;
+}
+
static int rvu_gen_pf_sriov_enable(struct pci_dev *pdev, int numvfs)
{
struct gen_pf_dev *pfdev = pci_get_drvdata(pdev);
@@ -713,11 +927,25 @@ static int rvu_gen_pf_sriov_enable(struct pci_dev *pdev, int numvfs)
if (ret)
goto free_mbox;
+ ret = rvu_gen_pf_flr_init(pfdev, numvfs);
+ if (ret)
+ goto free_intr;
+
+ ret = rvu_gen_pf_register_flr_me_intr(pfdev, numvfs);
+ if (ret)
+ goto free_flr;
+
ret = pci_enable_sriov(pdev, numvfs);
if (ret)
- return ret;
+ goto free_flr_intr;
return numvfs;
+free_flr_intr:
+ rvu_gen_pf_disable_flr_me_intr(pfdev);
+free_flr:
+ rvu_gen_pf_flr_wq_destroy(pfdev);
+free_intr:
+ rvu_gen_pf_disable_pfvf_mbox_intr(pfdev, numvfs);
free_mbox:
rvu_gen_pf_pfvf_mbox_destroy(pfdev);
return ret;
@@ -733,6 +961,8 @@ static int rvu_gen_pf_sriov_disable(struct pci_dev *pdev)
pci_disable_sriov(pdev);
+ rvu_gen_pf_disable_flr_me_intr(pfdev);
+ rvu_gen_pf_flr_wq_destroy(pfdev);
rvu_gen_pf_disable_pfvf_mbox_intr(pfdev, numvfs);
rvu_gen_pf_pfvf_mbox_destroy(pfdev);
diff --git a/drivers/soc/marvell/rvu_gen_pf/gen_pf.h b/drivers/soc/marvell/rvu_gen_pf/gen_pf.h
index ad651b97b661..7aacb84df07a 100644
--- a/drivers/soc/marvell/rvu_gen_pf/gen_pf.h
+++ b/drivers/soc/marvell/rvu_gen_pf/gen_pf.h
@@ -16,6 +16,11 @@
struct gen_pf_dev;
+struct flr_work {
+ struct work_struct work;
+ struct gen_pf_dev *pfdev;
+};
+
struct mbox {
struct otx2_mbox mbox;
struct work_struct mbox_wrk;
@@ -33,6 +38,8 @@ struct gen_pf_dev {
struct device *dev;
void __iomem *reg_base;
char *irq_name;
+ struct workqueue_struct *flr_wq;
+ struct flr_work *flr_wrk;
struct work_struct mbox_wrk;
struct work_struct mbox_wrk_up;
--
2.25.1