Message-ID: <8f3c9f97d2de59ba73bf3c50f16d262d68ef4b2d.camel@gmail.com>
Date: Sun, 22 Sep 2024 00:49:38 +0200
From: Alexander Sverdlin <alexander.sverdlin@...il.com>
To: Anshumali Gaur <agaur@...vell.com>, conor.dooley@...rochip.com,
ulf.hansson@...aro.org, arnd@...db.de, linus.walleij@...aro.org,
nikita.shubin@...uefel.me, vkoul@...nel.org, cyy@...self.name,
krzysztof.kozlowski@...aro.org, linux-kernel@...r.kernel.org,
sgoutham@...vell.com
Subject: Re: [PATCH 4/4] soc: marvell: rvu-pf: Handle function level reset
(FLR) IRQs for VFs

Hi Anshumali!
On Fri, 2024-09-20 at 16:53 +0530, Anshumali Gaur wrote:
> Add a PCIe FLR interrupt handler for VFs. When FLR is triggered for a
> VF, the parent PF gets an interrupt. The PF creates a mbox message and
> sends it to the RVU Admin Function (AF). The AF cleans up all the
> resources attached to that specific VF and acks the PF that the FLR
> has been handled.
>
> Signed-off-by: Anshumali Gaur <agaur@...vell.com>
> ---
[]
> diff --git a/drivers/soc/marvell/rvu_gen_pf/gen_pf.c b/drivers/soc/marvell/rvu_gen_pf/gen_pf.c
> index 624c55123a19..e2e7c11dd85d 100644
> --- a/drivers/soc/marvell/rvu_gen_pf/gen_pf.c
> +++ b/drivers/soc/marvell/rvu_gen_pf/gen_pf.c
> @@ -691,6 +700,211 @@ static int rvu_gen_pf_register_pfvf_mbox_intr(struct gen_pf_dev *pfdev, int numv
> return 0;
> }
>
> +static void rvu_gen_pf_flr_handler(struct work_struct *work)
> +{
> + struct flr_work *flrwork = container_of(work, struct flr_work, work);
> + struct gen_pf_dev *pfdev = flrwork->pfdev;
> + struct mbox *mbox = &pfdev->mbox;
> + struct msg_req *req;
> + int vf, reg = 0;
> +
> + vf = flrwork - pfdev->flr_wrk;
> +
> + mutex_lock(&mbox->lock);
> + req = gen_pf_mbox_alloc_msg_vf_flr(mbox);
So this function wants to be a product of the "M" macro from patch 2?
But does that expansion actually happen?
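For reference, the in-tree octeontx2 drivers generate these allocators
roughly like this (a sketch from memory, with the prefix adapted to
this driver; I am assuming patch 2 follows the same convention):

	#define M(_name, _id, _fn_name, _req_type, _rsp_type)		\
	static struct _req_type __maybe_unused				\
	*gen_pf_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox)		\
	{								\
		struct _req_type *req;					\
									\
		req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(	\
			&mbox->mbox, 0, sizeof(struct _req_type),	\
			sizeof(struct _rsp_type));			\
		if (!req)						\
			return NULL;					\
		req->hdr.id = _id;					\
		req->hdr.sig = OTX2_MBOX_REQ_SIG;			\
		return req;						\
	}
	MBOX_MESSAGES
	#undef M

so gen_pf_mbox_alloc_msg_vf_flr() only exists if MBOX_MESSAGES carries
a matching M(..., vf_flr, ...) entry.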
> + if (!req) {
> + mutex_unlock(&mbox->lock);
> + return;
> + }
> + req->hdr.pcifunc &= RVU_PFVF_FUNC_MASK;
Did you mean "req->hdr.pcifunc &= ~RVU_PFVF_FUNC_MASK;"?
> + req->hdr.pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;
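With the current "&= RVU_PFVF_FUNC_MASK" the PF bits are dropped and any
stale FUNC bits survive the following OR. A worked example, assuming
RVU_PFVF_FUNC_MASK is 0x3ff as in the in-tree rvu.h, with a stale
pcifunc of 0x0403 (PF 1, FUNC 3) and vf = 5:

	pcifunc &= RVU_PFVF_FUNC_MASK;		  /* 0x0003: PF lost, stale FUNC kept */
	pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK; /* 0x0007: stale bit 0 leaked */

versus

	pcifunc &= ~RVU_PFVF_FUNC_MASK;		  /* 0x0400: FUNC cleared, PF kept */
	pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK; /* 0x0406: correct */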
> +
> + if (!rvu_gen_pf_sync_mbox_msg(&pfdev->mbox)) {
> + if (vf >= 64) {
> + reg = 1;
> + vf = vf - 64;
> + }
> +		/* clear transaction pending bit */
> + writeq(BIT_ULL(vf), pfdev->reg_base + RVU_PF_VFTRPENDX(reg));
> + writeq(BIT_ULL(vf), pfdev->reg_base + RVU_PF_VFFLR_INT_ENA_W1SX(reg));
> + }
> +
> + mutex_unlock(&mbox->lock);
> +}
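Nit: this vf >= 64 special case, and the open-coded reg/bit math in the
two interrupt handlers below, could be shared. A hypothetical helper
(not in this patch):

	/* Map a VF index to its 64-bit register bank and bit. */
	static inline int vf_to_reg(int vf, u64 *bit)
	{
		*bit = BIT_ULL(vf % 64);
		return vf / 64;
	}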
> +
> +static irqreturn_t rvu_gen_pf_me_intr_handler(int irq, void *pf_irq)
> +{
> + struct gen_pf_dev *pfdev = (struct gen_pf_dev *)pf_irq;
> + int vf, reg, num_reg = 1;
> + u64 intr;
> +
> + if (pfdev->total_vfs > 64)
> + num_reg = 2;
> +
> + for (reg = 0; reg < num_reg; reg++) {
> + intr = readq(pfdev->reg_base + RVU_PF_VFME_INTX(reg));
> + if (!intr)
> + continue;
> + for (vf = 0; vf < 64; vf++) {
> + if (!(intr & BIT_ULL(vf)))
> + continue;
> + /* clear trpend bit */
> + writeq(BIT_ULL(vf), pfdev->reg_base + RVU_PF_VFTRPENDX(reg));
> + /* clear interrupt */
> + writeq(BIT_ULL(vf), pfdev->reg_base + RVU_PF_VFME_INTX(reg));
> + }
> + }
Should the IRQ handler do anything else here besides acknowledging the
interrupt?
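For comparison, otx2_pf_me_intr_handler() in the in-tree nic driver does
the same, so perhaps acking really is all there is to do. If so, a
dev_dbg() would at least make the Master Enable event visible, e.g.
(hypothetical, assuming gen_pf_dev keeps its struct pci_dev in ->pdev):

	dev_dbg(&pfdev->pdev->dev, "ME interrupt: VF%d\n", vf + 64 * reg);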
> + return IRQ_HANDLED;
> +}
> +
> +static irqreturn_t rvu_gen_pf_flr_intr_handler(int irq, void *pf_irq)
> +{
> + struct gen_pf_dev *pfdev = (struct gen_pf_dev *)pf_irq;
> + int reg, dev, vf, start_vf, num_reg = 1;
> + u64 intr;
> +
> + if (pfdev->total_vfs > 64)
> + num_reg = 2;
> +
> + for (reg = 0; reg < num_reg; reg++) {
> + intr = readq(pfdev->reg_base + RVU_PF_VFFLR_INTX(reg));
> + if (!intr)
> + continue;
> + start_vf = 64 * reg;
> + for (vf = 0; vf < 64; vf++) {
> + if (!(intr & BIT_ULL(vf)))
> + continue;
> + dev = vf + start_vf;
> + queue_work(pfdev->flr_wq, &pfdev->flr_wrk[dev].work);
> + /* Clear interrupt */
> + writeq(BIT_ULL(vf), pfdev->reg_base + RVU_PF_VFFLR_INTX(reg));
> + /* Disable the interrupt */
> + writeq(BIT_ULL(vf), pfdev->reg_base + RVU_PF_VFFLR_INT_ENA_W1CX(reg));
> + }
> + }
> + return IRQ_HANDLED;
> +}
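For context, the pointer arithmetic in rvu_gen_pf_flr_handler() above
("flrwork - pfdev->flr_wrk") only works if flr_wrk is one array entry
per VF. I assume the elided probe-time hunk wires this up roughly the
way the otx2 nic driver does (a sketch, reusing names from this patch):

	pfdev->flr_wq = alloc_workqueue("gen_pf_flr_wq",
					WQ_UNBOUND | WQ_HIGHPRI, 1);
	if (!pfdev->flr_wq)
		return -ENOMEM;

	for (vf = 0; vf < num_vfs; vf++) {
		pfdev->flr_wrk[vf].pfdev = pfdev;
		INIT_WORK(&pfdev->flr_wrk[vf].work, rvu_gen_pf_flr_handler);
	}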
[]
--
Alexander Sverdlin.