[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <CAPYmKFtHn+ggujCWeQoSaWPK-2G=-Om0DuCpFyf+ha+OXQfsnw@mail.gmail.com>
Date: Tue, 10 Dec 2024 16:48:34 +0800
From: Xu Lu <luxu.kernel@...edance.com>
To: yunhui cui <cuiyunhui@...edance.com>
Cc: Zong Li <zong.li@...ive.com>, joro@...tes.org, will@...nel.org,
robin.murphy@....com, tjeznach@...osinc.com, paul.walmsley@...ive.com,
palmer@...belt.com, aou@...s.berkeley.edu, jgg@...pe.ca, kevin.tian@...el.com,
linux-kernel@...r.kernel.org, iommu@...ts.linux.dev,
linux-riscv@...ts.infradead.org
Subject: Re: [External] [RFC PATCH v2 02/10] iommu/riscv: support HPM and
interrupt handling
Hi Zong Li,
Thanks for your work. We have tested your IOMMU PMU driver and have
some feedback.
1. It may be better to clear ipsr.PMIP first and then handle the PMU
overflow IRQ in riscv_iommu_hpm_irq_handler(). Otherwise, if a new
overflow happens after riscv_iommu_pmu_handle_irq() returns but before
PMIP is cleared, that interrupt will be lost.
2. The period_left can be messed up in riscv_iommu_pmu_update(),
because riscv_iommu_pmu_get_counter() always returns the whole register
value, while bit 63 of iohpmcycles actually indicates whether an
overflow has happened rather than being part of the current count.
Perhaps these two functions should be implemented as:
static void riscv_iommu_pmu_set_counter(struct riscv_iommu_pmu *pmu, u32 idx,
u64 value)
{
void __iomem *addr = pmu->reg + RISCV_IOMMU_REG_IOHPMCYCLES;
if (WARN_ON_ONCE(idx < 0 || idx > pmu->num_counters))
return;
if (idx == 0)
value = (value & ~RISCV_IOMMU_IOHPMCYCLES_OF) |
(readq(addr) & RISCV_IOMMU_IOHPMCYCLES_OF);
writeq(FIELD_PREP(RISCV_IOMMU_IOHPMCTR_COUNTER, value), addr + idx * 8);
}
static u64 riscv_iommu_pmu_get_counter(struct riscv_iommu_pmu *pmu, u32 idx)
{
void __iomem *addr = pmu->reg + RISCV_IOMMU_REG_IOHPMCYCLES;
u64 value;
if (WARN_ON_ONCE(idx < 0 || idx > pmu->num_counters))
return -EINVAL;
value = readq(addr + idx * 8);
if (idx == 0)
return FIELD_GET(RISCV_IOMMU_IOHPMCYCLES_COUNTER, value);
return FIELD_GET(RISCV_IOMMU_IOHPMCTR_COUNTER, value);
}
Please ignore me if these issues have already been discussed.
Best regards,
Xu Lu
On Tue, Dec 10, 2024 at 3:55 PM yunhui cui <cuiyunhui@...edance.com> wrote:
>
> Add Luxu in the loop.
>
> On Fri, Jun 14, 2024 at 10:22 PM Zong Li <zong.li@...ive.com> wrote:
> >
> > This patch initialize the pmu stuff and uninitialize it when driver
> > removing. The interrupt handling is also provided, this handler need to
> > be primary handler instead of thread function, because pt_regs is empty
> > when threading the IRQ, but pt_regs is necessary by perf_event_overflow.
> >
> > Signed-off-by: Zong Li <zong.li@...ive.com>
> > ---
> > drivers/iommu/riscv/iommu.c | 65 +++++++++++++++++++++++++++++++++++++
> > 1 file changed, 65 insertions(+)
> >
> > diff --git a/drivers/iommu/riscv/iommu.c b/drivers/iommu/riscv/iommu.c
> > index 8b6a64c1ad8d..1716b2251f38 100644
> > --- a/drivers/iommu/riscv/iommu.c
> > +++ b/drivers/iommu/riscv/iommu.c
> > @@ -540,6 +540,62 @@ static irqreturn_t riscv_iommu_fltq_process(int irq, void *data)
> > return IRQ_HANDLED;
> > }
> >
> > +/*
> > + * IOMMU Hardware performance monitor
> > + */
> > +
> > +/* HPM interrupt primary handler */
> > +static irqreturn_t riscv_iommu_hpm_irq_handler(int irq, void *dev_id)
> > +{
> > + struct riscv_iommu_device *iommu = (struct riscv_iommu_device *)dev_id;
> > +
> > + /* Process pmu irq */
> > + riscv_iommu_pmu_handle_irq(&iommu->pmu);
> > +
> > + /* Clear performance monitoring interrupt pending */
> > + riscv_iommu_writel(iommu, RISCV_IOMMU_REG_IPSR, RISCV_IOMMU_IPSR_PMIP);
> > +
> > + return IRQ_HANDLED;
> > +}
> > +
> > +/* HPM initialization */
> > +static int riscv_iommu_hpm_enable(struct riscv_iommu_device *iommu)
> > +{
> > + int rc;
> > +
> > + if (!(iommu->caps & RISCV_IOMMU_CAPABILITIES_HPM))
> > + return 0;
> > +
> > + /*
> > + * pt_regs is empty when threading the IRQ, but pt_regs is necessary
> > + * by perf_event_overflow. Use primary handler instead of thread
> > + * function for PM IRQ.
> > + *
> > + * Set the IRQF_ONESHOT flag because this IRQ might be shared with
> > + * other threaded IRQs by other queues.
> > + */
> > + rc = devm_request_irq(iommu->dev,
> > + iommu->irqs[riscv_iommu_queue_vec(iommu, RISCV_IOMMU_IPSR_PMIP)],
> > + riscv_iommu_hpm_irq_handler, IRQF_ONESHOT | IRQF_SHARED, NULL, iommu);
> > + if (rc)
> > + return rc;
> > +
> > + return riscv_iommu_pmu_init(&iommu->pmu, iommu->reg, dev_name(iommu->dev));
> > +}
> > +
> > +/* HPM uninitialization */
> > +static void riscv_iommu_hpm_disable(struct riscv_iommu_device *iommu)
> > +{
> > + if (!(iommu->caps & RISCV_IOMMU_CAPABILITIES_HPM))
> > + return;
> > +
> > + devm_free_irq(iommu->dev,
> > + iommu->irqs[riscv_iommu_queue_vec(iommu, RISCV_IOMMU_IPSR_PMIP)],
> > + iommu);
> > +
> > + riscv_iommu_pmu_uninit(&iommu->pmu);
> > +}
> > +
> > /* Lookup and initialize device context info structure. */
> > static struct riscv_iommu_dc *riscv_iommu_get_dc(struct riscv_iommu_device *iommu,
> > unsigned int devid)
> > @@ -1612,6 +1668,9 @@ void riscv_iommu_remove(struct riscv_iommu_device *iommu)
> > riscv_iommu_iodir_set_mode(iommu, RISCV_IOMMU_DDTP_IOMMU_MODE_OFF);
> > riscv_iommu_queue_disable(&iommu->cmdq);
> > riscv_iommu_queue_disable(&iommu->fltq);
> > +
> > + if (iommu->caps & RISCV_IOMMU_CAPABILITIES_HPM)
> > + riscv_iommu_pmu_uninit(&iommu->pmu);
> > }
> >
> > int riscv_iommu_init(struct riscv_iommu_device *iommu)
> > @@ -1651,6 +1710,10 @@ int riscv_iommu_init(struct riscv_iommu_device *iommu)
> > if (rc)
> > goto err_queue_disable;
> >
> > + rc = riscv_iommu_hpm_enable(iommu);
> > + if (rc)
> > + goto err_hpm_disable;
> > +
> > rc = iommu_device_sysfs_add(&iommu->iommu, NULL, NULL, "riscv-iommu@%s",
> > dev_name(iommu->dev));
> > if (rc) {
> > @@ -1669,6 +1732,8 @@ int riscv_iommu_init(struct riscv_iommu_device *iommu)
> > err_remove_sysfs:
> > iommu_device_sysfs_remove(&iommu->iommu);
> > err_iodir_off:
> > + riscv_iommu_hpm_disable(iommu);
> > +err_hpm_disable:
> > riscv_iommu_iodir_set_mode(iommu, RISCV_IOMMU_DDTP_IOMMU_MODE_OFF);
> > err_queue_disable:
> > riscv_iommu_queue_disable(&iommu->fltq);
> > --
> > 2.17.1
> >
> >
>
> Thanks,
> Yunhui
Powered by blists - more mailing lists