[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <0814df45-15b2-4dc3-98fd-8f30befc800a@intel.com>
Date: Wed, 6 Aug 2025 10:12:00 -0700
From: Dave Jiang <dave.jiang@...el.com>
To: Vinicius Costa Gomes <vinicius.gomes@...el.com>,
Vinod Koul <vkoul@...nel.org>, Dan Williams <dan.j.williams@...el.com>,
Fenghua Yu <fenghuay@...dia.com>
Cc: dmaengine@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH 4/9] dmaengine: idxd: Flush kernel workqueues on Field
Level Reset
On 8/4/25 6:27 PM, Vinicius Costa Gomes wrote:
> When a Field Level Reset (FLR) happens, terminate the pending
> descriptors that were issued by in-kernel users and disable the
> interrupts associated with those. They will be re-enabled after FLR
> finishes.
>
> Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@...el.com>
Reviewed-by: Dave Jiang <dave.jiang@...el.com>
> ---
> drivers/dma/idxd/device.c | 24 ++++++++++++++++++++++++
> drivers/dma/idxd/idxd.h | 1 +
> drivers/dma/idxd/irq.c | 5 +++++
> 3 files changed, 30 insertions(+)
>
> diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
> index c599a902767ee9904d75a0510a911596e35a259b..287cf3bf1f5a2efdc9037968e9a4eed506e489c3 100644
> --- a/drivers/dma/idxd/device.c
> +++ b/drivers/dma/idxd/device.c
> @@ -1315,6 +1315,11 @@ void idxd_wq_free_irq(struct idxd_wq *wq)
>
> free_irq(ie->vector, ie);
> idxd_flush_pending_descs(ie);
> +
> + /* The interrupt might have been already released by FLR */
> + if (ie->int_handle == INVALID_INT_HANDLE)
> + return;
> +
> if (idxd->request_int_handles)
> idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
> idxd_device_clear_perm_entry(idxd, ie);
> @@ -1323,6 +1328,25 @@ void idxd_wq_free_irq(struct idxd_wq *wq)
> ie->pasid = IOMMU_PASID_INVALID;
> }
>
> +void idxd_wqs_flush_descs(struct idxd_device *idxd)
> +{
> + struct idxd_wq *wq;
> + int i;
> +
> + for (i = 0; i < idxd->max_wqs; i++) {
> + wq = idxd->wqs[i];
> + if (wq->state == IDXD_WQ_ENABLED && wq->type == IDXD_WQT_KERNEL) {
> + struct idxd_irq_entry *ie = &wq->ie;
> +
> + idxd_flush_pending_descs(ie);
> + if (idxd->request_int_handles)
> + idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
> + idxd_device_clear_perm_entry(idxd, ie);
> + ie->int_handle = INVALID_INT_HANDLE;
> + }
> + }
> +}
> +
> int idxd_wq_request_irq(struct idxd_wq *wq)
> {
> struct idxd_device *idxd = wq->idxd;
> diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
> index 74e6695881e6f1684512601ca2c2ee241aaf0a78..6ccca3c56556dbffe0a7c983a2f11f6c73ff2bfd 100644
> --- a/drivers/dma/idxd/idxd.h
> +++ b/drivers/dma/idxd/idxd.h
> @@ -737,6 +737,7 @@ static inline void idxd_desc_complete(struct idxd_desc *desc,
> int idxd_register_devices(struct idxd_device *idxd);
> void idxd_unregister_devices(struct idxd_device *idxd);
> void idxd_wqs_quiesce(struct idxd_device *idxd);
> +void idxd_wqs_flush_descs(struct idxd_device *idxd);
> bool idxd_queue_int_handle_resubmit(struct idxd_desc *desc);
> void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count);
> int idxd_load_iaa_device_defaults(struct idxd_device *idxd);
> diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
> index 74059fe43fafeb930f58db21d3824f62b095b968..26547586fcfaa1b9d244b678bf8e209b7b14d35a 100644
> --- a/drivers/dma/idxd/irq.c
> +++ b/drivers/dma/idxd/irq.c
> @@ -417,6 +417,11 @@ static irqreturn_t idxd_halt(struct idxd_device *idxd)
> } else if (gensts.reset_type == IDXD_DEVICE_RESET_FLR) {
> idxd->state = IDXD_DEV_HALTED;
> idxd_mask_error_interrupts(idxd);
> + /* Flush all pending descriptors, and disable
> + * interrupts, they will be re-enabled when FLR
> + * concludes.
> + */
> + idxd_wqs_flush_descs(idxd);
> dev_dbg(&idxd->pdev->dev,
> "idxd halted, doing FLR. After FLR, configs are restored\n");
> INIT_WORK(&idxd->work, idxd_device_flr);
>
Powered by blists - more mailing lists