Message-ID: <ebdbb517-ffa2-422a-989e-a4f19ab0163a@linaro.org>
Date: Fri, 27 Jun 2025 18:03:56 +0100
From: Bryan O'Donoghue <bryan.odonoghue@...aro.org>
To: Vikash Garodia <quic_vgarodia@...cinc.com>,
Dikshita Agarwal <quic_dikshita@...cinc.com>,
Abhinav Kumar <abhinav.kumar@...ux.dev>,
Mauro Carvalho Chehab <mchehab@...nel.org>, Rob Herring <robh@...nel.org>,
Krzysztof Kozlowski <krzk+dt@...nel.org>, Conor Dooley <conor+dt@...nel.org>
Cc: linux-media@...r.kernel.org, linux-arm-msm@...r.kernel.org,
devicetree@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v3 3/5] media: iris: use np_dev as preferred DMA device in
HFI queue management
On 27/06/2025 16:48, Vikash Garodia wrote:
> Update HFI interface queues to use np_dev(preferred non-pixel device)
> for DMA memory allocation and deallocation if available. This allows
> platforms with separate DMA domain for non-pixel to use the appropriate
> device handle when managing HFI queues and SFR regions.
>
> Signed-off-by: Vikash Garodia <quic_vgarodia@...cinc.com>
> ---
> drivers/media/platform/qcom/iris/iris_hfi_queue.c | 20 +++++++++++++-------
> 1 file changed, 13 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/media/platform/qcom/iris/iris_hfi_queue.c b/drivers/media/platform/qcom/iris/iris_hfi_queue.c
> index fac7df0c4d1aec647aeca275ab19651c9ba23733..a31ebe947f525f0d7c09f8b786939d01b62532c3 100644
> --- a/drivers/media/platform/qcom/iris/iris_hfi_queue.c
> +++ b/drivers/media/platform/qcom/iris/iris_hfi_queue.c
> @@ -247,24 +247,27 @@ static void iris_hfi_queue_deinit(struct iris_iface_q_info *iface_q)
> int iris_hfi_queues_init(struct iris_core *core)
> {
> struct iris_hfi_queue_table_header *q_tbl_hdr;
> + struct device *dev;
> u32 queue_size;
>
> + dev = core->np_dev ? core->np_dev : core->dev;
> +
dev = core->dev;
if (core->np_dev)
	dev = core->np_dev;

is much easier to read.
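Since the same fallback shows up again in iris_hfi_queues_deinit(), a
small helper might read better still - just a sketch on my side, using
the iris_core fields this patch already relies on, not something that
exists in the driver today:

/*
 * Untested sketch: prefer the non-pixel DMA device when the platform
 * provides one, otherwise fall back to the main iris device.
 */
static struct device *iris_hfi_dma_dev(struct iris_core *core)
{
	if (core->np_dev)
		return core->np_dev;

	return core->dev;
}

Both call sites could then pass iris_hfi_dma_dev(core) straight into
dma_alloc_attrs()/dma_free_attrs(). Entirely up to you though.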
> /* Iris hardware requires 4K queue alignment */
> queue_size = ALIGN((sizeof(*q_tbl_hdr) + (IFACEQ_QUEUE_SIZE * IFACEQ_NUMQ)), SZ_4K);
> - core->iface_q_table_vaddr = dma_alloc_attrs(core->dev, queue_size,
> + core->iface_q_table_vaddr = dma_alloc_attrs(dev, queue_size,
> &core->iface_q_table_daddr,
> GFP_KERNEL, DMA_ATTR_WRITE_COMBINE);
> if (!core->iface_q_table_vaddr) {
> - dev_err(core->dev, "queues alloc and map failed\n");
> + dev_err(dev, "queues alloc and map failed\n");
> return -ENOMEM;
> }
>
> - core->sfr_vaddr = dma_alloc_attrs(core->dev, SFR_SIZE,
> + core->sfr_vaddr = dma_alloc_attrs(dev, SFR_SIZE,
> &core->sfr_daddr,
> GFP_KERNEL, DMA_ATTR_WRITE_COMBINE);
> if (!core->sfr_vaddr) {
> - dev_err(core->dev, "sfr alloc and map failed\n");
> - dma_free_attrs(core->dev, sizeof(*q_tbl_hdr), core->iface_q_table_vaddr,
> + dev_err(dev, "sfr alloc and map failed\n");
> + dma_free_attrs(dev, sizeof(*q_tbl_hdr), core->iface_q_table_vaddr,
> core->iface_q_table_daddr, DMA_ATTR_WRITE_COMBINE);
> return -ENOMEM;
> }
> @@ -292,6 +295,7 @@ int iris_hfi_queues_init(struct iris_core *core)
>
> void iris_hfi_queues_deinit(struct iris_core *core)
> {
> + struct device *dev;
> u32 queue_size;
>
> if (!core->iface_q_table_vaddr)
> @@ -301,7 +305,9 @@ void iris_hfi_queues_deinit(struct iris_core *core)
> iris_hfi_queue_deinit(&core->message_queue);
> iris_hfi_queue_deinit(&core->command_queue);
>
> - dma_free_attrs(core->dev, SFR_SIZE, core->sfr_vaddr,
> + dev = core->np_dev ? core->np_dev : core->dev;
And the same comment applies here.
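If you did go with a helper like the hypothetical iris_hfi_dma_dev()
sketched above, this would collapse to:

	dma_free_attrs(iris_hfi_dma_dev(core), SFR_SIZE, core->sfr_vaddr,
		       core->sfr_daddr, DMA_ATTR_WRITE_COMBINE);

but that's purely a readability nit.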
> +
> + dma_free_attrs(dev, SFR_SIZE, core->sfr_vaddr,
> core->sfr_daddr, DMA_ATTR_WRITE_COMBINE);
>
> core->sfr_vaddr = NULL;
> @@ -310,7 +316,7 @@ void iris_hfi_queues_deinit(struct iris_core *core)
> queue_size = ALIGN(sizeof(struct iris_hfi_queue_table_header) +
> (IFACEQ_QUEUE_SIZE * IFACEQ_NUMQ), SZ_4K);
>
> - dma_free_attrs(core->dev, queue_size, core->iface_q_table_vaddr,
> + dma_free_attrs(dev, queue_size, core->iface_q_table_vaddr,
> core->iface_q_table_daddr, DMA_ATTR_WRITE_COMBINE);
>
> core->iface_q_table_vaddr = NULL;
>
Other than that.
Reviewed-by: Bryan O'Donoghue <bryan.odonoghue@...aro.org>