Message-ID: <VI1PR04MB443142772665BB29B909DFF4EDB10@VI1PR04MB4431.eurprd04.prod.outlook.com>
Date: Wed, 11 Sep 2019 02:01:50 +0000
From: Peng Ma <peng.ma@....com>
To: Vinod Koul <vkoul@...nel.org>
CC: "dan.j.williams@...el.com" <dan.j.williams@...el.com>,
Leo Li <leoyang.li@....com>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"dmaengine@...r.kernel.org" <dmaengine@...r.kernel.org>
Subject: RE: [EXT] Re: [V4 2/2] dmaengine: fsl-dpaa2-qdma: Add NXP dpaa2 qDMA
controller driver for Layerscape SoCs
Hi Vinod,
I sent the V5 of this series on June 25, 2019 and have not received any comments yet. Its current
state in patchwork is "Not Applicable", so please let me know what I need to do next.
Thanks very much for your comments.
Patch link:
https://patchwork.kernel.org/patch/11015035/
https://patchwork.kernel.org/patch/11015033/
Best Regards,
Peng
>-----Original Message-----
>From: Vinod Koul <vkoul@...nel.org>
>Sent: 2019年6月25日 0:46
>To: Peng Ma <peng.ma@....com>
>Cc: dan.j.williams@...el.com; Leo Li <leoyang.li@....com>;
>linux-kernel@...r.kernel.org; dmaengine@...r.kernel.org
>Subject: [EXT] Re: [V4 2/2] dmaengine: fsl-dpaa2-qdma: Add NXP dpaa2 qDMA
>controller driver for Layerscape SoCs
>
>Caution: EXT Email
>
>On 13-06-19, 10:13, Peng Ma wrote:
>> DPPA2(Data Path Acceleration Architecture 2) qDMA supports channel
>> virtualization by allowing DMA
>
>typo virtualization
>
>> jobs to be enqueued into different frame queues.
>> Core can initiate a DMA transaction by preparing a frame
>> descriptor(FD) for each DMA job and enqueuing this job to a frame
>> queue. through a hardware portal. The qDMA
> ^^^
>why this full stop?
>
>> +static struct dpaa2_qdma_comp *
>> +dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan)
>> +{
>> +	struct dpaa2_qdma_comp *comp_temp = NULL;
>> +	unsigned long flags;
>> +
>> +	spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
>> +	if (list_empty(&dpaa2_chan->comp_free)) {
>> +		spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
>> +		comp_temp = kzalloc(sizeof(*comp_temp), GFP_NOWAIT);
>> +		if (!comp_temp)
>> +			goto err;
>> +		comp_temp->fd_virt_addr =
>> +			dma_pool_alloc(dpaa2_chan->fd_pool, GFP_NOWAIT,
>> +				       &comp_temp->fd_bus_addr);
>> +		if (!comp_temp->fd_virt_addr)
>> +			goto err_comp;
>> +
>> +		comp_temp->fl_virt_addr =
>> +			dma_pool_alloc(dpaa2_chan->fl_pool, GFP_NOWAIT,
>> +				       &comp_temp->fl_bus_addr);
>> +		if (!comp_temp->fl_virt_addr)
>> +			goto err_fd_virt;
>> +
>> +		comp_temp->desc_virt_addr =
>> +			dma_pool_alloc(dpaa2_chan->sdd_pool, GFP_NOWAIT,
>> +				       &comp_temp->desc_bus_addr);
>> +		if (!comp_temp->desc_virt_addr)
>> +			goto err_fl_virt;
>> +
>> +		comp_temp->qchan = dpaa2_chan;
>> +		return comp_temp;
>> +	}
>> +
>> +	comp_temp = list_first_entry(&dpaa2_chan->comp_free,
>> +				     struct dpaa2_qdma_comp, list);
>> +	list_del(&comp_temp->list);
>> +	spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
>> +
>> +	comp_temp->qchan = dpaa2_chan;
>> +
>> +	return comp_temp;
>> +
>> +err_fl_virt:
>
>no err logs? how will you know what went wrong?
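A minimal sketch of how the unwind path could report the failure (the struct device used for
logging is an assumption; the quoted hunk does not show which device pointer the channel keeps,
so "dev" here is a placeholder for whatever is reachable from dpaa2_chan):

err_fl_virt:
	dma_pool_free(dpaa2_chan->fl_pool, comp_temp->fl_virt_addr,
		      comp_temp->fl_bus_addr);
err_fd_virt:
	dma_pool_free(dpaa2_chan->fd_pool, comp_temp->fd_virt_addr,
		      comp_temp->fd_bus_addr);
err_comp:
	kfree(comp_temp);
err:
	/* Single message at the bottom so every failing path is visible. */
	dev_err(dev, "failed to allocate a qdma descriptor\n");
	return NULL;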
>
>> +static enum
>> +dma_status dpaa2_qdma_tx_status(struct dma_chan *chan,
>> +				dma_cookie_t cookie,
>> +				struct dma_tx_state *txstate)
>> +{
>> +	return dma_cookie_status(chan, cookie, txstate);
>
>why not set dma_cookie_status as this callback?
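Right, dma_cookie_status() already has the device_tx_status prototype, so the wrapper can be
dropped and the helper assigned directly where the dma_device is filled in. A sketch, with the
container name dpaa2_qdma->dma_dev assumed for illustration:

	struct dma_device *dma_dev = &dpaa2_qdma->dma_dev;

	/* No wrapper needed: dma_cookie_status() matches the callback signature. */
	dma_dev->device_tx_status = dma_cookie_status;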
>
>> +static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
>> +{
>> +	struct dpaa2_qdma_priv_per_prio *ppriv;
>> +	struct device *dev = &ls_dev->dev;
>> +	struct dpaa2_qdma_priv *priv;
>> +	u8 prio_def = DPDMAI_PRIO_NUM;
>> +	int err = -EINVAL;
>> +	int i;
>> +
>> +	priv = dev_get_drvdata(dev);
>> +
>> +	priv->dev = dev;
>> +	priv->dpqdma_id = ls_dev->obj_desc.id;
>> +
>> +	/* Get the handle for the DPDMAI this interface is associated with */
>> +	err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id,
>> +			  &ls_dev->mc_handle);
>> +	if (err) {
>> +		dev_err(dev, "dpdmai_open() failed\n");
>> +		return err;
>> +	}
>> +	dev_info(dev, "Opened dpdmai object successfully\n");
>
>this is noise in kernel, consider debug level
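Something like the following keeps the information available via dynamic debug while staying
quiet by default (sketch of the suggested change only, same surrounding code as the quoted hunk):

	err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpdmai_open() failed\n");
		return err;
	}
	/* Success is the expected case, so demote it to debug level. */
	dev_dbg(dev, "Opened dpdmai object successfully\n");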
>
>> +static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
>> +{
>> +	int err;
>> +	int i, num;
>> +	struct device *dev = priv->dev;
>> +	struct dpaa2_qdma_priv_per_prio *ppriv;
>> +	struct dpdmai_rx_queue_cfg rx_queue_cfg;
>> +	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
>
>the order is reverse than used in other fn, please stick to one style!
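For reference, a sketch of the declarations reordered to the longest-first style used in
dpaa2_qdma_setup(); ls_dev is initialised from priv->dev directly so it no longer depends on the
local dev being declared before it (only the ordering is intended to change here):

static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
{
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(priv->dev);
	struct dpaa2_qdma_priv_per_prio *ppriv;
	struct dpdmai_rx_queue_cfg rx_queue_cfg;
	struct device *dev = priv->dev;
	int i, num;
	int err;
	...
}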
>--
>~Vinod