Message-ID: <CAJhJPsXv42e23tyQjA52_my1Au6nP_VdLX3c_yzk5MxadQ95iw@mail.gmail.com>
Date: Sun, 4 Jul 2021 22:45:28 +0800
From: Kelvin Cheung <keguang.zhang@...il.com>
To: Vinod Koul <vkoul@...nel.org>
Cc: dmaengine@...r.kernel.org, linux-mips@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH V4 RESEND] dmaengine: Loongson1: Add Loongson1 dmaengine driver
On Mon, Jun 7, 2021 at 7:07 PM Vinod Koul <vkoul@...nel.org> wrote:
>
> On 21-05-21, 07:02, Keguang Zhang wrote:
>
> > +config LOONGSON1_DMA
> > + tristate "Loongson1 DMA support"
> > + depends on MACH_LOONGSON32
>
> Why does it have to do that? The dma driver is generic..
The DMA controller handled by this driver only exists on LOONGSON32 CPUs, so there is no point building it anywhere else.
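If build-test coverage on other platforms were ever wanted, the dependency could presumably be relaxed like this (COMPILE_TEST is only a suggestion on my side, not something the current patch does):

config LOONGSON1_DMA
        tristate "Loongson1 DMA support"
        depends on MACH_LOONGSON32 || COMPILE_TEST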
>
> > +static int ls1x_dma_alloc_chan_resources(struct dma_chan *dchan)
> > +{
> > + struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
> > +
> > + chan->desc_pool = dma_pool_create(dma_chan_name(dchan),
> > + dchan->device->dev,
> > + sizeof(struct ls1x_dma_lli),
> > + __alignof__(struct ls1x_dma_lli), 0);
> > + if (!chan->desc_pool) {
> > + dev_err(chan2dev(dchan),
> > + "failed to alloc DMA descriptor pool!\n");
>
> This can be dropped, allocators will warn you for the allocation
> failures
Will do.
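Something like this, then (untested sketch with the message dropped, everything else as in the patch):

static int ls1x_dma_alloc_chan_resources(struct dma_chan *dchan)
{
        struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);

        /* the allocator already reports the failure, so no dev_err() */
        chan->desc_pool = dma_pool_create(dma_chan_name(dchan),
                                          dchan->device->dev,
                                          sizeof(struct ls1x_dma_lli),
                                          __alignof__(struct ls1x_dma_lli), 0);
        if (!chan->desc_pool)
                return -ENOMEM;

        return 0;
}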
>
> > + return -ENOMEM;
> > + }
> > +
> > + return 0;
> > +}
> > +
> > +static void ls1x_dma_free_desc(struct virt_dma_desc *vdesc)
> > +{
> > + struct ls1x_dma_desc *desc = to_ls1x_dma_desc(vdesc);
> > +
> > + if (desc->nr_descs) {
> > + unsigned int i = desc->nr_descs;
> > + struct ls1x_dma_hwdesc *hwdesc;
> > +
> > + do {
> > + hwdesc = &desc->hwdesc[--i];
> > + dma_pool_free(desc->chan->desc_pool, hwdesc->lli,
> > + hwdesc->phys);
> > + } while (i);
> > + }
> > +
> > + kfree(desc);
> > +}
> > +
> > +static struct ls1x_dma_desc *ls1x_dma_alloc_desc(struct ls1x_dma_chan *chan,
> > + int sg_len)
>
> single line now :)
Will do.
>
> > +{
> > + struct ls1x_dma_desc *desc;
> > + struct dma_chan *dchan = &chan->vchan.chan;
> > +
> > + desc = kzalloc(struct_size(desc, hwdesc, sg_len), GFP_NOWAIT);
> > + if (!desc)
> > + dev_err(chan2dev(dchan), "failed to alloc DMA descriptor!\n");
>
> this can be dropped too..
Will do.
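Combined with the single-line prototype from the earlier comment, the opening of the function would look roughly like this (sketch; the dchan local goes away since it was only there for the message, and I'm assuming the early return of NULL stays as-is):

static struct ls1x_dma_desc *ls1x_dma_alloc_desc(struct ls1x_dma_chan *chan, int sg_len)
{
        struct ls1x_dma_desc *desc;

        /* kzalloc() already complains on failure, so no dev_err() here */
        desc = kzalloc(struct_size(desc, hwdesc, sg_len), GFP_NOWAIT);
        if (!desc)
                return NULL;
        ...
}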
>
> > +static struct dma_async_tx_descriptor *
> > +ls1x_dma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
> > + unsigned int sg_len,
> > + enum dma_transfer_direction direction,
> > + unsigned long flags, void *context)
> > +{
> > + struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
> > + struct dma_slave_config *cfg = &chan->cfg;
> > + struct ls1x_dma_desc *desc;
> > + struct scatterlist *sg;
> > + unsigned int dev_addr, bus_width, cmd, i;
> > +
> > + if (!is_slave_direction(direction)) {
> > + dev_err(chan2dev(dchan), "invalid DMA direction!\n");
> > + return NULL;
> > + }
> > +
> > + dev_dbg(chan2dev(dchan), "sg_len=%d, dir=%s, flags=0x%lx\n", sg_len,
> > + direction == DMA_MEM_TO_DEV ? "to device" : "from device",
> > + flags);
> > +
> > + switch (direction) {
> > + case DMA_MEM_TO_DEV:
> > + dev_addr = cfg->dst_addr;
> > + bus_width = cfg->dst_addr_width;
> > + cmd = LS1X_DMA_RAM2DEV | LS1X_DMA_INT;
> > + break;
> > + case DMA_DEV_TO_MEM:
> > + dev_addr = cfg->src_addr;
> > + bus_width = cfg->src_addr_width;
> > + cmd = LS1X_DMA_INT;
> > + break;
> > + default:
> > + dev_err(chan2dev(dchan),
> > + "unsupported DMA transfer mode! %d\n", direction);
> > + return NULL;
>
> will this be ever executed?
Just in case.
>
> > +static int ls1x_dma_slave_config(struct dma_chan *dchan,
> > + struct dma_slave_config *config)
> > +{
> > + struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
> > +
> > + if (!dchan)
> > + return -EINVAL;
>
> should this not be checked before you dereference this to get chan
Will drop this check.
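It would end up roughly like this (sketch; the body just caching the config is my assumption of what the rest of the function does):

static int ls1x_dma_slave_config(struct dma_chan *dchan,
                                 struct dma_slave_config *config)
{
        struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);

        /* the NULL check came after to_ls1x_dma_chan(), so it was
         * pointless and is simply dropped */
        chan->cfg = *config;

        return 0;
}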
>
> > +static void ls1x_dma_trigger(struct ls1x_dma_chan *chan)
> > +{
> > + struct dma_chan *dchan = &chan->vchan.chan;
> > + struct ls1x_dma_desc *desc;
> > + struct virt_dma_desc *vdesc;
> > + unsigned int val;
> > +
> > + vdesc = vchan_next_desc(&chan->vchan);
> > + if (!vdesc) {
> > + dev_warn(chan2dev(dchan), "No pending descriptor\n");
>
> Hmm, I would not log that... this is called from
> ls1x_dma_issue_pending() and which can be called from client driver but
> previous completion would push and this can find empty queue so it can
> happen quite frequently
Will drop this warning.
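So ls1x_dma_trigger() will just return silently when the queue is empty, e.g. (fragment of the existing function, locals trimmed):

        vdesc = vchan_next_desc(&chan->vchan);
        if (!vdesc)
                /* issue_pending() can legitimately find an empty queue,
                 * so this is not worth a warning */
                return;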
>
> > +static irqreturn_t ls1x_dma_irq_handler(int irq, void *data)
> > +{
> > + struct ls1x_dma_chan *chan = data;
> > + struct dma_chan *dchan = &chan->vchan.chan;
> > +
> > + dev_dbg(chan2dev(dchan), "DMA IRQ %d on channel %d\n", irq, chan->id);
> > + if (!chan->desc) {
> > + dev_warn(chan2dev(dchan),
> > + "DMA IRQ with no active descriptor on channel %d\n",
> > + chan->id);
>
> single line pls
Will do.
>
> > + return IRQ_NONE;
> > + }
> > +
> > + spin_lock(&chan->vchan.lock);
> > +
> > + if (chan->desc->type == DMA_CYCLIC) {
> > + vchan_cyclic_callback(&chan->desc->vdesc);
> > + } else {
> > + list_del(&chan->desc->vdesc.node);
> > + vchan_cookie_complete(&chan->desc->vdesc);
> > + chan->desc = NULL;
> > + }
>
> not submitting next txn, defeats the purpose of dma if we dont push txns
> as fast as possible..
Will fix it.
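Something along these lines, so the next queued transaction is started straight away (sketch; it assumes ls1x_dma_trigger() is safe to call with the vchan lock held, as it is when called from issue_pending()):

        spin_lock(&chan->vchan.lock);

        if (chan->desc->type == DMA_CYCLIC) {
                vchan_cyclic_callback(&chan->desc->vdesc);
        } else {
                list_del(&chan->desc->vdesc.node);
                vchan_cookie_complete(&chan->desc->vdesc);
                chan->desc = NULL;
                /* push the next pending descriptor immediately */
                ls1x_dma_trigger(chan);
        }

        spin_unlock(&chan->vchan.lock);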
>
> > +static struct platform_driver ls1x_dma_driver = {
> > + .probe = ls1x_dma_probe,
> > + .remove = ls1x_dma_remove,
> > + .driver = {
> > + .name = "ls1x-dma",
> > + },
>
> No device tree?
That's because the LOONGSON32 platform doesn't support DT yet.
>
> --
> ~Vinod
--
Best regards,
Kelvin Cheung