[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <ba109465-d7ee-09cb-775b-9b702a3910b0@gmail.com>
Date: Sun, 30 Jan 2022 13:05:36 +0300
From: Dmitry Osipenko <digetx@...il.com>
To: Akhil R <akhilrajeev@...dia.com>, devicetree@...r.kernel.org,
dmaengine@...r.kernel.org, jonathanh@...dia.com,
kyarlagadda@...dia.com, ldewangan@...dia.com,
linux-kernel@...r.kernel.org, linux-tegra@...r.kernel.org,
p.zabel@...gutronix.de, rgumasta@...dia.com, robh+dt@...nel.org,
thierry.reding@...il.com, vkoul@...nel.org
Cc: Pavan Kunapuli <pkunapuli@...dia.com>
Subject: Re: [PATCH v17 2/4] dmaengine: tegra: Add tegra gpcdma driver
29.01.2022 19:40, Akhil R пишет:
> +static int tegra_dma_device_pause(struct dma_chan *dc)
> +{
> + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
> + unsigned long wcount, flags;
> + int ret = 0;
> +
> + if (!tdc->tdma->chip_data->hw_support_pause)
> + return 0;
It's wrong to return zero if pause is unsupported; please see what
dmaengine_pause() returns.
> +
> + spin_lock_irqsave(&tdc->vc.lock, flags);
> + if (!tdc->dma_desc)
> + goto out;
> +
> + ret = tegra_dma_pause(tdc);
> + if (ret) {
> + dev_err(tdc2dev(tdc), "DMA pause timed out\n");
> + goto out;
> + }
> +
> + wcount = tdc_read(tdc, TEGRA_GPCDMA_CHAN_XFER_COUNT);
> + tdc->dma_desc->bytes_xfer +=
> + tdc->dma_desc->bytes_req - (wcount * 4);
Why transfer is accumulated?
Why do you need to update xfer size at all on pause?
> +
> +out:
> + spin_unlock_irqrestore(&tdc->vc.lock, flags);
> +
> + return ret;
> +}
Still, nothing prevents the interrupt handler from firing during the pause.
What you actually need to do is disable/enable the interrupt. This will
prevent the interrupt from racing, and then pause/resume may look like this:
static int tegra_dma_device_resume(struct dma_chan *dc)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
u32 val;
if (!tdc->tdma->chip_data->hw_support_pause)
return -ENOSYS;
if (!tdc->dma_desc)
return 0;
val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE);
val &= ~TEGRA_GPCDMA_CHAN_CSRE_PAUSE;
tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val);
enable_irq(tdc->irq);
return 0;
}
static int tegra_dma_device_pause(struct dma_chan *dc)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
u32 val;
int ret;
if (!tdc->tdma->chip_data->hw_support_pause)
return -ENOSYS;
disable_irq(tdc->irq);
if (!tdc->dma_desc)
return 0;
val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE);
val |= TEGRA_GPCDMA_CHAN_CSRE_PAUSE;
tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val);
/* Wait until busy bit is de-asserted */
ret = readl_relaxed_poll_timeout_atomic(
tdc->chan_base + TEGRA_GPCDMA_CHAN_STATUS,
val, !(val & TEGRA_GPCDMA_STATUS_BUSY),
TEGRA_GPCDMA_BURST_COMPLETE_TIME,
TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT);
if (ret) {
dev_err(tdc2dev(tdc), "DMA pause timed out: %d\n", ret);
tegra_dma_device_resume(dc);
}
return ret;
}
Powered by blists - more mailing lists