lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:	Mon, 11 Jan 2016 17:04:04 +0200
From:	Andy Shevchenko <andriy.shevchenko@...ux.intel.com>
To:	Mans Rullgard <mans@...sr.com>, Viresh Kumar <vireshk@...nel.org>,
	Dan Williams <dan.j.williams@...el.com>,
	Vinod Koul <vinod.koul@...el.com>, dmaengine@...r.kernel.org,
	linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2 2/2] dmaengine: dw: fix cyclic transfer callbacks

On Mon, 2016-01-11 at 13:04 +0000, Mans Rullgard wrote:
> Cyclic transfer callbacks rely on block completion interrupts which
> were
> disabled in commit ff7b05f29fd4 ("dmaengine/dw_dmac: Don't handle
> block
> interrupts").  This re-enables block interrupts so the cyclic
> callbacks
> can work.  Other transfer types are not affected as they set the
> INT_EN
> bit only on the last block.
> 
> Fixes: ff7b05f29fd4 ("dmaengine/dw_dmac: Don't handle block
> interrupts")
> Signed-off-by: Mans Rullgard <mans@...sr.com>

How did you test that?

From my understanding the custom stuff that does cyclic interrupts
prepares a set of descriptors per period, which at the end of transfer
will generate an XFER interrupt. The next period will go in the same way.

Maybe I missed something.

> ---
> Changes:
> - new patch
> ---
>  drivers/dma/dw/core.c | 21 +++++++++++++++------
>  1 file changed, 15 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
> index af2b92f8501e..b92662722404 100644
> --- a/drivers/dma/dw/core.c
> +++ b/drivers/dma/dw/core.c
> @@ -156,6 +156,7 @@ static void dwc_initialize(struct dw_dma_chan
> *dwc)
>  
>  	/* Enable interrupts */
>  	channel_set_bit(dw, MASK.XFER, dwc->mask);
> +	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
>  	channel_set_bit(dw, MASK.ERROR, dwc->mask);
>  
>  	dwc->initialized = true;
> @@ -536,16 +537,17 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr);
>  
>  /* Called with dwc->lock held and all DMAC interrupts disabled */
>  static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan
> *dwc,
> -		u32 status_err, u32 status_xfer)
> +		u32 status_block, u32 status_err, u32 status_xfer)
>  {
>  	unsigned long flags;
>  
> -	if (dwc->mask) {
> +	if (status_block & dwc->mask) {
>  		void (*callback)(void *param);
>  		void *callback_param;
>  
>  		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period
> llp 0x%08x\n",
>  				channel_readl(dwc, LLP));
> +		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
>  
>  		callback = dwc->cdesc->period_callback;
>  		callback_param = dwc->cdesc->period_callback_param;
> @@ -577,6 +579,7 @@ static void dwc_handle_cyclic(struct dw_dma *dw,
> struct dw_dma_chan *dwc,
>  		channel_writel(dwc, CTL_LO, 0);
>  		channel_writel(dwc, CTL_HI, 0);
>  
> +		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
>  		dma_writel(dw, CLEAR.ERROR, dwc->mask);
>  		dma_writel(dw, CLEAR.XFER, dwc->mask);
>  
> @@ -593,10 +596,12 @@ static void dw_dma_tasklet(unsigned long data)
>  {
>  	struct dw_dma *dw = (struct dw_dma *)data;
>  	struct dw_dma_chan *dwc;
> +	u32 status_block;
>  	u32 status_xfer;
>  	u32 status_err;
>  	int i;
>  
> +	status_block = dma_readl(dw, RAW.BLOCK);
>  	status_xfer = dma_readl(dw, RAW.XFER);
>  	status_err = dma_readl(dw, RAW.ERROR);
>  
> @@ -605,7 +610,8 @@ static void dw_dma_tasklet(unsigned long data)
>  	for (i = 0; i < dw->dma.chancnt; i++) {
>  		dwc = &dw->chan[i];
>  		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
> -			dwc_handle_cyclic(dw, dwc, status_err,
> status_xfer);
> +			dwc_handle_cyclic(dw, dwc, status_block,
> status_err,
> +					status_xfer);
>  		else if (status_err & (1 << i))
>  			dwc_handle_error(dw, dwc);
>  		else if (status_xfer & (1 << i))
> @@ -616,6 +622,7 @@ static void dw_dma_tasklet(unsigned long data)
>  	 * Re-enable interrupts.
>  	 */
>  	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
> +	channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
>  	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
>  }
>  
> @@ -635,6 +642,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void
> *dev_id)
>  	 * softirq handler.
>  	 */
>  	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
> +	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
>  	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
>  
>  	status = dma_readl(dw, STATUS_INT);
> @@ -645,6 +653,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void
> *dev_id)
>  
>  		/* Try to recover */
>  		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
> +		channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
>  		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
>  		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
>  		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
> @@ -1111,6 +1120,7 @@ static void dw_dma_off(struct dw_dma *dw)
>  	dma_writel(dw, CFG, 0);
>  
>  	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
> +	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
>  	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
>  	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
>  	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
> @@ -1216,6 +1226,7 @@ static void dwc_free_chan_resources(struct
> dma_chan *chan)
>  
>  	/* Disable interrupts */
>  	channel_clear_bit(dw, MASK.XFER, dwc->mask);
> +	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
>  	channel_clear_bit(dw, MASK.ERROR, dwc->mask);
>  
>  	spin_unlock_irqrestore(&dwc->lock, flags);
> @@ -1458,6 +1469,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
>  
>  	dwc_chan_disable(dw, dwc);
>  
> +	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
>  	dma_writel(dw, CLEAR.ERROR, dwc->mask);
>  	dma_writel(dw, CLEAR.XFER, dwc->mask);
>  
> @@ -1546,9 +1558,6 @@ int dw_dma_probe(struct dw_dma_chip *chip,
> struct dw_dma_platform_data *pdata)
>  	/* Force dma off, just in case */
>  	dw_dma_off(dw);
>  
> -	/* Disable BLOCK interrupts as well */
> -	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
> -
>  	/* Create a pool of consistent memory blocks for hardware
> descriptors */
>  	dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip-
> >dev,
>  					 sizeof(struct dw_desc), 4,
> 0);

-- 
Andy Shevchenko <andriy.shevchenko@...ux.intel.com>
Intel Finland Oy

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ