Message-ID: <20141022054238.GA3100@Robin-OptiPlex-780>
Date:	Wed, 22 Oct 2014 13:42:39 +0800
From:	Robin Gong <b38343@...escale.com>
To:	Andy Shevchenko <andriy.shevchenko@...ux.intel.com>
CC:	<vinod.koul@...el.com>, <dan.j.williams@...el.com>,
	<dmaengine@...r.kernel.org>, <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH v4 3/3] dma: imx-sdma: reorg code to make code clean

Thanks for your comments. I'll send the next version.
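
For reference, here is one possible shape for the new helper with the redundant
initialization dropped, as Andy points out below. This is only a sketch against
the quoted v4 code, not necessarily what v5 will contain:

static int sdma_transfer_init(struct sdma_channel *sdmac,
			      enum dma_transfer_direction direction)
{
	int ret;

	sdmac->status = DMA_IN_PROGRESS;
	sdmac->buf_tail = 0;
	sdmac->flags = 0;
	sdmac->direction = direction;

	/* Load the channel context and bail out on failure. */
	ret = sdma_load_context(sdmac);
	if (ret)
		return ret;

	sdmac->chn_count = 0;

	return 0;
}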
On Tue, Oct 21, 2014 at 12:39:35PM +0300, Andy Shevchenko wrote:
> On Tue, 2014-10-21 at 09:09 +0800, Robin Gong wrote:
> > code reorg for transfer prepare and bus width check.
> 
> Fix style of commit message.
> 
> > 
> > Signed-off-by: Robin Gong <b38343@...escale.com>
> > ---
> >  drivers/dma/imx-sdma.c | 127 +++++++++++++++++++++++--------------------------
> >  1 file changed, 60 insertions(+), 67 deletions(-)
> > 
> > diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
> > index 7e8aa2d..b0365c2 100644
> > --- a/drivers/dma/imx-sdma.c
> > +++ b/drivers/dma/imx-sdma.c
> > @@ -1026,6 +1026,52 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
> >  	clk_disable(sdma->clk_ahb);
> >  }
> >  
> > +static int sdma_transfer_init(struct sdma_channel *sdmac,
> > +			      enum dma_transfer_direction direction)
> > +{
> > +	int ret = 0;
> 
> Redundant assignment.
> 
> > +
> > +	sdmac->status = DMA_IN_PROGRESS;
> > +	sdmac->buf_tail = 0;
> > +	sdmac->flags = 0;
> > +	sdmac->direction = direction;
> > +
> > +	ret = sdma_load_context(sdmac);
> > +	if (ret)
> > +		return ret;
> > +
> > +	sdmac->chn_count = 0;
> > +
> > +	return ret;
> > +}
> > +
> > +static int check_bd_buswidth(struct sdma_buffer_descriptor *bd,
> > +			     struct sdma_channel *sdmac, int count,
> > +			     dma_addr_t dma_dst, dma_addr_t dma_src)
> > +{
> > +	int ret = 0;
> > +
> > +	switch (sdmac->word_size) {
> > +	case DMA_SLAVE_BUSWIDTH_4_BYTES:
> > +		bd->mode.command = 0;
> > +		if ((count | dma_dst | dma_src) & 3)
> > +			ret = -EINVAL;
> > +		break;
> > +	case DMA_SLAVE_BUSWIDTH_2_BYTES:
> > +		bd->mode.command = 2;
> > +		if ((count | dma_dst | dma_src) & 1)
> > +			ret = -EINVAL;
> > +		break;
> > +	case DMA_SLAVE_BUSWIDTH_1_BYTE:
> > +		 bd->mode.command = 1;
> > +		 break;
> > +	default:
> > +		 return -EINVAL;
> > +	}
> > +
> > +	return ret;
> > +}
> > +
> >  static struct dma_async_tx_descriptor *sdma_prep_memcpy(
> >  		struct dma_chan *chan, dma_addr_t dma_dst,
> >  		dma_addr_t dma_src, size_t len, unsigned long flags)
> > @@ -1034,7 +1080,7 @@ static struct dma_async_tx_descriptor *sdma_prep_memcpy(
> >  	struct sdma_engine *sdma = sdmac->sdma;
> >  	int channel = sdmac->channel;
> >  	size_t count;
> > -	int i = 0, param, ret;
> > +	int i = 0, param;
> >  	struct sdma_buffer_descriptor *bd;
> >  
> >  	if (!chan || !len || sdmac->status == DMA_IN_PROGRESS)
> > @@ -1046,21 +1092,12 @@ static struct dma_async_tx_descriptor *sdma_prep_memcpy(
> >  		goto err_out;
> >  	}
> >  
> > -	sdmac->status = DMA_IN_PROGRESS;
> > -
> > -	sdmac->buf_tail = 0;
> > -
> >  	dev_dbg(sdma->dev, "memcpy: %pad->%pad, len=%zu, channel=%d.\n",
> >  		&dma_src, &dma_dst, len, channel);
> >  
> > -	sdmac->direction = DMA_MEM_TO_MEM;
> > -
> > -	ret = sdma_load_context(sdmac);
> > -	if (ret)
> > +	if (sdma_transfer_init(sdmac, DMA_MEM_TO_MEM))
> >  		goto err_out;
> >  
> > -	sdmac->chn_count = 0;
> > -
> >  	do {
> >  		count = min_t(size_t, len, SDMA_BD_MAX_CNT);
> >  		bd = &sdmac->bd[i];
> > @@ -1068,28 +1105,8 @@ static struct dma_async_tx_descriptor *sdma_prep_memcpy(
> >  		bd->ext_buffer_addr = dma_dst;
> >  		bd->mode.count = count;
> >  
> > -		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
> > -			ret =  -EINVAL;
> > +		if (check_bd_buswidth(bd, sdmac, count, dma_dst, dma_src))
> >  			goto err_out;
> > -		}
> > -
> > -		switch (sdmac->word_size) {
> > -		case DMA_SLAVE_BUSWIDTH_4_BYTES:
> > -			bd->mode.command = 0;
> > -			if ((count | dma_dst | dma_src) & 3)
> > -				return NULL;
> > -			break;
> > -		case DMA_SLAVE_BUSWIDTH_2_BYTES:
> > -			bd->mode.command = 2;
> > -			if ((count | dma_dst | dma_src) & 1)
> > -				return NULL;
> > -			break;
> > -		case DMA_SLAVE_BUSWIDTH_1_BYTE:
> > -			bd->mode.command = 1;
> > -			break;
> > -		default:
> > -			return NULL;
> > -		}
> >  
> >  		dma_src += count;
> >  		dma_dst += count;
> > @@ -1141,21 +1158,10 @@ static struct dma_async_tx_descriptor *sdma_prep_sg(
> >  
> >  	if (sdmac->status == DMA_IN_PROGRESS)
> >  		return NULL;
> > -	sdmac->status = DMA_IN_PROGRESS;
> > -
> > -	sdmac->flags = 0;
> > -
> > -	sdmac->buf_tail = 0;
> >  
> >  	dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
> >  			src_nents, channel);
> >  
> > -	sdmac->direction = direction;
> > -
> > -	ret = sdma_load_context(sdmac);
> > -	if (ret)
> > -		goto err_out;
> > -
> >  	if (src_nents > NUM_BD) {
> >  		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
> >  				channel, src_nents, NUM_BD);
> > @@ -1163,7 +1169,9 @@ static struct dma_async_tx_descriptor *sdma_prep_sg(
> >  		goto err_out;
> >  	}
> >  
> > -	sdmac->chn_count = 0;
> > +	if (sdma_transfer_init(sdmac, direction))
> > +		goto err_out;
> > +
> >  	for_each_sg(src_sg, sg_src, src_nents, i) {
> >  		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
> >  		int param;
> > @@ -1187,30 +1195,15 @@ static struct dma_async_tx_descriptor *sdma_prep_sg(
> >  		bd->mode.count = count;
> >  		sdmac->chn_count += count;
> >  
> > -		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
> > -			ret =  -EINVAL;
> > +		if (direction == DMA_MEM_TO_MEM)
> > +			ret = check_bd_buswidth(bd, sdmac, count,
> > +						sg_dst->dma_address,
> > +						sg_src->dma_address);
> > +		else
> > +			ret = check_bd_buswidth(bd, sdmac, count, 0,
> > +						sg_src->dma_address);
> > +		if (ret)
> >  			goto err_out;
> > -		}
> > -
> > -		switch (sdmac->word_size) {
> > -		case DMA_SLAVE_BUSWIDTH_4_BYTES:
> > -			bd->mode.command = 0;
> > -			if ((count | sg_src->dma_address | (sg_dst &&
> > -				(sg_dst->dma_address))) & 3)
> > -				return NULL;
> > -			break;
> > -		case DMA_SLAVE_BUSWIDTH_2_BYTES:
> > -			bd->mode.command = 2;
> > -			if ((count | sg_src->dma_address |
> > -				(sg_dst && (sg_dst->dma_address))) & 1)
> > -				return NULL;
> > -			break;
> > -		case DMA_SLAVE_BUSWIDTH_1_BYTE:
> > -			bd->mode.command = 1;
> > -			break;
> > -		default:
> > -			return NULL;
> > -		}
> >  
> >  		param = BD_DONE | BD_EXTD | BD_CONT;
> >  
> 
> 
> -- 
> Andy Shevchenko <andriy.shevchenko@...el.com>
> Intel Finland Oy
> 
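As a side note on the consolidated check_bd_buswidth() quoted above: OR-ing the
transfer length with both DMA addresses and masking the low bits tests the
alignment of all three values in a single expression. A standalone illustration
of the idiom (not part of the patch, plain userspace C just to show the check):

#include <stdio.h>

int main(void)
{
	unsigned long count   = 0x100;   /* multiple of 4        */
	unsigned long dma_src = 0x2004;  /* 4-byte aligned       */
	unsigned long dma_dst = 0x3002;  /* only 2-byte aligned  */

	/*
	 * A non-zero result means at least one of the three values is
	 * not 4-byte aligned, which is why the 4-byte bus width case
	 * in check_bd_buswidth() returns -EINVAL.
	 */
	if ((count | dma_dst | dma_src) & 3)
		printf("misaligned for DMA_SLAVE_BUSWIDTH_4_BYTES\n");
	else
		printf("aligned\n");

	return 0;
}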
