Date:   Fri, 7 Feb 2020 17:10:23 +0200
From:   Peter Ujfalusi <peter.ujfalusi@...com>
To:     <vkoul@...nel.org>
CC:     <dmaengine@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
        <dan.j.williams@...el.com>
Subject: Re: [PATCH] dmaengine: ti: edma: Support for interleaved mem to mem
 transfer

Hi,

On 07/02/2020 16.20, Peter Ujfalusi wrote:
> Add basic interleaved support via EDMA.
> 
> Signed-off-by: Peter Ujfalusi <peter.ujfalusi@...com>
> ---
>  drivers/dma/ti/edma.c | 80 +++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 80 insertions(+)
> 
> diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
> index 03a7f647f7b2..c291e72260bd 100644
> --- a/drivers/dma/ti/edma.c
> +++ b/drivers/dma/ti/edma.c
> @@ -1275,6 +1275,82 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
>  	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
>  }
>  
> +static struct dma_async_tx_descriptor *
> +edma_prep_dma_interleaved(struct dma_chan *chan,
> +			  struct dma_interleaved_template *xt,
> +			  unsigned long tx_flags)
> +{
> +	struct device *dev = chan->device->dev;
> +	struct edma_chan *echan = to_edma_chan(chan);
> +	struct edmacc_param *param;
> +	struct edma_desc *edesc;
> +	size_t src_icg, dst_icg;
> +	int src_bidx, dst_bidx;
> +
> +	/* Slave mode is not supported */
> +	if (is_slave_direction(xt->dir))
> +		return NULL;
> +
> +	if (xt->frame_size != 1 || xt->numf == 0)
> +		return NULL;
> +
> +	if (xt->sgl[0].size > SZ_64K || xt->numf > SZ_64K)
> +		return NULL;
> +
> +	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
> +	if (src_icg) {
> +		src_bidx = src_icg + xt->sgl[0].size;
> +	} else if (xt->src_inc) {
> +		src_bidx = xt->sgl[0].size;
> +	} else {
> +		dev_err(dev, "%s: SRC constant addressing is not supported\n",
> +			__func__);
> +		return NULL;
> +	}
> +
> +	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
> +	if (dst_icg) {
> +		dst_bidx = dst_icg + xt->sgl[0].size;
> +	} else if (xt->dst_inc) {
> +		dst_bidx = xt->sgl[0].size;
> +	} else {
> +		dev_err(dev, "%s: DST constant addressing is not supported\n",
> +			__func__);
> +		return NULL;
> +	}
> +
> +	if (src_bidx > SZ_64K || dst_bidx > SZ_64K)
> +		return NULL;
> +
> +	edesc = kzalloc(struct_size(edesc, pset, 1), GFP_ATOMIC);
> +	if (!edesc)
> +		return NULL;
> +
> +	edesc->direction = DMA_MEM_TO_MEM;
> +	edesc->echan = echan;
> +	edesc->pset_nr = 1;
> +
> +	param = &edesc->pset[0].param;
> +
> +	param->src = xt->src_start;
> +	param->dst = xt->dst_start;
> +	param->a_b_cnt = xt->numf << 16 | xt->sgl[0].size;
> +	param->ccnt = 1;
> +	param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
> +	param->src_dst_cidx = 0;
> +	param->link_bcntrld = 0xffffffff;

The BCNTRLD should be 0 and only the link part needs to be 0xffff.
BCNTRLD is basically a don't-care in this setup since CCNT is 1, but to
be precise it is better to leave it as 0.
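
In other words, something like this is what I have in mind for the
resend (just a sketch of the corrected assignment, assuming the usual
layout of the combined field: LINK in bits 15:0, BCNTRLD in bits 31:16):

	/*
	 * Keep BCNTRLD (upper 16 bits) at 0 and only set the link part
	 * (lower 16 bits) to 0xffff, i.e. "no link". BCNTRLD is unused
	 * here anyway since ccnt is 1.
	 */
	param->link_bcntrld = 0xffff;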

I'll resend the patch on Monday.
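
For reference, a client would exercise the new callback roughly as
below. This is only a sketch, not part of the patch: the channel
request and cleanup are omitted, the helper name is made up, and it
assumes the template can be freed right after prep since this
implementation copies everything it needs into the pset:

	#include <linux/dmaengine.h>
	#include <linux/overflow.h>
	#include <linux/slab.h>

	/* Mem-to-mem interleaved copy: one chunk per frame, numf frames. */
	static int submit_interleaved_copy(struct dma_chan *chan, dma_addr_t src,
					   dma_addr_t dst, size_t chunk, size_t icg,
					   size_t numf)
	{
		struct dma_async_tx_descriptor *tx;
		struct dma_interleaved_template *xt;
		dma_cookie_t cookie;

		/* EDMA needs frame_size == 1 and chunk/numf to fit in 16 bits */
		xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
		if (!xt)
			return -ENOMEM;

		xt->src_start = src;
		xt->dst_start = dst;
		xt->dir = DMA_MEM_TO_MEM;
		xt->src_inc = true;
		xt->dst_inc = true;
		xt->src_sgl = true;	/* apply sgl[0].icg on the source side */
		xt->dst_sgl = false;	/* destination is written contiguously */
		xt->numf = numf;
		xt->frame_size = 1;
		xt->sgl[0].size = chunk;
		xt->sgl[0].icg = icg;

		tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
		kfree(xt);
		if (!tx)
			return -EINVAL;

		cookie = dmaengine_submit(tx);
		if (dma_submit_error(cookie))
			return -EINVAL;

		dma_async_issue_pending(chan);
		return 0;
	}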

> +
> +	param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
> +	param->opt |= ITCCHEN;
> +	/* Enable transfer complete interrupt if requested */
> +	if (tx_flags & DMA_PREP_INTERRUPT)
> +		param->opt |= TCINTEN;
> +	else
> +		edesc->polled = true;
> +
> +	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
> +}
> +
>  static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
>  	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
>  	size_t period_len, enum dma_transfer_direction direction,
> @@ -1917,7 +1993,9 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
>  			 "Legacy memcpy is enabled, things might not work\n");
>  
>  		dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask);
> +		dma_cap_set(DMA_INTERLEAVE, m_ddev->cap_mask);
>  		s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
> +		s_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved;
>  		s_ddev->directions = BIT(DMA_MEM_TO_MEM);
>  	}
>  
> @@ -1953,8 +2031,10 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
>  
>  		dma_cap_zero(m_ddev->cap_mask);
>  		dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask);
> +		dma_cap_set(DMA_INTERLEAVE, m_ddev->cap_mask);
>  
>  		m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
> +		m_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved;
>  		m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
>  		m_ddev->device_free_chan_resources = edma_free_chan_resources;
>  		m_ddev->device_issue_pending = edma_issue_pending;
> 

- Péter

Texas Instruments Finland Oy, Porkkalankatu 22, 00180 Helsinki.
Y-tunnus/Business ID: 0615521-4. Kotipaikka/Domicile: Helsinki
