Date:   Mon, 17 Sep 2018 07:51:36 +0000
From:   Robin Gong <yibin.gong@....com>
To:     Lucas Stach <l.stach@...gutronix.de>, Vinod Koul <vkoul@...nel.org>
CC:     "dmaengine@...r.kernel.org" <dmaengine@...r.kernel.org>,
        "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
        dl-linux-imx <linux-imx@....com>,
        "kernel@...gutronix.de" <kernel@...gutronix.de>,
        "patchwork-lst@...gutronix.de" <patchwork-lst@...gutronix.de>
Subject: RE: [PATCH v2 3/4] dmaengine: imx-sdma: implement channel termination
 via worker

> -----Original Message-----
> From: Lucas Stach <l.stach@...gutronix.de>
> Sent: September 15, 2018 1:06
> To: Vinod Koul <vkoul@...nel.org>
> Cc: dmaengine@...r.kernel.org; linux-kernel@...r.kernel.org; Robin Gong
> <yibin.gong@....com>; dl-linux-imx <linux-imx@....com>;
> kernel@...gutronix.de; patchwork-lst@...gutronix.de
> Subject: [PATCH v2 3/4] dmaengine: imx-sdma: implement channel termination
> via worker
> 
> The dmaengine documentation states that device_terminate_all may be
> asynchronous and need not wait until the active transfers have stopped.
> 
> This allows us to move most of the functionality currently implemented in the
> sdma channel termination function to run in a worker, outside of any atomic
> context. Moving this out of atomic context has two benefits: we can now
> sleep while waiting for the channel to terminate instead of busy waiting,
> and the freeing of the dma descriptors happens with IRQs enabled, getting
> rid of a warning in the dma mapping code.
> 
> As the termination is now async, we need to implement the device_synchronize
> dma engine function, which simply waits for the worker to finish its execution.
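
Side note on the client-side contract this relies on (a rough sketch, not
part of the patch; it only uses the generic dmaengine client helpers that
wrap these two callbacks):

	/* terminate_all is now async: it only schedules terminate_worker */
	dmaengine_terminate_async(chan);

	/* ... other teardown that does not touch the DMA buffers ... */

	/* calls device_synchronize, i.e. waits for terminate_worker */
	dmaengine_synchronize(chan);

	/* only after this is it safe to free or unmap the channel's buffers */
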
> 
> Signed-off-by: Lucas Stach <l.stach@...gutronix.de>
> ---
> v2: Keep vchan_get_all_descriptors in the terminate_all call, so the
>     worker doesn't corrupt the next transfer if that's already in
>     the setup process.
> ---
>  drivers/dma/imx-sdma.c | 42 ++++++++++++++++++++++++++++++++----------
>  1 file changed, 32 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
> index 8d2fec8b16cc..da41e8fbf151 100644
> --- a/drivers/dma/imx-sdma.c
> +++ b/drivers/dma/imx-sdma.c
> @@ -32,6 +32,7 @@
>  #include <linux/of_address.h>
>  #include <linux/of_device.h>
>  #include <linux/of_dma.h>
> +#include <linux/workqueue.h>
> 
>  #include <asm/irq.h>
>  #include <linux/platform_data/dma-imx-sdma.h>
> @@ -375,6 +376,8 @@ struct sdma_channel {
>  	u32				shp_addr, per_addr;
>  	enum dma_status			status;
>  	struct imx_dma_data		data;
> +	struct work_struct		terminate_worker;
> +	struct list_head		deferred_desc_list;
>  };
> 
>  #define IMX_DMA_SG_LOOP		BIT(0)
> @@ -1025,31 +1028,47 @@ static int sdma_disable_channel(struct dma_chan *chan)
> 
>  	return 0;
>  }
> +static void sdma_channel_terminate(struct work_struct *work)
> +{
> +	struct sdma_channel *sdmac = container_of(work, struct sdma_channel,
> +						  terminate_worker);
> +
> +	/*
> +	 * According to NXP R&D team a delay of one BD SDMA cost time
> +	 * (maximum is 1ms) should be added after disable of the channel
> +	 * bit, to ensure SDMA core has really been stopped after SDMA
> +	 * clients call .device_terminate_all.
> +	 */
> +	usleep_range(1000, 2000);
> +
> +	vchan_dma_desc_free_list(&sdmac->vc, &sdmac->deferred_desc_list);
> +	INIT_LIST_HEAD(&sdmac->deferred_desc_list);
> +}
> 
>  static int sdma_disable_channel_with_delay(struct dma_chan *chan)
>  {
>  	struct sdma_channel *sdmac = to_sdma_chan(chan);
>  	unsigned long flags;
> -	LIST_HEAD(head);
> 
>  	sdma_disable_channel(chan);
> +
>  	spin_lock_irqsave(&sdmac->vc.lock, flags);
> -	vchan_get_all_descriptors(&sdmac->vc, &head);
> +	vchan_get_all_descriptors(&sdmac->vc, &sdmac->deferred_desc_list);
>  	sdmac->desc = NULL;
>  	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
> -	vchan_dma_desc_free_list(&sdmac->vc, &head);
> 
> -	/*
> -	 * According to NXP R&D team a delay of one BD SDMA cost time
> -	 * (maximum is 1ms) should be added after disable of the channel
> -	 * bit, to ensure SDMA core has really been stopped after SDMA
> -	 * clients call .device_terminate_all.
> -	 */
> -	mdelay(1);
> +	schedule_work(&sdmac->terminate_worker);
> 
>  	return 0;
>  }
> 
> +static void sdma_channel_synchronize(struct dma_chan *chan)
> +{
> +	struct sdma_channel *sdmac = to_sdma_chan(chan);
> +
Please add the line below to make sure the internal virt-dma tasklet is killed too:
tasklet_kill(&sdmac->vc.task);
> +	flush_work(&sdmac->terminate_worker);
> +}
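
I.e. something like this (just a sketch of the callback with the suggested
line added ahead of the flush_work):

	static void sdma_channel_synchronize(struct dma_chan *chan)
	{
		struct sdma_channel *sdmac = to_sdma_chan(chan);

		/* make sure the virt-dma completion tasklet cannot run again */
		tasklet_kill(&sdmac->vc.task);

		/* wait for terminate_worker to finish freeing the deferred descriptors */
		flush_work(&sdmac->terminate_worker);
	}
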
> +
>  static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
> {
>  	struct sdma_engine *sdma = sdmac->sdma;
> @@ -1993,6 +2012,8 @@ static int sdma_probe(struct platform_device *pdev)
> 
>  		sdmac->channel = i;
>  		sdmac->vc.desc_free = sdma_desc_free;
> +		INIT_WORK(&sdmac->terminate_worker, sdma_channel_terminate);
> +		INIT_LIST_HEAD(&sdmac->deferred_desc_list);
>  		/*
>  		 * Add the channel to the DMAC list. Do not add channel 0 though
>  		 * because we need it internally in the SDMA driver. This also means
> @@ -2045,6 +2066,7 @@ static int sdma_probe(struct platform_device *pdev)
>  	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
>  	sdma->dma_device.device_config = sdma_config;
>  	sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay;
> +	sdma->dma_device.device_synchronize = sdma_channel_synchronize;
>  	sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
>  	sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
>  	sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;
> --
> 2.19.0
