Message-Id: <20180914170615.12659-2-l.stach@pengutronix.de>
Date: Fri, 14 Sep 2018 19:06:13 +0200
From: Lucas Stach <l.stach@...gutronix.de>
To: Vinod Koul <vkoul@...nel.org>
Cc: dmaengine@...r.kernel.org, linux-kernel@...r.kernel.org,
Robin Gong <yibin.gong@....com>, linux-imx@....com,
kernel@...gutronix.de, patchwork-lst@...gutronix.de
Subject: [PATCH v2 2/4] Revert "dmaengine: imx-sdma: alloclate bd memory from dma pool"

This reverts commit fe5b85c656bc. The SDMA engine needs the descriptors
to be contiguous in memory. As the dma pool API is only able to provide
a single descriptor per alloc invocation, there is no guarantee that
multiple descriptors satisfy this requirement. The code in question is
also broken, as it allocates memory for only a single descriptor,
without looking at the number of descriptors required for the transfer,
leading to out-of-bounds accesses when the descriptors are written.

Signed-off-by: Lucas Stach <l.stach@...gutronix.de>
---
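
(Not part of the commit message: a minimal sketch of the failure mode,
for illustration only. The write loop is hypothetical; the names mirror
the driver.)

	/* Reverted pattern: dma_pool_alloc() returns exactly one
	 * sizeof(struct sdma_buffer_descriptor) block per call, so only
	 * bd[0] is backed by this allocation, and blocks from separate
	 * calls are not guaranteed to be contiguous.
	 */
	desc->bd = dma_pool_alloc(desc->sdmac->bd_pool, GFP_ATOMIC,
				  &desc->bd_phys);

	/* Filling all num_bd descriptors then runs past the block: */
	for (i = 0; i < desc->num_bd; i++)
		desc->bd[i].mode.count = 0;	/* out of bounds for i > 0 */

	/* Restored pattern: one coherent allocation sized for all
	 * num_bd descriptors, contiguous in CPU and DMA address space.
	 */
	desc->bd = dma_zalloc_coherent(NULL, desc->num_bd *
				       sizeof(struct sdma_buffer_descriptor),
				       &desc->bd_phys, GFP_ATOMIC);
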
 drivers/dma/imx-sdma.c | 18 ++++++------------
 1 file changed, 6 insertions(+), 12 deletions(-)

diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 3bca5e0c715f..8d2fec8b16cc 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -24,7 +24,6 @@
 #include <linux/spinlock.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
 #include <linux/firmware.h>
 #include <linux/slab.h>
 #include <linux/platform_device.h>
@@ -376,7 +375,6 @@ struct sdma_channel {
 	u32				shp_addr, per_addr;
 	enum dma_status			status;
 	struct imx_dma_data		data;
-	struct dma_pool			*bd_pool;
 };
 
 #define IMX_DMA_SG_LOOP		BIT(0)
@@ -1192,10 +1190,11 @@ static int sdma_request_channel0(struct sdma_engine *sdma)
 
 static int sdma_alloc_bd(struct sdma_desc *desc)
 {
+	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
 	int ret = 0;
 
-	desc->bd = dma_pool_alloc(desc->sdmac->bd_pool, GFP_ATOMIC,
-				  &desc->bd_phys);
+	desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys,
+				       GFP_ATOMIC);
 	if (!desc->bd) {
 		ret = -ENOMEM;
 		goto out;
@@ -1206,7 +1205,9 @@ static int sdma_alloc_bd(struct sdma_desc *desc)
 
 static void sdma_free_bd(struct sdma_desc *desc)
 {
-	dma_pool_free(desc->sdmac->bd_pool, desc->bd, desc->bd_phys);
+	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
+
+	dma_free_coherent(NULL, bd_size, desc->bd, desc->bd_phys);
 }
 
 static void sdma_desc_free(struct virt_dma_desc *vd)
@@ -1272,10 +1273,6 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
 	if (ret)
 		goto disable_clk_ahb;
 
-	sdmac->bd_pool = dma_pool_create("bd_pool", chan->device->dev,
-				sizeof(struct sdma_buffer_descriptor),
-				32, 0);
-
 	return 0;
 
 disable_clk_ahb:
@@ -1304,9 +1301,6 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
 
 	clk_disable(sdma->clk_ipg);
 	clk_disable(sdma->clk_ahb);
-
-	dma_pool_destroy(sdmac->bd_pool);
-	sdmac->bd_pool = NULL;
 }
 
 static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
--
2.19.0