Message-Id: <20181211151647.662325438@linuxfoundation.org>
Date: Tue, 11 Dec 2018 16:41:43 +0100
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Lucas Stach <l.stach@...gutronix.de>,
Robin Gong <yibin.gong@....com>, Vinod Koul <vkoul@...nel.org>
Subject: [PATCH 4.19 084/118] Revert "dmaengine: imx-sdma: alloclate bd memory from dma pool"
4.19-stable review patch. If anyone has any objections, please let me know.
------------------
From: Lucas Stach <l.stach@...gutronix.de>
commit ebb853b1bd5f659b92c71dc6a9de44cfc37c78c0 upstream.
This reverts commit fe5b85c656bc. The SDMA engine needs the descriptors to
be contiguous in memory. As the dma pool API is only able to provide a
single descriptor per alloc invocation, there is no guarantee that multiple
descriptors will satisfy this requirement. Also, the code in question is
broken, as it only allocates memory for a single descriptor, without looking
at the number of descriptors required for the transfer, leading to
out-of-bounds accesses when the descriptors are written.
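
A minimal sketch of the two allocation patterns contrasted above, for
illustration only; the helper names alloc_bd_from_pool() and
alloc_bd_contiguous() do not exist in the driver, and error handling is
trimmed:

/*
 * Illustration only (not part of this patch).  The first helper mirrors
 * the reverted code: it takes a single element from the dma pool, so a
 * transfer needing desc->num_bd descriptors gets neither enough memory
 * nor a contiguous block.  The second mirrors what the revert restores:
 * one coherent allocation sized for all descriptors, back to back.
 */
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

static int alloc_bd_from_pool(struct sdma_desc *desc)
{
	/* Pool elements are sizeof(struct sdma_buffer_descriptor) each;
	 * writing desc->bd[1..num_bd-1] later runs past this allocation. */
	desc->bd = dma_pool_alloc(desc->sdmac->bd_pool, GFP_ATOMIC,
				  &desc->bd_phys);
	return desc->bd ? 0 : -ENOMEM;
}

static int alloc_bd_contiguous(struct sdma_desc *desc)
{
	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);

	/* All num_bd descriptors live in one contiguous coherent buffer,
	 * as the SDMA hardware expects when it walks the ring. */
	desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys,
				       GFP_ATOMIC);
	return desc->bd ? 0 : -ENOMEM;
}

The contiguous variant corresponds to what sdma_alloc_bd() looks like after
the hunk below is applied.
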
Signed-off-by: Lucas Stach <l.stach@...gutronix.de>
Signed-off-by: Robin Gong <yibin.gong@....com>
Cc: stable <stable@...r.kernel.org>
Signed-off-by: Vinod Koul <vkoul@...nel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
drivers/dma/imx-sdma.c | 18 ++++++------------
1 file changed, 6 insertions(+), 12 deletions(-)
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -24,7 +24,6 @@
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
@@ -376,7 +375,6 @@ struct sdma_channel {
u32 shp_addr, per_addr;
enum dma_status status;
struct imx_dma_data data;
- struct dma_pool *bd_pool;
};
#define IMX_DMA_SG_LOOP BIT(0)
@@ -1192,10 +1190,11 @@ out:
static int sdma_alloc_bd(struct sdma_desc *desc)
{
+ u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
int ret = 0;
- desc->bd = dma_pool_alloc(desc->sdmac->bd_pool, GFP_ATOMIC,
- &desc->bd_phys);
+ desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys,
+ GFP_ATOMIC);
if (!desc->bd) {
ret = -ENOMEM;
goto out;
@@ -1206,7 +1205,9 @@ out:
static void sdma_free_bd(struct sdma_desc *desc)
{
- dma_pool_free(desc->sdmac->bd_pool, desc->bd, desc->bd_phys);
+ u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
+
+ dma_free_coherent(NULL, bd_size, desc->bd, desc->bd_phys);
}
static void sdma_desc_free(struct virt_dma_desc *vd)
@@ -1272,10 +1273,6 @@ static int sdma_alloc_chan_resources(str
if (ret)
goto disable_clk_ahb;
- sdmac->bd_pool = dma_pool_create("bd_pool", chan->device->dev,
- sizeof(struct sdma_buffer_descriptor),
- 32, 0);
-
return 0;
disable_clk_ahb:
@@ -1304,9 +1301,6 @@ static void sdma_free_chan_resources(str
clk_disable(sdma->clk_ipg);
clk_disable(sdma->clk_ahb);
-
- dma_pool_destroy(sdmac->bd_pool);
- sdmac->bd_pool = NULL;
}
static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,