Message-ID: <20221001122148.9158-5-kyarlagadda@nvidia.com>
Date: Sat, 1 Oct 2022 17:51:48 +0530
From: Krishna Yarlagadda <kyarlagadda@...dia.com>
To: <broonie@...nel.org>, <thierry.reding@...il.com>,
<jonathanh@...dia.com>, <linux-spi@...r.kernel.org>,
<linux-tegra@...r.kernel.org>
CC: <skomatineni@...dia.com>, <ldewangan@...dia.com>,
<linux-kernel@...r.kernel.org>,
Krishna Yarlagadda <kyarlagadda@...dia.com>
Subject: [PATCH 5/5] spi: tegra210-quad: native dma support
Enable native DMA support for Tegra234 & Tegra241. These chips use the
QSPI controller's internal DMA engine rather than an external DMA
controller, so turn the has_dma flag into a bitmask that distinguishes
external (QSPI_DMA_EXT) from internal (QSPI_DMA_INT) DMA.
Signed-off-by: Krishna Yarlagadda <kyarlagadda@...dia.com>
---
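Note (below the "---", so it will not land in the git history): this
change turns has_dma from a bool into a bitmask so that a SoC can
advertise either an external DMA engine or the controller's internal
(native) DMA. A minimal sketch of the intended semantics, assuming only
the flags added by this patch; the two helpers are illustrative and not
part of the change:

  /* Hypothetical helpers decoding the per-SoC DMA mode. */
  static inline bool qspi_has_ext_dma(const struct tegra_qspi_soc_data *sd)
  {
          /* External dmaengine channels ("tx"/"rx") move data through
           * the QSPI FIFOs (Tegra210, Tegra186).
           */
          return sd->has_dma & QSPI_DMA_EXT;
  }

  static inline bool qspi_has_int_dma(const struct tegra_qspi_soc_data *sd)
  {
          /* Native DMA (Tegra234, Tegra241): the controller accesses
           * memory itself, so no dmaengine channels are requested.
           */
          return sd->has_dma & QSPI_DMA_INT;
  }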
drivers/spi/spi-tegra210-quad.c | 136 +++++++++++++++++++++++---------
1 file changed, 97 insertions(+), 39 deletions(-)
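For reference, the native-DMA path programs the data buffer address by
splitting it across two registers. A hedged sketch using the kernel's
address helpers, equivalent to the open-coded shifts in the diff below
(dma_phys stands in for t->tx_dma, tqspi->rx_dma_phys, etc.):

  /* Illustrative only: QSPI_DMA_HI_ADDRESS_REG holds bits 39:32 of
   * the buffer address, so native DMA is limited to 40-bit addresses.
   */
  tegra_qspi_writel(tqspi, lower_32_bits(dma_phys),
                    QSPI_DMA_MEM_ADDRESS_REG);
  tegra_qspi_writel(tqspi, upper_32_bits(dma_phys) & 0xff,
                    QSPI_DMA_HI_ADDRESS_REG);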
diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
index 99811509dafa..edecb999a614 100644
--- a/drivers/spi/spi-tegra210-quad.c
+++ b/drivers/spi/spi-tegra210-quad.c
@@ -111,6 +111,9 @@
#define QSPI_DMA_BLK 0x024
#define QSPI_DMA_BLK_SET(x) (((x) & 0xffff) << 0)
+#define QSPI_DMA_MEM_ADDRESS_REG 0x028
+#define QSPI_DMA_HI_ADDRESS_REG 0x02c
+
#define QSPI_TX_FIFO 0x108
#define QSPI_RX_FIFO 0x188
@@ -155,6 +158,9 @@
#define DATA_DIR_TX BIT(0)
#define DATA_DIR_RX BIT(1)
+#define QSPI_DMA_EXT BIT(0)
+#define QSPI_DMA_INT BIT(1)
+
#define QSPI_DMA_TIMEOUT (msecs_to_jiffies(1000))
#define DEFAULT_QSPI_DMA_BUF_LEN (64 * 1024)
#define CMD_TRANSFER 0
@@ -163,7 +169,7 @@
#define DATA_TRANSFER 3
struct tegra_qspi_soc_data {
- bool has_dma;
+ int has_dma;
bool cmb_xfer_capable;
unsigned int cs_count;
};
@@ -600,17 +606,22 @@ static void tegra_qspi_dma_unmap_xfer(struct tegra_qspi *tqspi, struct spi_trans
len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
- dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
- dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE);
+ if (t->tx_buf)
+ dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
+ if (t->rx_buf)
+ dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE);
}
static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
struct dma_slave_config dma_sconfig = { 0 };
+ dma_addr_t rx_dma_phys, tx_dma_phys;
unsigned int len;
u8 dma_burst;
int ret = 0;
u32 val;
+ bool has_ext_dma = !!(tqspi->soc_data->has_dma &
+ QSPI_DMA_EXT);
if (tqspi->is_packed) {
ret = tegra_qspi_dma_map_xfer(tqspi, t);
@@ -629,23 +640,35 @@ static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct
len = tqspi->curr_dma_words * 4;
/* set attention level based on length of transfer */
- val = 0;
- if (len & 0xf) {
- val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1;
- dma_burst = 1;
- } else if (((len) >> 4) & 0x1) {
- val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4;
- dma_burst = 4;
- } else {
- val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8;
- dma_burst = 8;
+ val = 0;
+ if (has_ext_dma) {
+ if (len & 0xf) {
+ val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1;
+ dma_burst = 1;
+ } else if (((len) >> 4) & 0x1) {
+ val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4;
+ dma_burst = 4;
+ } else {
+ val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8;
+ dma_burst = 8;
+ }
}
tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
tqspi->dma_control_reg = val;
dma_sconfig.device_fc = true;
- if (tqspi->cur_direction & DATA_DIR_TX) {
+ if ((tqspi->cur_direction & DATA_DIR_TX) && !has_ext_dma) {
+ if (tqspi->is_packed)
+ tx_dma_phys = t->tx_dma;
+ else
+ tx_dma_phys = tqspi->tx_dma_phys;
+ tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
+ tegra_qspi_writel(tqspi, (tx_dma_phys & 0xffffffff),
+ QSPI_DMA_MEM_ADDRESS_REG);
+ tegra_qspi_writel(tqspi, ((tx_dma_phys >> 32) & 0xff),
+ QSPI_DMA_HI_ADDRESS_REG);
+ } else if ((tqspi->cur_direction & DATA_DIR_TX) && has_ext_dma) {
dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO;
dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
dma_sconfig.dst_maxburst = dma_burst;
@@ -663,7 +686,16 @@ static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct
}
}
- if (tqspi->cur_direction & DATA_DIR_RX) {
+ if ((tqspi->cur_direction & DATA_DIR_RX) && !has_ext_dma) {
+ if (tqspi->is_packed)
+ rx_dma_phys = t->rx_dma;
+ else
+ rx_dma_phys = tqspi->rx_dma_phys;
+ tegra_qspi_writel(tqspi, (rx_dma_phys & 0xffffffff),
+ QSPI_DMA_MEM_ADDRESS_REG);
+ tegra_qspi_writel(tqspi, ((rx_dma_phys >> 32) & 0xff),
+ QSPI_DMA_HI_ADDRESS_REG);
+ } else if ((tqspi->cur_direction & DATA_DIR_RX) && has_ext_dma) {
dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO;
dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
dma_sconfig.src_maxburst = dma_burst;
@@ -751,13 +783,29 @@ static int tegra_qspi_init_dma(struct tegra_qspi *tqspi)
u32 *dma_buf;
int err;
- dma_chan = dma_request_chan(tqspi->dev, "rx");
- if (IS_ERR(dma_chan)) {
- err = PTR_ERR(dma_chan);
- goto err_out;
- }
+ if (!tqspi->soc_data->has_dma)
+ return -ENODEV;
+
+ if (tqspi->soc_data->has_dma & QSPI_DMA_EXT) {
+ dma_chan = dma_request_chan(tqspi->dev, "rx");
+ if (IS_ERR(dma_chan)) {
+ err = PTR_ERR(dma_chan);
+ goto err_out;
+ }
- tqspi->rx_dma_chan = dma_chan;
+ tqspi->rx_dma_chan = dma_chan;
+
+ dma_chan = dma_request_chan(tqspi->dev, "tx");
+ if (IS_ERR(dma_chan)) {
+ err = PTR_ERR(dma_chan);
+ goto err_out;
+ }
+
+ tqspi->tx_dma_chan = dma_chan;
+ } else {
+ tqspi->rx_dma_chan = NULL;
+ tqspi->tx_dma_chan = NULL;
+ }
dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
if (!dma_buf) {
@@ -768,14 +816,6 @@ static int tegra_qspi_init_dma(struct tegra_qspi *tqspi)
tqspi->rx_dma_buf = dma_buf;
tqspi->rx_dma_phys = dma_phys;
- dma_chan = dma_request_chan(tqspi->dev, "tx");
- if (IS_ERR(dma_chan)) {
- err = PTR_ERR(dma_chan);
- goto err_out;
- }
-
- tqspi->tx_dma_chan = dma_chan;
-
dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
if (!dma_buf) {
err = -ENOMEM;
@@ -1045,6 +1085,8 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
struct spi_message *msg)
{
bool is_first_msg = true;
+ bool has_ext_dma = !!(tqspi->soc_data->has_dma &
+ QSPI_DMA_EXT);
struct spi_transfer *xfer;
struct spi_device *spi = msg->spi;
u8 transfer_phase = 0;
@@ -1109,12 +1151,12 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
if (WARN_ON(ret == 0)) {
dev_err(tqspi->dev, "QSPI Transfer failed with timeout: %d\n",
ret);
- if (tqspi->is_curr_dma_xfer &&
+ if (tqspi->is_curr_dma_xfer && has_ext_dma &&
(tqspi->cur_direction & DATA_DIR_TX))
dmaengine_terminate_all
(tqspi->tx_dma_chan);
- if (tqspi->is_curr_dma_xfer &&
+ if (tqspi->is_curr_dma_xfer && has_ext_dma &&
(tqspi->cur_direction & DATA_DIR_RX))
dmaengine_terminate_all
(tqspi->rx_dma_chan);
@@ -1178,6 +1220,8 @@ static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi,
struct spi_device *spi = msg->spi;
struct spi_transfer *transfer;
bool is_first_msg = true;
+ bool has_ext_dma = !!(tqspi->soc_data->has_dma &
+ QSPI_DMA_EXT);
int ret = 0, val = 0;
msg->status = 0;
@@ -1230,9 +1274,11 @@ static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi,
QSPI_DMA_TIMEOUT);
if (WARN_ON(ret == 0)) {
dev_err(tqspi->dev, "transfer timeout\n");
- if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_TX))
+ if (tqspi->is_curr_dma_xfer && has_ext_dma &&
+ (tqspi->cur_direction & DATA_DIR_TX))
dmaengine_terminate_all(tqspi->tx_dma_chan);
- if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_RX))
+ if (tqspi->is_curr_dma_xfer && has_ext_dma &&
+ (tqspi->cur_direction & DATA_DIR_RX))
dmaengine_terminate_all(tqspi->rx_dma_chan);
tegra_qspi_handle_error(tqspi);
ret = -EIO;
@@ -1365,8 +1411,20 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
unsigned long flags;
long wait_status;
int err = 0;
+ bool has_ext_dma = !!(tqspi->soc_data->has_dma &
+ QSPI_DMA_EXT);
+
+ if ((tqspi->cur_direction & DATA_DIR_TX) && !has_ext_dma) {
+ if (tqspi->tx_status)
+ err += 1;
+ }
+
+ if ((tqspi->cur_direction & DATA_DIR_RX) && !has_ext_dma) {
+ if (tqspi->rx_status)
+ err += 2;
+ }
- if (tqspi->cur_direction & DATA_DIR_TX) {
+ if ((tqspi->cur_direction & DATA_DIR_TX) && has_ext_dma) {
if (tqspi->tx_status) {
dmaengine_terminate_all(tqspi->tx_dma_chan);
err += 1;
@@ -1381,7 +1439,7 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
}
}
- if (tqspi->cur_direction & DATA_DIR_RX) {
+ if ((tqspi->cur_direction & DATA_DIR_RX) && has_ext_dma) {
if (tqspi->rx_status) {
dmaengine_terminate_all(tqspi->rx_dma_chan);
err += 2;
@@ -1454,25 +1512,25 @@ static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
}
static struct tegra_qspi_soc_data tegra210_qspi_soc_data = {
- .has_dma = true,
+ .has_dma = QSPI_DMA_EXT,
.cmb_xfer_capable = false,
.cs_count = 1,
};
static struct tegra_qspi_soc_data tegra186_qspi_soc_data = {
- .has_dma = true,
+ .has_dma = QSPI_DMA_EXT,
.cmb_xfer_capable = true,
.cs_count = 1,
};
static struct tegra_qspi_soc_data tegra234_qspi_soc_data = {
- .has_dma = false,
+ .has_dma = QSPI_DMA_INT,
.cmb_xfer_capable = true,
.cs_count = 1,
};
static struct tegra_qspi_soc_data tegra241_qspi_soc_data = {
- .has_dma = false,
+ .has_dma = QSPI_DMA_INT,
.cmb_xfer_capable = true,
.cs_count = 4,
};
--
2.17.1