Message-ID: <20231128140818.261541-14-herve.codina@bootlin.com>
Date: Tue, 28 Nov 2023 15:08:12 +0100
From: Herve Codina <herve.codina@...tlin.com>
To: Herve Codina <herve.codina@...tlin.com>,
Qiang Zhao <qiang.zhao@....com>, Li Yang <leoyang.li@....com>,
Jakub Kicinski <kuba@...nel.org>,
Shengjiu Wang <shengjiu.wang@...il.com>,
Xiubo Li <Xiubo.Lee@...il.com>,
Fabio Estevam <festevam@...il.com>,
Nicolin Chen <nicoleotsuka@...il.com>,
Liam Girdwood <lgirdwood@...il.com>,
Mark Brown <broonie@...nel.org>,
Jaroslav Kysela <perex@...ex.cz>,
Takashi Iwai <tiwai@...e.com>,
Christophe Leroy <christophe.leroy@...roup.eu>
Cc: Arnd Bergmann <arnd@...db.de>, linuxppc-dev@...ts.ozlabs.org,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
alsa-devel@...a-project.org,
Thomas Petazzoni <thomas.petazzoni@...tlin.com>
Subject: [PATCH 13/17] soc: fsl: cpm1: qmc: Split Tx and Rx TSA entries setup
The Tx and Rx entries for a given channel are set in one function.
In order to modify the Rx entries and the Tx entries independently of one
another, split this function into one for the Rx part and one for the Tx
part.
Signed-off-by: Herve Codina <herve.codina@...tlin.com>
Reviewed-by: Christophe Leroy <christophe.leroy@...roup.eu>
---
drivers/soc/fsl/qe/qmc.c | 49 ++++++++++++++++++++++++++++------------
1 file changed, 35 insertions(+), 14 deletions(-)
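Note (illustration only, not part of the patch): once the setup is split,
a follow-up change could drive each direction on its own. A minimal sketch
of what such a caller might look like is below; qmc_chan_setup_tsa_tx() is
a hypothetical helper, not something this patch adds, and it reuses the
same tsa_serial_get_info() lookup already done in qmc_chan_setup_tsa():

	/* Hypothetical: set up only the Tx time slots of a channel */
	static int qmc_chan_setup_tsa_tx(struct qmc_chan *chan, bool enable)
	{
		struct tsa_serial_info info;
		int ret;

		/* Retrieve info from the TSA related serial */
		ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
		if (ret)
			return ret;

		/* A shared 64-entry table mixes Rx and Tx: fall back to it */
		if (info.nb_tx_ts > 32 || info.nb_rx_ts > 32)
			return qmc_chan_setup_tsa_64rxtx(chan, &info, enable);

		/* Otherwise only the Tx 32-entry table needs to be touched */
		return qmc_chan_setup_tsa_32tx(chan, &info, enable);
	}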
diff --git a/drivers/soc/fsl/qe/qmc.c b/drivers/soc/fsl/qe/qmc.c
index c1318fad296b..5ca4120779f8 100644
--- a/drivers/soc/fsl/qe/qmc.c
+++ b/drivers/soc/fsl/qe/qmc.c
@@ -610,14 +610,14 @@ static int qmc_chan_setup_tsa_64rxtx(struct qmc_chan *chan, const struct tsa_ser
return 0;
}
-static int qmc_chan_setup_tsa_32rx_32tx(struct qmc_chan *chan, const struct tsa_serial_info *info,
- bool enable)
+static int qmc_chan_setup_tsa_32rx(struct qmc_chan *chan, const struct tsa_serial_info *info,
+ bool enable)
{
unsigned int i;
u16 curr;
u16 val;
- /* Use a Tx 32 entries table and a Rx 32 entries table */
+ /* Use a Rx 32 entries table */
val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id);
@@ -633,6 +633,30 @@ static int qmc_chan_setup_tsa_32rx_32tx(struct qmc_chan *chan, const struct tsa_
return -EBUSY;
}
}
+
+ /* Set entries based on Rx stuff */
+ for (i = 0; i < info->nb_rx_ts; i++) {
+ if (!(chan->rx_ts_mask & (((u64)1) << i)))
+ continue;
+
+ qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2),
+ ~QMC_TSA_WRAP, enable ? val : 0x0000);
+ }
+
+ return 0;
+}
+
+static int qmc_chan_setup_tsa_32tx(struct qmc_chan *chan, const struct tsa_serial_info *info,
+ bool enable)
+{
+ unsigned int i;
+ u16 curr;
+ u16 val;
+
+ /* Use a Tx 32 entries table */
+
+ val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id);
+
/* Check entries based on Tx stuff */
for (i = 0; i < info->nb_tx_ts; i++) {
if (!(chan->tx_ts_mask & (((u64)1) << i)))
@@ -646,14 +670,6 @@ static int qmc_chan_setup_tsa_32rx_32tx(struct qmc_chan *chan, const struct tsa_
}
}
- /* Set entries based on Rx stuff */
- for (i = 0; i < info->nb_rx_ts; i++) {
- if (!(chan->rx_ts_mask & (((u64)1) << i)))
- continue;
-
- qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2),
- ~QMC_TSA_WRAP, enable ? val : 0x0000);
- }
/* Set entries based on Tx stuff */
for (i = 0; i < info->nb_tx_ts; i++) {
if (!(chan->tx_ts_mask & (((u64)1) << i)))
@@ -680,9 +696,14 @@ static int qmc_chan_setup_tsa(struct qmc_chan *chan, bool enable)
* Setup one common 64 entries table or two 32 entries (one for Tx
* and one for Rx) according to assigned TS numbers.
*/
- return ((info.nb_tx_ts > 32) || (info.nb_rx_ts > 32)) ?
- qmc_chan_setup_tsa_64rxtx(chan, &info, enable) :
- qmc_chan_setup_tsa_32rx_32tx(chan, &info, enable);
+ if (info.nb_tx_ts > 32 || info.nb_rx_ts > 32)
+ return qmc_chan_setup_tsa_64rxtx(chan, &info, enable);
+
+ ret = qmc_chan_setup_tsa_32rx(chan, &info, enable);
+ if (ret)
+ return ret;
+
+ return qmc_chan_setup_tsa_32tx(chan, &info, enable);
}
static int qmc_chan_command(struct qmc_chan *chan, u8 qmc_opcode)
--
2.42.0