Message-ID: <20250612071521.3116831-9-s-adivi@ti.com>
Date: Thu, 12 Jun 2025 12:45:12 +0530
From: Sai Sree Kartheek Adivi <s-adivi@...com>
To: Peter Ujfalusi <peter.ujfalusi@...il.com>, Vinod Koul <vkoul@...nel.org>,
Rob Herring <robh@...nel.org>,
Krzysztof Kozlowski <krzk+dt@...nel.org>,
Conor Dooley <conor+dt@...nel.org>, Nishanth Menon <nm@...com>,
Santosh Shilimkar <ssantosh@...nel.org>,
Sai Sree Kartheek Adivi <s-adivi@...com>, <dmaengine@...r.kernel.org>,
<devicetree@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<linux-arm-kernel@...ts.infradead.org>, <praneeth@...com>,
<vigneshr@...com>, <u-kumar1@...com>, <a-chavda@...com>,
<p-mantena@...com>
Subject: [PATCH v2 08/17] dmaengine: ti: k3-udma: move resource management functions to k3-udma-common.c

Move the functions responsible for allocation and release of UDMA
resources such as channels, rings and flows from k3-udma.c to the
common k3-udma-common.c file.

The implementation of these functions is largely shared between K3 UDMA
and K3 UDMA v2. This refactor improves code reuse and maintainability
across multiple variants.

No functional changes intended.

Signed-off-by: Sai Sree Kartheek Adivi <s-adivi@...com>
---
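Note for reviewers (below the ---, so stripped by git am; illustrative
only): with these helpers now exported through k3-udma.h, a variant
driver such as the planned K3 UDMA v2 can reserve and release channel
resources roughly as sketched here. example_setup_mem2mem() is a
hypothetical caller, not code added by this patch, and error handling
is elided.

#include "k3-udma.h"	/* declarations added by this patch */

/* Hypothetical caller of the moved helpers -- not part of this patch */
static int example_setup_mem2mem(struct udma_chan *uc)
{
	int ret;

	/* Reserve a matching tchan/rchan pair for MEM_TO_MEM transfers */
	ret = udma_get_chan_pair(uc);
	if (ret)
		return ret;

	/* ... ring and PSI-L configuration would go here ... */

	/* Teardown: each helper is a no-op when nothing is reserved */
	udma_free_rx_resources(uc);
	udma_free_tx_resources(uc);

	return 0;
}
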
drivers/dma/ti/k3-udma-common.c | 423 ++++++++++++++++++++++++++++++++
drivers/dma/ti/k3-udma.c | 423 --------------------------------
drivers/dma/ti/k3-udma.h | 25 ++
3 files changed, 448 insertions(+), 423 deletions(-)
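
Also for reference while reviewing: UDMA_RESERVE_RESOURCE(tchan)
expands to approximately the function below. Note that "res##" sits
inside a string literal, where ## is not a paste operator, so the
message prints literally (e.g. "res##3 is in use"); that pre-existing
quirk is carried over unchanged, since this patch intends no functional
change.

struct udma_tchan *__udma_reserve_tchan(struct udma_dev *ud,
					enum udma_tp_level tpl,
					int id)
{
	if (id >= 0) {
		/* Explicit request: fail if the channel is already taken */
		if (test_bit(id, ud->tchan_map)) {
			dev_err(ud->dev, "res##%d is in use\n", id);
			return ERR_PTR(-ENOENT);
		}
	} else {
		int start;

		/* Clamp to the highest throughput level that exists */
		if (tpl >= ud->tchan_tpl.levels)
			tpl = ud->tchan_tpl.levels - 1;

		start = ud->tchan_tpl.start_idx[tpl];

		/* Take the first free channel at or above that level */
		id = find_next_zero_bit(ud->tchan_map, ud->tchan_cnt,
					start);
		if (id == ud->tchan_cnt)
			return ERR_PTR(-ENOENT);
	}

	set_bit(id, ud->tchan_map);
	return &ud->tchans[id];
}
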
diff --git a/drivers/dma/ti/k3-udma-common.c b/drivers/dma/ti/k3-udma-common.c
index 13f3d5cec2135..76f8c8cae676b 100644
--- a/drivers/dma/ti/k3-udma-common.c
+++ b/drivers/dma/ti/k3-udma-common.c
@@ -1863,3 +1863,426 @@ enum dmaengine_alignment udma_get_copy_align(struct udma_dev *ud)
}
}
+/**
+ * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
+ * @ud: UDMA device
+ * @from: Start the search from this flow id number
+ * @cnt: Number of consecutive flow ids to allocate
+ *
+ * Allocate a range of RX flow ids for future use. These flows can be
+ * requested only by explicit flow id number. If @from is set to -1 it
+ * will try to find the first free range. If @from is a positive value
+ * it will force allocation only of the specified range of flows.
+ *
+ * Returns -ENOMEM if a free range can't be found,
+ * -EEXIST if the requested range is busy,
+ * -EINVAL if wrong input values are passed.
+ * Returns the flow id on success.
+ */
+int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
+{
+ int start, tmp_from;
+ DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
+
+ tmp_from = from;
+ if (tmp_from < 0)
+ tmp_from = ud->rchan_cnt;
+ /* default flows can't be allocated; they are accessible only by id */
+ if (tmp_from < ud->rchan_cnt)
+ return -EINVAL;
+
+ if (tmp_from + cnt > ud->rflow_cnt)
+ return -EINVAL;
+
+ bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
+ ud->rflow_cnt);
+
+ start = bitmap_find_next_zero_area(tmp,
+ ud->rflow_cnt,
+ tmp_from, cnt, 0);
+ if (start >= ud->rflow_cnt)
+ return -ENOMEM;
+
+ if (from >= 0 && start != from)
+ return -EEXIST;
+
+ bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
+ return start;
+}
+
+int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
+{
+ if (from < ud->rchan_cnt)
+ return -EINVAL;
+ if (from + cnt > ud->rflow_cnt)
+ return -EINVAL;
+
+ bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
+ return 0;
+}
+
+struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
+{
+ /*
+ * An attempt to request an rflow by ID can be made for any rflow that
+ * is not in use, on the assumption that the caller knows what it is
+ * doing. TI-SCI FW will perform an additional permission check anyway,
+ * so it is safe.
+ */
+
+ if (id < 0 || id >= ud->rflow_cnt)
+ return ERR_PTR(-ENOENT);
+
+ if (test_bit(id, ud->rflow_in_use))
+ return ERR_PTR(-ENOENT);
+
+ if (ud->rflow_gp_map) {
+ /* GP rflow has to be allocated first */
+ if (!test_bit(id, ud->rflow_gp_map) &&
+ !test_bit(id, ud->rflow_gp_map_allocated))
+ return ERR_PTR(-EINVAL);
+ }
+
+ dev_dbg(ud->dev, "get rflow%d\n", id);
+ set_bit(id, ud->rflow_in_use);
+ return &ud->rflows[id];
+}
+
+void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
+{
+ if (!test_bit(rflow->id, ud->rflow_in_use)) {
+ dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
+ return;
+ }
+
+ dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
+ clear_bit(rflow->id, ud->rflow_in_use);
+}
+
+#define UDMA_RESERVE_RESOURCE(res) \
+struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
+ enum udma_tp_level tpl, \
+ int id) \
+{ \
+ if (id >= 0) { \
+ if (test_bit(id, ud->res##_map)) { \
+ dev_err(ud->dev, "res##%d is in use\n", id); \
+ return ERR_PTR(-ENOENT); \
+ } \
+ } else { \
+ int start; \
+ \
+ if (tpl >= ud->res##_tpl.levels) \
+ tpl = ud->res##_tpl.levels - 1; \
+ \
+ start = ud->res##_tpl.start_idx[tpl]; \
+ \
+ id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \
+ start); \
+ if (id == ud->res##_cnt) { \
+ return ERR_PTR(-ENOENT); \
+ } \
+ } \
+ \
+ set_bit(id, ud->res##_map); \
+ return &ud->res##s[id]; \
+}
+
+UDMA_RESERVE_RESOURCE(bchan);
+UDMA_RESERVE_RESOURCE(tchan);
+UDMA_RESERVE_RESOURCE(rchan);
+
+int udma_get_tchan(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ int ret;
+
+ if (uc->tchan) {
+ dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
+ uc->id, uc->tchan->id);
+ return 0;
+ }
+
+ /*
+ * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
+ * For PKTDMA mapped channels it is configured to a channel which must
+ * be used to service the peripheral.
+ */
+ uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl,
+ uc->config.mapped_channel_id);
+ if (IS_ERR(uc->tchan)) {
+ ret = PTR_ERR(uc->tchan);
+ uc->tchan = NULL;
+ return ret;
+ }
+
+ if (ud->tflow_cnt) {
+ int tflow_id;
+
+ /* Only PKTDMA has support for tx flows */
+ if (uc->config.default_flow_id >= 0)
+ tflow_id = uc->config.default_flow_id;
+ else
+ tflow_id = uc->tchan->id;
+
+ if (test_bit(tflow_id, ud->tflow_map)) {
+ dev_err(ud->dev, "tflow%d is in use\n", tflow_id);
+ clear_bit(uc->tchan->id, ud->tchan_map);
+ uc->tchan = NULL;
+ return -ENOENT;
+ }
+
+ uc->tchan->tflow_id = tflow_id;
+ set_bit(tflow_id, ud->tflow_map);
+ } else {
+ uc->tchan->tflow_id = -1;
+ }
+
+ return 0;
+}
+
+int udma_get_rchan(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ int ret;
+
+ if (uc->rchan) {
+ dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
+ uc->id, uc->rchan->id);
+ return 0;
+ }
+
+ /*
+ * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
+ * For PKTDMA mapped channels it is configured to a channel which must
+ * be used to service the peripheral.
+ */
+ uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl,
+ uc->config.mapped_channel_id);
+ if (IS_ERR(uc->rchan)) {
+ ret = PTR_ERR(uc->rchan);
+ uc->rchan = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+int udma_get_chan_pair(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ int chan_id, end;
+
+ if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
+ dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
+ uc->id, uc->tchan->id);
+ return 0;
+ }
+
+ if (uc->tchan) {
+ dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
+ uc->id, uc->tchan->id);
+ return -EBUSY;
+ } else if (uc->rchan) {
+ dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
+ uc->id, uc->rchan->id);
+ return -EBUSY;
+ }
+
+ /* Can be optimized, but let's have it like this for now */
+ end = min(ud->tchan_cnt, ud->rchan_cnt);
+ /*
+ * Try to use the highest TPL channel pair for MEM_TO_MEM channels
+ * Note: in UDMAP the channel TPL is symmetric between tchan and rchan
+ */
+ chan_id = ud->tchan_tpl.start_idx[ud->tchan_tpl.levels - 1];
+ for (; chan_id < end; chan_id++) {
+ if (!test_bit(chan_id, ud->tchan_map) &&
+ !test_bit(chan_id, ud->rchan_map))
+ break;
+ }
+
+ if (chan_id == end)
+ return -ENOENT;
+
+ set_bit(chan_id, ud->tchan_map);
+ set_bit(chan_id, ud->rchan_map);
+ uc->tchan = &ud->tchans[chan_id];
+ uc->rchan = &ud->rchans[chan_id];
+
+ /* UDMA does not use tx flows */
+ uc->tchan->tflow_id = -1;
+
+ return 0;
+}
+
+int udma_get_rflow(struct udma_chan *uc, int flow_id)
+{
+ struct udma_dev *ud = uc->ud;
+ int ret;
+
+ if (!uc->rchan) {
+ dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
+ return -EINVAL;
+ }
+
+ if (uc->rflow) {
+ dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
+ uc->id, uc->rflow->id);
+ return 0;
+ }
+
+ uc->rflow = __udma_get_rflow(ud, flow_id);
+ if (IS_ERR(uc->rflow)) {
+ ret = PTR_ERR(uc->rflow);
+ uc->rflow = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+void udma_put_rchan(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+
+ if (uc->rchan) {
+ dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
+ uc->rchan->id);
+ clear_bit(uc->rchan->id, ud->rchan_map);
+ uc->rchan = NULL;
+ }
+}
+
+void udma_put_tchan(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+
+ if (uc->tchan) {
+ dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
+ uc->tchan->id);
+ clear_bit(uc->tchan->id, ud->tchan_map);
+
+ if (uc->tchan->tflow_id >= 0)
+ clear_bit(uc->tchan->tflow_id, ud->tflow_map);
+
+ uc->tchan = NULL;
+ }
+}
+
+void udma_put_rflow(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+
+ if (uc->rflow) {
+ dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
+ uc->rflow->id);
+ __udma_put_rflow(ud, uc->rflow);
+ uc->rflow = NULL;
+ }
+}
+
+void udma_free_tx_resources(struct udma_chan *uc)
+{
+ if (!uc->tchan)
+ return;
+
+ k3_ringacc_ring_free(uc->tchan->t_ring);
+ k3_ringacc_ring_free(uc->tchan->tc_ring);
+ uc->tchan->t_ring = NULL;
+ uc->tchan->tc_ring = NULL;
+
+ udma_put_tchan(uc);
+}
+
+void udma_free_rx_resources(struct udma_chan *uc)
+{
+ if (!uc->rchan)
+ return;
+
+ if (uc->rflow) {
+ struct udma_rflow *rflow = uc->rflow;
+
+ k3_ringacc_ring_free(rflow->fd_ring);
+ k3_ringacc_ring_free(rflow->r_ring);
+ rflow->fd_ring = NULL;
+ rflow->r_ring = NULL;
+
+ udma_put_rflow(uc);
+ }
+
+ udma_put_rchan(uc);
+}
+
+void udma_free_chan_resources(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ struct udma_dev *ud = to_udma_dev(chan->device);
+
+ udma_terminate_all(chan);
+ if (uc->terminated_desc) {
+ ud->reset_chan(uc, false);
+ udma_reset_rings(uc);
+ }
+
+ cancel_delayed_work_sync(&uc->tx_drain.work);
+
+ if (uc->irq_num_ring > 0) {
+ free_irq(uc->irq_num_ring, uc);
+
+ uc->irq_num_ring = 0;
+ }
+ if (uc->irq_num_udma > 0) {
+ free_irq(uc->irq_num_udma, uc);
+
+ uc->irq_num_udma = 0;
+ }
+
+ /* Release PSI-L pairing */
+ if (uc->psil_paired) {
+ navss_psil_unpair(ud, uc->config.src_thread,
+ uc->config.dst_thread);
+ uc->psil_paired = false;
+ }
+
+ vchan_free_chan_resources(&uc->vc);
+ tasklet_kill(&uc->vc.task);
+
+ bcdma_free_bchan_resources(uc);
+ udma_free_tx_resources(uc);
+ udma_free_rx_resources(uc);
+ udma_reset_uchan(uc);
+
+ if (uc->use_dma_pool) {
+ dma_pool_destroy(uc->hdesc_pool);
+ uc->use_dma_pool = false;
+ }
+}
+
+void bcdma_put_bchan(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+
+ if (uc->bchan) {
+ dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id,
+ uc->bchan->id);
+ clear_bit(uc->bchan->id, ud->bchan_map);
+ uc->bchan = NULL;
+ uc->tchan = NULL;
+ }
+}
+
+void bcdma_free_bchan_resources(struct udma_chan *uc)
+{
+ if (!uc->bchan)
+ return;
+
+ k3_ringacc_ring_free(uc->bchan->tc_ring);
+ k3_ringacc_ring_free(uc->bchan->t_ring);
+ uc->bchan->tc_ring = NULL;
+ uc->bchan->t_ring = NULL;
+ k3_configure_chan_coherency(&uc->vc.chan, 0);
+
+ bcdma_put_bchan(uc);
+}
+
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index e102f219ce34e..342530457be8e 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -423,135 +423,6 @@ static irqreturn_t udma_udma_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-/**
- * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
- * @ud: UDMA device
- * @from: Start the search from this flow id number
- * @cnt: Number of consecutive flow ids to allocate
- *
- * Allocate range of RX flow ids for future use, those flows can be requested
- * only using explicit flow id number. if @from is set to -1 it will try to find
- * first free range. if @from is positive value it will force allocation only
- * of the specified range of flows.
- *
- * Returns -ENOMEM if can't find free range.
- * -EEXIST if requested range is busy.
- * -EINVAL if wrong input values passed.
- * Returns flow id on success.
- */
-static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
-{
- int start, tmp_from;
- DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
-
- tmp_from = from;
- if (tmp_from < 0)
- tmp_from = ud->rchan_cnt;
- /* default flows can't be allocated and accessible only by id */
- if (tmp_from < ud->rchan_cnt)
- return -EINVAL;
-
- if (tmp_from + cnt > ud->rflow_cnt)
- return -EINVAL;
-
- bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
- ud->rflow_cnt);
-
- start = bitmap_find_next_zero_area(tmp,
- ud->rflow_cnt,
- tmp_from, cnt, 0);
- if (start >= ud->rflow_cnt)
- return -ENOMEM;
-
- if (from >= 0 && start != from)
- return -EEXIST;
-
- bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
- return start;
-}
-
-static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
-{
- if (from < ud->rchan_cnt)
- return -EINVAL;
- if (from + cnt > ud->rflow_cnt)
- return -EINVAL;
-
- bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
- return 0;
-}
-
-static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
-{
- /*
- * Attempt to request rflow by ID can be made for any rflow
- * if not in use with assumption that caller knows what's doing.
- * TI-SCI FW will perform additional permission check ant way, it's
- * safe
- */
-
- if (id < 0 || id >= ud->rflow_cnt)
- return ERR_PTR(-ENOENT);
-
- if (test_bit(id, ud->rflow_in_use))
- return ERR_PTR(-ENOENT);
-
- if (ud->rflow_gp_map) {
- /* GP rflow has to be allocated first */
- if (!test_bit(id, ud->rflow_gp_map) &&
- !test_bit(id, ud->rflow_gp_map_allocated))
- return ERR_PTR(-EINVAL);
- }
-
- dev_dbg(ud->dev, "get rflow%d\n", id);
- set_bit(id, ud->rflow_in_use);
- return &ud->rflows[id];
-}
-
-static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
-{
- if (!test_bit(rflow->id, ud->rflow_in_use)) {
- dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
- return;
- }
-
- dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
- clear_bit(rflow->id, ud->rflow_in_use);
-}
-
-#define UDMA_RESERVE_RESOURCE(res) \
-static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
- enum udma_tp_level tpl, \
- int id) \
-{ \
- if (id >= 0) { \
- if (test_bit(id, ud->res##_map)) { \
- dev_err(ud->dev, "res##%d is in use\n", id); \
- return ERR_PTR(-ENOENT); \
- } \
- } else { \
- int start; \
- \
- if (tpl >= ud->res##_tpl.levels) \
- tpl = ud->res##_tpl.levels - 1; \
- \
- start = ud->res##_tpl.start_idx[tpl]; \
- \
- id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \
- start); \
- if (id == ud->res##_cnt) { \
- return ERR_PTR(-ENOENT); \
- } \
- } \
- \
- set_bit(id, ud->res##_map); \
- return &ud->res##s[id]; \
-}
-
-UDMA_RESERVE_RESOURCE(bchan);
-UDMA_RESERVE_RESOURCE(tchan);
-UDMA_RESERVE_RESOURCE(rchan);
-
static int bcdma_get_bchan(struct udma_chan *uc)
{
struct udma_dev *ud = uc->ud;
@@ -585,223 +456,6 @@ static int bcdma_get_bchan(struct udma_chan *uc)
return 0;
}
-static int udma_get_tchan(struct udma_chan *uc)
-{
- struct udma_dev *ud = uc->ud;
- int ret;
-
- if (uc->tchan) {
- dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
- uc->id, uc->tchan->id);
- return 0;
- }
-
- /*
- * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
- * For PKTDMA mapped channels it is configured to a channel which must
- * be used to service the peripheral.
- */
- uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl,
- uc->config.mapped_channel_id);
- if (IS_ERR(uc->tchan)) {
- ret = PTR_ERR(uc->tchan);
- uc->tchan = NULL;
- return ret;
- }
-
- if (ud->tflow_cnt) {
- int tflow_id;
-
- /* Only PKTDMA have support for tx flows */
- if (uc->config.default_flow_id >= 0)
- tflow_id = uc->config.default_flow_id;
- else
- tflow_id = uc->tchan->id;
-
- if (test_bit(tflow_id, ud->tflow_map)) {
- dev_err(ud->dev, "tflow%d is in use\n", tflow_id);
- clear_bit(uc->tchan->id, ud->tchan_map);
- uc->tchan = NULL;
- return -ENOENT;
- }
-
- uc->tchan->tflow_id = tflow_id;
- set_bit(tflow_id, ud->tflow_map);
- } else {
- uc->tchan->tflow_id = -1;
- }
-
- return 0;
-}
-
-static int udma_get_rchan(struct udma_chan *uc)
-{
- struct udma_dev *ud = uc->ud;
- int ret;
-
- if (uc->rchan) {
- dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
- uc->id, uc->rchan->id);
- return 0;
- }
-
- /*
- * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
- * For PKTDMA mapped channels it is configured to a channel which must
- * be used to service the peripheral.
- */
- uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl,
- uc->config.mapped_channel_id);
- if (IS_ERR(uc->rchan)) {
- ret = PTR_ERR(uc->rchan);
- uc->rchan = NULL;
- return ret;
- }
-
- return 0;
-}
-
-static int udma_get_chan_pair(struct udma_chan *uc)
-{
- struct udma_dev *ud = uc->ud;
- int chan_id, end;
-
- if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
- dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
- uc->id, uc->tchan->id);
- return 0;
- }
-
- if (uc->tchan) {
- dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
- uc->id, uc->tchan->id);
- return -EBUSY;
- } else if (uc->rchan) {
- dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
- uc->id, uc->rchan->id);
- return -EBUSY;
- }
-
- /* Can be optimized, but let's have it like this for now */
- end = min(ud->tchan_cnt, ud->rchan_cnt);
- /*
- * Try to use the highest TPL channel pair for MEM_TO_MEM channels
- * Note: in UDMAP the channel TPL is symmetric between tchan and rchan
- */
- chan_id = ud->tchan_tpl.start_idx[ud->tchan_tpl.levels - 1];
- for (; chan_id < end; chan_id++) {
- if (!test_bit(chan_id, ud->tchan_map) &&
- !test_bit(chan_id, ud->rchan_map))
- break;
- }
-
- if (chan_id == end)
- return -ENOENT;
-
- set_bit(chan_id, ud->tchan_map);
- set_bit(chan_id, ud->rchan_map);
- uc->tchan = &ud->tchans[chan_id];
- uc->rchan = &ud->rchans[chan_id];
-
- /* UDMA does not use tx flows */
- uc->tchan->tflow_id = -1;
-
- return 0;
-}
-
-static int udma_get_rflow(struct udma_chan *uc, int flow_id)
-{
- struct udma_dev *ud = uc->ud;
- int ret;
-
- if (!uc->rchan) {
- dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
- return -EINVAL;
- }
-
- if (uc->rflow) {
- dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
- uc->id, uc->rflow->id);
- return 0;
- }
-
- uc->rflow = __udma_get_rflow(ud, flow_id);
- if (IS_ERR(uc->rflow)) {
- ret = PTR_ERR(uc->rflow);
- uc->rflow = NULL;
- return ret;
- }
-
- return 0;
-}
-
-static void bcdma_put_bchan(struct udma_chan *uc)
-{
- struct udma_dev *ud = uc->ud;
-
- if (uc->bchan) {
- dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id,
- uc->bchan->id);
- clear_bit(uc->bchan->id, ud->bchan_map);
- uc->bchan = NULL;
- uc->tchan = NULL;
- }
-}
-
-static void udma_put_rchan(struct udma_chan *uc)
-{
- struct udma_dev *ud = uc->ud;
-
- if (uc->rchan) {
- dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
- uc->rchan->id);
- clear_bit(uc->rchan->id, ud->rchan_map);
- uc->rchan = NULL;
- }
-}
-
-static void udma_put_tchan(struct udma_chan *uc)
-{
- struct udma_dev *ud = uc->ud;
-
- if (uc->tchan) {
- dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
- uc->tchan->id);
- clear_bit(uc->tchan->id, ud->tchan_map);
-
- if (uc->tchan->tflow_id >= 0)
- clear_bit(uc->tchan->tflow_id, ud->tflow_map);
-
- uc->tchan = NULL;
- }
-}
-
-static void udma_put_rflow(struct udma_chan *uc)
-{
- struct udma_dev *ud = uc->ud;
-
- if (uc->rflow) {
- dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
- uc->rflow->id);
- __udma_put_rflow(ud, uc->rflow);
- uc->rflow = NULL;
- }
-}
-
-static void bcdma_free_bchan_resources(struct udma_chan *uc)
-{
- if (!uc->bchan)
- return;
-
- k3_ringacc_ring_free(uc->bchan->tc_ring);
- k3_ringacc_ring_free(uc->bchan->t_ring);
- uc->bchan->tc_ring = NULL;
- uc->bchan->t_ring = NULL;
- k3_configure_chan_coherency(&uc->vc.chan, 0);
-
- bcdma_put_bchan(uc);
-}
-
static int bcdma_alloc_bchan_resources(struct udma_chan *uc)
{
struct k3_ring_cfg ring_cfg;
@@ -847,19 +501,6 @@ static int bcdma_alloc_bchan_resources(struct udma_chan *uc)
return ret;
}
-static void udma_free_tx_resources(struct udma_chan *uc)
-{
- if (!uc->tchan)
- return;
-
- k3_ringacc_ring_free(uc->tchan->t_ring);
- k3_ringacc_ring_free(uc->tchan->tc_ring);
- uc->tchan->t_ring = NULL;
- uc->tchan->tc_ring = NULL;
-
- udma_put_tchan(uc);
-}
-
static int udma_alloc_tx_resources(struct udma_chan *uc)
{
struct k3_ring_cfg ring_cfg;
@@ -917,25 +558,6 @@ static int udma_alloc_tx_resources(struct udma_chan *uc)
return ret;
}
-static void udma_free_rx_resources(struct udma_chan *uc)
-{
- if (!uc->rchan)
- return;
-
- if (uc->rflow) {
- struct udma_rflow *rflow = uc->rflow;
-
- k3_ringacc_ring_free(rflow->fd_ring);
- k3_ringacc_ring_free(rflow->r_ring);
- rflow->fd_ring = NULL;
- rflow->r_ring = NULL;
-
- udma_put_rflow(uc);
- }
-
- udma_put_rchan(uc);
-}
-
static int udma_alloc_rx_resources(struct udma_chan *uc)
{
struct udma_dev *ud = uc->ud;
@@ -2024,51 +1646,6 @@ static int udma_resume(struct dma_chan *chan)
return 0;
}
-static void udma_free_chan_resources(struct dma_chan *chan)
-{
- struct udma_chan *uc = to_udma_chan(chan);
- struct udma_dev *ud = to_udma_dev(chan->device);
-
- udma_terminate_all(chan);
- if (uc->terminated_desc) {
- ud->reset_chan(uc, false);
- udma_reset_rings(uc);
- }
-
- cancel_delayed_work_sync(&uc->tx_drain.work);
-
- if (uc->irq_num_ring > 0) {
- free_irq(uc->irq_num_ring, uc);
-
- uc->irq_num_ring = 0;
- }
- if (uc->irq_num_udma > 0) {
- free_irq(uc->irq_num_udma, uc);
-
- uc->irq_num_udma = 0;
- }
-
- /* Release PSI-L pairing */
- if (uc->psil_paired) {
- navss_psil_unpair(ud, uc->config.src_thread,
- uc->config.dst_thread);
- uc->psil_paired = false;
- }
-
- vchan_free_chan_resources(&uc->vc);
- tasklet_kill(&uc->vc.task);
-
- bcdma_free_bchan_resources(uc);
- udma_free_tx_resources(uc);
- udma_free_rx_resources(uc);
- udma_reset_uchan(uc);
-
- if (uc->use_dma_pool) {
- dma_pool_destroy(uc->hdesc_pool);
- uc->use_dma_pool = false;
- }
-}
-
static struct platform_driver udma_driver;
static struct platform_driver bcdma_driver;
static struct platform_driver pktdma_driver;
diff --git a/drivers/dma/ti/k3-udma.h b/drivers/dma/ti/k3-udma.h
index ca3d7aebba13b..75b0348fffc5d 100644
--- a/drivers/dma/ti/k3-udma.h
+++ b/drivers/dma/ti/k3-udma.h
@@ -648,6 +648,31 @@ void udma_dbg_summary_show(struct seq_file *s,
struct dma_device *dma_dev);
#endif /* CONFIG_DEBUG_FS */
+int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt);
+int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt);
+struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id);
+void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow);
+int udma_get_tchan(struct udma_chan *uc);
+int udma_get_rchan(struct udma_chan *uc);
+int udma_get_chan_pair(struct udma_chan *uc);
+int udma_get_rflow(struct udma_chan *uc, int flow_id);
+void udma_put_rchan(struct udma_chan *uc);
+void udma_put_tchan(struct udma_chan *uc);
+void udma_put_rflow(struct udma_chan *uc);
+void udma_free_tx_resources(struct udma_chan *uc);
+void udma_free_rx_resources(struct udma_chan *uc);
+void udma_free_chan_resources(struct dma_chan *chan);
+void bcdma_put_bchan(struct udma_chan *uc);
+void bcdma_free_bchan_resources(struct udma_chan *uc);
+
+#define _UDMA_RESERVE_RESOURCE(res) \
+struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
+ enum udma_tp_level tpl, \
+ int id);
+_UDMA_RESERVE_RESOURCE(bchan);
+_UDMA_RESERVE_RESOURCE(tchan);
+_UDMA_RESERVE_RESOURCE(rchan);
+
/* Direct access to UDMA low lever resources for the glue layer */
int xudma_navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread);
int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
--
2.34.1