Message-Id: <20251128-qcom-qce-cmd-descr-v9-3-9a5f72b89722@linaro.org>
Date: Fri, 28 Nov 2025 12:44:01 +0100
From: Bartosz Golaszewski <brgl@...ev.pl>
To: Vinod Koul <vkoul@...nel.org>, Jonathan Corbet <corbet@....net>,
Thara Gopinath <thara.gopinath@...il.com>,
Herbert Xu <herbert@...dor.apana.org.au>,
"David S. Miller" <davem@...emloft.net>,
Udit Tiwari <quic_utiwari@...cinc.com>,
Daniel Perez-Zoghbi <dperezzo@...cinc.com>,
Md Sadre Alam <mdalam@....qualcomm.com>,
Dmitry Baryshkov <lumag@...nel.org>
Cc: dmaengine@...r.kernel.org, linux-doc@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-arm-msm@...r.kernel.org,
linux-crypto@...r.kernel.org,
Bartosz Golaszewski <bartosz.golaszewski@...aro.org>
Subject: [PATCH v9 03/11] dmaengine: qcom: bam_dma: implement support for
BAM locking
From: Bartosz Golaszewski <bartosz.golaszewski@...aro.org>

Use metadata operations in DMA descriptors to allow BAM users to pass
additional information to the engine. To that end, define a new
structure - struct bam_desc_metadata - to carry that information, and
add two new commands: one for locking and one for unlocking the BAM.
Handle the locking in the .attach() callback.
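
For illustration only (not part of this patch): a minimal sketch of how a
BAM consumer could request the pipe lock - it preps a single dummy write,
attaches the metadata before submitting and lets the .attach() callback set
the lock flag on the hardware descriptor. The function name, the
caller-provided dummy scatterlist and the error handling below are
assumptions made for the example:

#include <linux/dmaengine.h>
#include <linux/dma/qcom_bam_dma.h>
#include <linux/scatterlist.h>

static int example_bam_pipe_lock(struct dma_chan *chan,
				 struct scatterlist *dummy_sg)
{
	struct dma_async_tx_descriptor *desc;
	struct bam_desc_metadata meta = {
		.op = BAM_META_CMD_LOCK,
		.chan = chan,
	};
	dma_cookie_t cookie;
	int ret;

	/* One dummy write only: the lock flag is set on its single HW descriptor. */
	desc = dmaengine_prep_slave_sg(chan, dummy_sg, 1, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT);
	if (!desc)
		return -EINVAL;

	/* Routed to bam_metadata_attach() via the descriptor's metadata_ops. */
	ret = dmaengine_desc_attach_metadata(desc, &meta, sizeof(meta));
	if (ret)
		return ret;

	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret)
		return ret;

	dma_async_issue_pending(chan);

	return 0;
}

Unlocking follows the same pattern with BAM_META_CMD_UNLOCK.
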
Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@...aro.org>
---
drivers/dma/qcom/bam_dma.c | 59 +++++++++++++++++++++++++++++++++++++++-
include/linux/dma/qcom_bam_dma.h | 12 ++++++++
2 files changed, 70 insertions(+), 1 deletion(-)
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index c9ae1fffe44d79c5eb59b8bbf7f147a8fa3aa0bd..d1dc80b29818897b333cd223ec7306a169cc51fd 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -30,6 +30,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
+#include <linux/dma/qcom_bam_dma.h>
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/platform_device.h>
@@ -391,6 +392,8 @@ struct bam_chan {
struct list_head desc_list;
struct list_head node;
+
+	bool bam_locked;
};
static inline struct bam_chan *to_bam_chan(struct dma_chan *common)
@@ -655,6 +658,53 @@ static int bam_slave_config(struct dma_chan *chan,
return 0;
}
+static int bam_metadata_attach(struct dma_async_tx_descriptor *desc, void *data, size_t len)
+{
+	struct virt_dma_desc *vd = container_of(desc, struct virt_dma_desc, tx);
+	struct bam_async_desc *async_desc = container_of(vd, struct bam_async_desc, vd);
+	struct bam_desc_hw *hw_desc = async_desc->desc;
+	struct bam_desc_metadata *metadata = data;
+	struct bam_chan *bchan = to_bam_chan(metadata->chan);
+	struct bam_device *bdev = bchan->bdev;
+
+	if (!data)
+		return -EINVAL;
+
+	if (metadata->op == BAM_META_CMD_LOCK || metadata->op == BAM_META_CMD_UNLOCK) {
+		if (!bdev->dev_data->bam_pipe_lock)
+			return -EOPNOTSUPP;
+
+		/* Expecting a dummy write when locking, only one descriptor allowed. */
+		if (async_desc->num_desc != 1)
+			return -EINVAL;
+	}
+
+	switch (metadata->op) {
+	case BAM_META_CMD_LOCK:
+		if (bchan->bam_locked)
+			return -EBUSY;
+
+		hw_desc->flags |= DESC_FLAG_LOCK;
+		bchan->bam_locked = true;
+		break;
+	case BAM_META_CMD_UNLOCK:
+		if (!bchan->bam_locked)
+			return -EPERM;
+
+		hw_desc->flags |= DESC_FLAG_UNLOCK;
+		bchan->bam_locked = false;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static struct dma_descriptor_metadata_ops bam_metadata_ops = {
+	.attach = bam_metadata_attach,
+};
+
/**
* bam_prep_slave_sg - Prep slave sg transaction
*
@@ -671,6 +721,7 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
void *context)
{
struct bam_chan *bchan = to_bam_chan(chan);
+	struct dma_async_tx_descriptor *tx_desc;
struct bam_device *bdev = bchan->bdev;
struct bam_async_desc *async_desc;
struct scatterlist *sg;
@@ -732,7 +783,12 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
} while (remainder > 0);
}
-	return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
+	tx_desc = vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
+	if (!tx_desc)
+		return NULL;
+
+	tx_desc->metadata_ops = &bam_metadata_ops;
+	return tx_desc;
}
/**
@@ -1372,6 +1428,7 @@ static int bam_dma_probe(struct platform_device *pdev)
bdev->common.device_terminate_all = bam_dma_terminate_all;
bdev->common.device_issue_pending = bam_issue_pending;
bdev->common.device_tx_status = bam_tx_status;
+	bdev->common.desc_metadata_modes = DESC_METADATA_CLIENT;
bdev->common.dev = bdev->dev;
ret = dma_async_device_register(&bdev->common);
diff --git a/include/linux/dma/qcom_bam_dma.h b/include/linux/dma/qcom_bam_dma.h
index 68fc0e643b1b97fe4520d5878daa322b81f4f559..dd30bb9c520fac7bd98c5a47e56a5a286331461a 100644
--- a/include/linux/dma/qcom_bam_dma.h
+++ b/include/linux/dma/qcom_bam_dma.h
@@ -8,6 +8,8 @@
#include <asm/byteorder.h>
+struct dma_chan;
+
/*
* This data type corresponds to the native Command Element
* supported by BAM DMA Engine.
@@ -34,6 +36,16 @@ enum bam_command_type {
BAM_READ_COMMAND,
};
+enum bam_desc_metadata_op {
+	BAM_META_CMD_LOCK = 1,
+	BAM_META_CMD_UNLOCK,
+};
+
+struct bam_desc_metadata {
+	enum bam_desc_metadata_op op;
+	struct dma_chan *chan;
+};
+
/*
* prep_bam_ce_le32 - Wrapper function to prepare a single BAM command
* element with the data already in le32 format.
--
2.51.0