Message-ID: <20190910115037.23539-4-peter.ujfalusi@ti.com>
Date:   Tue, 10 Sep 2019 14:50:37 +0300
From:   Peter Ujfalusi <peter.ujfalusi@...com>
To:     <vkoul@...nel.org>, <robh+dt@...nel.org>
CC:     <dmaengine@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
        <dan.j.williams@...el.com>, <devicetree@...r.kernel.org>
Subject: [PATCH 3/3] dmaengine: Support for requesting channels preferring DMA domain controller

When the channel is not requested via the slave API, use
of_find_dma_domain() to check whether a system default DMA controller
is specified.

Add a new function that clients can use to request channels by mask
from their DMA domain controller, if one is specified.

Client drivers can take advantage of the domain support by moving from
dma_request_chan_by_mask() to dma_request_chan_by_domain().
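
As a sketch (the device pointer and capability mask below are
illustrative, not taken from any specific driver), a memcpy client
would change from:

	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	chan = dma_request_chan_by_mask(&mask);

to:

	chan = dma_request_chan_by_domain(dev, &mask);

so the channel is picked from the DMA domain controller associated with
the client's device when one is specified in DT; without a domain the
lookup should fall back to the existing global search.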

Signed-off-by: Peter Ujfalusi <peter.ujfalusi@...com>
---
 drivers/dma/dmaengine.c   | 21 ++++++++++++++++-----
 include/linux/dmaengine.h |  9 ++++++---
 2 files changed, 22 insertions(+), 8 deletions(-)

diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 03ac4b96117c..1bae3ff24da0 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -640,6 +640,10 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 	struct dma_device *device, *_d;
 	struct dma_chan *chan = NULL;
 
+	/* If np is not specified, get the default DMA domain controller */
+	if (!np)
+		np = of_find_dma_domain(NULL);
+
 	/* Find a channel */
 	mutex_lock(&dma_list_mutex);
 	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
@@ -751,19 +755,26 @@ struct dma_chan *dma_request_slave_channel(struct device *dev,
 EXPORT_SYMBOL_GPL(dma_request_slave_channel);
 
 /**
- * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
- * @mask: capabilities that the channel must satisfy
+ * dma_request_chan_by_domain - allocate a channel by mask from DMA domain
+ * @dev:	pointer to client device structure
+ * @mask:	capabilities that the channel must satisfy
  *
  * Returns pointer to appropriate DMA channel on success or an error pointer.
  */
-struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
+struct dma_chan *dma_request_chan_by_domain(struct device *dev,
+					    const dma_cap_mask_t *mask)
 {
 	struct dma_chan *chan;
 
 	if (!mask)
 		return ERR_PTR(-ENODEV);
 
-	chan = __dma_request_channel(mask, NULL, NULL, NULL);
+	if (dev)
+		chan = __dma_request_channel(mask, NULL, NULL,
+					     of_find_dma_domain(dev->of_node));
+	else
+		chan = __dma_request_channel(mask, NULL, NULL, NULL);
+
 	if (!chan) {
 		mutex_lock(&dma_list_mutex);
 		if (list_empty(&dma_device_list))
@@ -775,7 +786,7 @@ struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
 
 	return chan;
 }
-EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
+EXPORT_SYMBOL_GPL(dma_request_chan_by_domain);
 
 void dma_release_channel(struct dma_chan *chan)
 {
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 8fcdee1c0cf9..de5c52810443 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -1307,7 +1307,8 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
 
 struct dma_chan *dma_request_chan(struct device *dev, const char *name);
-struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);
+struct dma_chan *dma_request_chan_by_domain(struct device *dev,
+					    const dma_cap_mask_t *mask);
 
 void dma_release_channel(struct dma_chan *chan);
 int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
@@ -1344,8 +1345,8 @@ static inline struct dma_chan *dma_request_chan(struct device *dev,
 {
 	return ERR_PTR(-ENODEV);
 }
-static inline struct dma_chan *dma_request_chan_by_mask(
-						const dma_cap_mask_t *mask)
+static inline struct dma_chan *dma_request_chan_by_domain(struct device *dev,
+			const dma_cap_mask_t *mask)
 {
 	return ERR_PTR(-ENODEV);
 }
@@ -1406,6 +1407,8 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
 	__dma_request_channel(&(mask), x, y, NULL)
 #define dma_request_slave_channel_compat(mask, x, y, dev, name) \
 	__dma_request_slave_channel_compat(&(mask), x, y, dev, name)
+#define dma_request_chan_by_mask(mask) \
+	dma_request_chan_by_domain(NULL, mask)
 
 static inline struct dma_chan
 *__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
-- 
Peter

Texas Instruments Finland Oy, Porkkalankatu 22, 00180 Helsinki.
Y-tunnus/Business ID: 0615521-4. Kotipaikka/Domicile: Helsinki
