Message-Id: <1475817915-11976-6-git-send-email-okaya@codeaurora.org>
Date:   Fri,  7 Oct 2016 01:25:10 -0400
From:   Sinan Kaya <okaya@...eaurora.org>
To:     dmaengine@...r.kernel.org, timur@...eaurora.org,
        devicetree@...r.kernel.org, cov@...eaurora.org,
        vinod.koul@...el.com, jcm@...hat.com
Cc:     agross@...eaurora.org, arnd@...db.de,
        linux-arm-msm@...r.kernel.org,
        linux-arm-kernel@...ts.infradead.org,
        Sinan Kaya <okaya@...eaurora.org>,
        Dan Williams <dan.j.williams@...el.com>,
        Andy Shevchenko <andy.shevchenko@...il.com>,
        linux-kernel@...r.kernel.org
Subject: [PATCH V5 05/10] dmaengine: qcom_hidma: make pending_tre_count atomic

Prepare for the addition of MSI interrupts. The pending_tre_count
variable is used in the interrupt handler to make sure all outstanding
requests are serviced.

The driver will allocate 11 MSI interrupts, each of which can be
assigned to a different CPU. All vectors share the same interrupt
handler, differing only in their cause bit, so the handler can run in
parallel on multiple CPUs and race on shared variables. Make
pending_tre_count atomic so that it can be updated safely from
multiple processor contexts.

Signed-off-by: Sinan Kaya <okaya@...eaurora.org>
---
 drivers/dma/qcom/hidma.h     |  2 +-
 drivers/dma/qcom/hidma_dbg.c |  3 ++-
 drivers/dma/qcom/hidma_ll.c  | 13 ++++++-------
 3 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h
index b209942..afaeb9a 100644
--- a/drivers/dma/qcom/hidma.h
+++ b/drivers/dma/qcom/hidma.h
@@ -58,7 +58,7 @@ struct hidma_lldev {
 	void __iomem *evca;		/* Event Channel address          */
 	struct hidma_tre
 		**pending_tre_list;	/* Pointers to pending TREs	  */
-	s32 pending_tre_count;		/* Number of TREs pending	  */
+	atomic_t pending_tre_count;	/* Number of TREs pending	  */
 
 	void *tre_ring;			/* TRE ring			  */
 	dma_addr_t tre_dma;		/* TRE ring to be shared with HW  */
diff --git a/drivers/dma/qcom/hidma_dbg.c b/drivers/dma/qcom/hidma_dbg.c
index 3d83b99..3bdcb80 100644
--- a/drivers/dma/qcom/hidma_dbg.c
+++ b/drivers/dma/qcom/hidma_dbg.c
@@ -74,7 +74,8 @@ static void hidma_ll_devstats(struct seq_file *s, void *llhndl)
 	seq_printf(s, "tre_ring_handle=%pap\n", &lldev->tre_dma);
 	seq_printf(s, "tre_ring_size = 0x%x\n", lldev->tre_ring_size);
 	seq_printf(s, "tre_processed_off = 0x%x\n", lldev->tre_processed_off);
-	seq_printf(s, "pending_tre_count=%d\n", lldev->pending_tre_count);
+	seq_printf(s, "pending_tre_count=%d\n",
+			atomic_read(&lldev->pending_tre_count));
 	seq_printf(s, "evca=%p\n", lldev->evca);
 	seq_printf(s, "evre_ring=%p\n", lldev->evre_ring);
 	seq_printf(s, "evre_ring_handle=%pap\n", &lldev->evre_dma);
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
index ad20dfb..a4fc941 100644
--- a/drivers/dma/qcom/hidma_ll.c
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -218,10 +218,9 @@ static int hidma_post_completed(struct hidma_lldev *lldev, int tre_iterator,
 	 * Keep track of pending TREs that SW is expecting to receive
 	 * from HW. We got one now. Decrement our counter.
 	 */
-	lldev->pending_tre_count--;
-	if (lldev->pending_tre_count < 0) {
+	if (atomic_dec_return(&lldev->pending_tre_count) < 0) {
 		dev_warn(lldev->dev, "tre count mismatch on completion");
-		lldev->pending_tre_count = 0;
+		atomic_set(&lldev->pending_tre_count, 0);
 	}
 
 	spin_unlock_irqrestore(&lldev->lock, flags);
@@ -321,7 +320,7 @@ void hidma_cleanup_pending_tre(struct hidma_lldev *lldev, u8 err_info,
 	u32 tre_read_off;
 
 	tre_iterator = lldev->tre_processed_off;
-	while (lldev->pending_tre_count) {
+	while (atomic_read(&lldev->pending_tre_count)) {
 		if (hidma_post_completed(lldev, tre_iterator, err_info,
 					 err_code))
 			break;
@@ -564,7 +563,7 @@ void hidma_ll_queue_request(struct hidma_lldev *lldev, u32 tre_ch)
 	tre->err_code = 0;
 	tre->err_info = 0;
 	tre->queued = 1;
-	lldev->pending_tre_count++;
+	atomic_inc(&lldev->pending_tre_count);
 	lldev->tre_write_offset = (lldev->tre_write_offset + HIDMA_TRE_SIZE)
 					% lldev->tre_ring_size;
 	spin_unlock_irqrestore(&lldev->lock, flags);
@@ -670,7 +669,7 @@ int hidma_ll_setup(struct hidma_lldev *lldev)
 	u32 val;
 	u32 nr_tres = lldev->nr_tres;
 
-	lldev->pending_tre_count = 0;
+	atomic_set(&lldev->pending_tre_count, 0);
 	lldev->tre_processed_off = 0;
 	lldev->evre_processed_off = 0;
 	lldev->tre_write_offset = 0;
@@ -834,7 +833,7 @@ int hidma_ll_uninit(struct hidma_lldev *lldev)
 	tasklet_kill(&lldev->rst_task);
 	memset(lldev->trepool, 0, required_bytes);
 	lldev->trepool = NULL;
-	lldev->pending_tre_count = 0;
+	atomic_set(&lldev->pending_tre_count, 0);
 	lldev->tre_write_offset = 0;
 
 	rc = hidma_ll_reset(lldev);
-- 
1.9.1
