Date:   Tue, 24 Mar 2020 19:47:58 +0100
From:   Michael Walle <michael@...le.cc>
To:     linux-serial@...r.kernel.org, linux-kernel@...r.kernel.org
Cc:     Jiri Slaby <jslaby@...e.com>,
        Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
        Michael Walle <michael@...le.cc>,
        Leonard Crestez <leonard.crestez@....com>
Subject: [RFC PATCH 3/3] tty: serial: fsl_lpuart: fix possible console deadlock

If the kernel console output is routed to this UART, any
dev_{err,warn,info}() call may result in a deadlock when the
sport->port.lock spinlock is already held, because the driver's
_console_write() callbacks try to acquire this lock, too. Remove the
error messages that are printed while the spinlock is held, or print
them only after the lock has been released.
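
For illustration only (not part of this patch): a minimal user-space
sketch of the lock recursion. A pthread spinlock stands in for
sport->port.lock, and the helper names (console_write_analogue,
dev_err_analogue, rx_path) are made up for the example; they only
mirror the call chain described above, not the actual driver code.

	/*
	 * Build with: gcc -pthread deadlock_demo.c -o deadlock_demo
	 * On typical implementations this hangs forever in
	 * console_write_analogue(), which is the point of the demo.
	 */
	#include <pthread.h>
	#include <stdio.h>

	static pthread_spinlock_t port_lock;	/* stands in for sport->port.lock */

	/* stands in for the console write callback: it takes the port lock */
	static void console_write_analogue(const char *msg)
	{
		pthread_spin_lock(&port_lock);	/* already held by our caller: spins forever */
		fputs(msg, stderr);
		pthread_spin_unlock(&port_lock);
	}

	/* stands in for dev_err() when the kernel console is on the same UART */
	static void dev_err_analogue(const char *msg)
	{
		console_write_analogue(msg);
	}

	/* stands in for a driver path that logs while holding the port lock */
	static void rx_path(void)
	{
		pthread_spin_lock(&port_lock);
		/* ... DMA status check fails ... */
		dev_err_analogue("Rx DMA transfer failed!\n");	/* deadlock here */
		pthread_spin_unlock(&port_lock);
	}

	int main(void)
	{
		pthread_spin_init(&port_lock, PTHREAD_PROCESS_PRIVATE);
		rx_path();
		return 0;
	}

This is why the patch either drops the dev_err() calls on paths that
hold the lock, or moves them after spin_unlock_irqrestore().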

Reported-by: Leonard Crestez <leonard.crestez@....com>
Signed-off-by: Michael Walle <michael@...le.cc>
---
 drivers/tty/serial/fsl_lpuart.c | 35 +++++++--------------------------
 1 file changed, 7 insertions(+), 28 deletions(-)

diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index bbba298b68a4..0910308b38b1 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -420,7 +420,6 @@ static void lpuart_dma_tx(struct lpuart_port *sport)
 {
 	struct circ_buf *xmit = &sport->port.state->xmit;
 	struct scatterlist *sgl = sport->tx_sgl;
-	struct device *dev = sport->port.dev;
 	struct dma_chan *chan = sport->dma_tx_chan;
 	int ret;
 
@@ -442,10 +441,8 @@ static void lpuart_dma_tx(struct lpuart_port *sport)
 
 	ret = dma_map_sg(chan->device->dev, sgl, sport->dma_tx_nents,
 			 DMA_TO_DEVICE);
-	if (!ret) {
-		dev_err(dev, "DMA mapping error for TX.\n");
+	if (!ret)
 		return;
-	}
 
 	sport->dma_tx_desc = dmaengine_prep_slave_sg(chan, sgl,
 					ret, DMA_MEM_TO_DEV,
@@ -453,7 +450,6 @@ static void lpuart_dma_tx(struct lpuart_port *sport)
 	if (!sport->dma_tx_desc) {
 		dma_unmap_sg(chan->device->dev, sgl, sport->dma_tx_nents,
 			      DMA_TO_DEVICE);
-		dev_err(dev, "Cannot prepare TX slave DMA!\n");
 		return;
 	}
 
@@ -520,21 +516,12 @@ static int lpuart_dma_tx_request(struct uart_port *port)
 	struct lpuart_port *sport = container_of(port,
 					struct lpuart_port, port);
 	struct dma_slave_config dma_tx_sconfig = {};
-	int ret;
 
 	dma_tx_sconfig.dst_addr = lpuart_dma_datareg_addr(sport);
 	dma_tx_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
 	dma_tx_sconfig.dst_maxburst = 1;
 	dma_tx_sconfig.direction = DMA_MEM_TO_DEV;
-	ret = dmaengine_slave_config(sport->dma_tx_chan, &dma_tx_sconfig);
-
-	if (ret) {
-		dev_err(sport->port.dev,
-				"DMA slave config failed, err = %d\n", ret);
-		return ret;
-	}
-
-	return 0;
+	return dmaengine_slave_config(sport->dma_tx_chan, &dma_tx_sconfig);
 }
 
 static bool lpuart_is_32(struct lpuart_port *sport)
@@ -1074,8 +1061,8 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
 
 	dmastat = dmaengine_tx_status(chan, sport->dma_rx_cookie, &state);
 	if (dmastat == DMA_ERROR) {
-		dev_err(sport->port.dev, "Rx DMA transfer failed!\n");
 		spin_unlock_irqrestore(&sport->port.lock, flags);
+		dev_err(sport->port.dev, "Rx DMA transfer failed!\n");
 		return;
 	}
 
@@ -1179,23 +1166,17 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
 	sg_init_one(&sport->rx_sgl, ring->buf, sport->rx_dma_rng_buf_len);
 	nent = dma_map_sg(chan->device->dev, &sport->rx_sgl, 1,
 			  DMA_FROM_DEVICE);
-
-	if (!nent) {
-		dev_err(sport->port.dev, "DMA Rx mapping error\n");
+	if (!nent)
 		return -EINVAL;
-	}
 
 	dma_rx_sconfig.src_addr = lpuart_dma_datareg_addr(sport);
 	dma_rx_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
 	dma_rx_sconfig.src_maxburst = 1;
 	dma_rx_sconfig.direction = DMA_DEV_TO_MEM;
-	ret = dmaengine_slave_config(chan, &dma_rx_sconfig);
 
-	if (ret < 0) {
-		dev_err(sport->port.dev,
-				"DMA Rx slave config failed, err = %d\n", ret);
+	ret = dmaengine_slave_config(chan, &dma_rx_sconfig);
+	if (ret < 0)
 		return ret;
-	}
 
 	sport->dma_rx_desc = dmaengine_prep_dma_cyclic(chan,
 				 sg_dma_address(&sport->rx_sgl),
@@ -1203,10 +1184,8 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
 				 sport->rx_sgl.length / 2,
 				 DMA_DEV_TO_MEM,
 				 DMA_PREP_INTERRUPT);
-	if (!sport->dma_rx_desc) {
-		dev_err(sport->port.dev, "Cannot prepare cyclic DMA\n");
+	if (!sport->dma_rx_desc)
 		return -EFAULT;
-	}
 
 	sport->dma_rx_desc->callback = lpuart_dma_rx_complete;
 	sport->dma_rx_desc->callback_param = sport;
-- 
2.20.1