lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [day] [month] [year] [list]
Date:   Mon, 16 Nov 2020 08:14:53 +0000
From:   jackygam2001 <jacky_gam_2001@....com>
To:     hch@....de, sagi@...mberg.me
Cc:     linux-nvme@...ts.infradead.org, linux-kernel@...r.kernel.org,
        jackygam2001 <jacky_gam_2001@....com>
Subject: [PATCH] nvmet-rdma: fix kernel panic caused by a non-empty wait list when freeing a queue

In our lab, while the target was processing a queue's connection
(queue->state == NVMET_RDMA_Q_CONNECTING), the host sent a command
that was put on the target's wait list. Before that command was
processed, the host sent a disconnect to the target. While handling
the disconnect, the target scheduled a kworker to free the
pre-allocated rsps array, which caused a kernel panic because some
of the rsps were still linked on the wait list.

Signed-off-by: jackygam2001 <jacky_gam_2001@....com>
---
 drivers/nvme/target/rdma.c | 31 +++++++++++++++++++++++++++++++
 1 file changed, 31 insertions(+)

diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index ae6620489457..c6c892a43f68 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1335,6 +1335,35 @@ static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
 		       queue->send_queue_size + 1);
 }
 
+/*
+ * Drain queue->rsp_wait_list before the rsps array is freed: unlink every
+ * rsp still parked on the wait list and hand it back via
+ * nvmet_rdma_put_rsp(), so nvmet_rdma_free_rsps() never frees an rsp that
+ * is still linked on a live list (the use-after-free/panic described in
+ * the changelog).
+ *
+ * NOTE(review): uses the irqsave form of state_lock — presumably because
+ * state_lock is also taken from completion (IRQ) context elsewhere in the
+ * driver; confirm against the other state_lock users.
+ */
+static void nvmet_rdma_destroy_queue_wait_list(struct nvmet_rdma_queue *queue)
+{
+	unsigned long flags;
+	struct nvmet_rdma_rsp *rsp;
+
+	spin_lock_irqsave(&queue->state_lock, flags);
+	while (!list_empty(&queue->rsp_wait_list)) {
+		/* Pop from the head until the list is empty. */
+		rsp = list_first_entry(&queue->rsp_wait_list,
+				struct nvmet_rdma_rsp, wait_list);
+		list_del(&rsp->wait_list);
+		nvmet_rdma_put_rsp(rsp);
+	}
+	spin_unlock_irqrestore(&queue->state_lock, flags);
+}
+
+/*
+ * Same teardown as nvmet_rdma_destroy_queue_wait_list(), but for
+ * queue->rsp_wr_wait_list (rsps waiting for send-WR credits): unlink each
+ * entry and return it via nvmet_rdma_put_rsp() so the rsps array can be
+ * freed safely afterwards.
+ *
+ * NOTE(review): plain spin_lock here vs spin_lock_irqsave in the sibling
+ * function above — only safe if rsp_wr_wait_lock is never taken from IRQ
+ * context; verify against the other rsp_wr_wait_lock users in this file.
+ */
+static void nvmet_rdma_destroy_queue_wr_wait_list(struct nvmet_rdma_queue *queue)
+{
+	struct nvmet_rdma_rsp *rsp;
+
+	spin_lock(&queue->rsp_wr_wait_lock);
+	while (!list_empty(&queue->rsp_wr_wait_list)) {
+		/* Pop from the head until the list is empty. */
+		rsp = list_first_entry(&queue->rsp_wr_wait_list,
+				struct nvmet_rdma_rsp, wait_list);
+		list_del(&rsp->wait_list);
+		nvmet_rdma_put_rsp(rsp);
+	}
+	spin_unlock(&queue->rsp_wr_wait_lock);
+}
+
 static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
 {
 	pr_debug("freeing queue %d\n", queue->idx);
@@ -1347,6 +1376,8 @@ static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
 				queue->recv_queue_size,
 				!queue->host_qid);
 	}
+	nvmet_rdma_destroy_queue_wr_wait_list(queue);
+	nvmet_rdma_destroy_queue_wait_list(queue);
 	nvmet_rdma_free_rsps(queue);
 	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
 	kfree(queue);
-- 
2.17.1


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ