Message-Id: <20190904175314.652104086@linuxfoundation.org>
Date: Wed, 4 Sep 2019 19:52:37 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org,
Nicolas Morey-Chaisemartin <NMoreyChaisemartin@...e.com>,
Max Gurtovoy <maxg@...lanox.com>,
Hannes Reinecke <hare@...e.com>,
Sagi Grimberg <sagi@...mberg.me>,
Sasha Levin <sashal@...nel.org>
Subject: [PATCH 5.2 014/143] nvme-rdma: fix possible use-after-free in connect error flow
[ Upstream commit d94211b8bad3787e0655a67284105f57db728cb1 ]
When start_queue() fails, we need to make sure to drain the queue's CQ
before freeing the RDMA resources, because we might still race with the
completion path. Have the start_queue() error path safely stop the
queue.
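
For illustration, a minimal sketch of the ordering the fix enforces on
connect failure; example_start_queue(), example_connect(), struct
example_queue and EXAMPLE_Q_LIVE are hypothetical placeholders, while
rdma_disconnect() and ib_drain_qp() are the real RDMA core helpers the
patch uses:

/*
 * Illustrative sketch only -- not the actual driver code.
 */
static int example_start_queue(struct example_queue *queue)
{
	int ret;

	ret = example_connect(queue);	/* hypothetical fabrics connect */
	if (ret) {
		/*
		 * Quiesce the QP before the caller frees the RDMA
		 * resources; otherwise the completion path may still
		 * touch them and trigger a use-after-free.
		 */
		rdma_disconnect(queue->cm_id);
		ib_drain_qp(queue->qp);
		return ret;
	}

	set_bit(EXAMPLE_Q_LIVE, &queue->flags);
	return 0;
}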
--
[30371.808111] nvme nvme1: Failed reconnect attempt 11
[30371.808113] nvme nvme1: Reconnecting in 10 seconds...
[...]
[30382.069315] nvme nvme1: creating 4 I/O queues.
[30382.257058] nvme nvme1: Connect Invalid SQE Parameter, qid 4
[30382.257061] nvme nvme1: failed to connect queue: 4 ret=386
[30382.305001] BUG: unable to handle kernel NULL pointer dereference at 0000000000000018
[30382.305022] IP: qedr_poll_cq+0x8a3/0x1170 [qedr]
[30382.305028] PGD 0 P4D 0
[30382.305037] Oops: 0000 [#1] SMP PTI
[...]
[30382.305153] Call Trace:
[30382.305166] ? __switch_to_asm+0x34/0x70
[30382.305187] __ib_process_cq+0x56/0xd0 [ib_core]
[30382.305201] ib_poll_handler+0x26/0x70 [ib_core]
[30382.305213] irq_poll_softirq+0x88/0x110
[30382.305223] ? sort_range+0x20/0x20
[30382.305232] __do_softirq+0xde/0x2c6
[30382.305241] ? sort_range+0x20/0x20
[30382.305249] run_ksoftirqd+0x1c/0x60
[30382.305258] smpboot_thread_fn+0xef/0x160
[30382.305265] kthread+0x113/0x130
[30382.305273] ? kthread_create_worker_on_cpu+0x50/0x50
[30382.305281] ret_from_fork+0x35/0x40
--
Reported-by: Nicolas Morey-Chaisemartin <NMoreyChaisemartin@...e.com>
Reviewed-by: Max Gurtovoy <maxg@...lanox.com>
Reviewed-by: Hannes Reinecke <hare@...e.com>
Signed-off-by: Sagi Grimberg <sagi@...mberg.me>
Signed-off-by: Sasha Levin <sashal@...nel.org>
---
drivers/nvme/host/rdma.c | 16 +++++++++++-----
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 97f668a39ae1c..7b074323bcdf2 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -562,13 +562,17 @@ out_destroy_cm_id:
 	return ret;
 }
 
+static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
+{
+	rdma_disconnect(queue->cm_id);
+	ib_drain_qp(queue->qp);
+}
+
 static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
 {
 	if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
 		return;
-
-	rdma_disconnect(queue->cm_id);
-	ib_drain_qp(queue->qp);
+	__nvme_rdma_stop_queue(queue);
 }
 
 static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
@@ -607,11 +611,13 @@ static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
 	else
 		ret = nvmf_connect_admin_queue(&ctrl->ctrl);
 
-	if (!ret)
+	if (!ret) {
 		set_bit(NVME_RDMA_Q_LIVE, &queue->flags);
-	else
+	} else {
+		__nvme_rdma_stop_queue(queue);
 		dev_info(ctrl->ctrl.device,
 			"failed to connect queue: %d ret=%d\n", idx, ret);
+	}
 
 	return ret;
 }
--
2.20.1