Message-ID: <20200602124824.4293d0dc@canb.auug.org.au>
Date: Tue, 2 Jun 2020 12:48:24 +1000
From: Stephen Rothwell <sfr@...b.auug.org.au>
To: Jens Axboe <axboe@...nel.dk>, Doug Ledford <dledford@...hat.com>,
Jason Gunthorpe <jgg@...lanox.com>
Cc: Linux Next Mailing List <linux-next@...r.kernel.org>,
Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
Yamin Friedman <yaminf@...lanox.com>,
Max Gurtovoy <maxg@...lanox.com>,
Christoph Hellwig <hch@....de>
Subject: linux-next: manual merge of the block tree with the rdma tree

Hi all,

Today's linux-next merge of the block tree got a conflict in:

  drivers/nvme/host/rdma.c

between commit:

  583f69304b91 ("nvme-rdma: use new shared CQ mechanism")

from the rdma tree and commit:

  5ec5d3bddc6b ("nvme-rdma: add metadata/T10-PI support")

from the block tree.

I fixed it up (see below) and can carry the fix as necessary. This
is now fixed as far as linux-next is concerned, but any non-trivial
conflicts should be mentioned to your upstream maintainer when your
tree is submitted for merging. You may also want to consider
cooperating with the maintainer of the conflicting tree to minimise
any particularly complex conflicts.
-- 
Cheers,
Stephen Rothwell

diff --cc drivers/nvme/host/rdma.c
index 83d5f292c937,f8f856dc0c67..000000000000
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@@ -85,7 -95,7 +95,8 @@@ struct nvme_rdma_queue
  	struct rdma_cm_id	*cm_id;
  	int			cm_error;
  	struct completion	cm_done;
 +	int			cq_size;
+ 	bool			pi_support;
  };
  
  struct nvme_rdma_ctrl {
@@@ -262,7 -272,8 +273,9 @@@ static int nvme_rdma_create_qp(struct n
  	init_attr.qp_type = IB_QPT_RC;
  	init_attr.send_cq = queue->ib_cq;
  	init_attr.recv_cq = queue->ib_cq;
 +	init_attr.qp_context = queue;
+ 	if (queue->pi_support)
+ 		init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
  
  	ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr);
@@@ -426,43 -437,18 +447,49 @@@ static void nvme_rdma_destroy_queue_ib(
  	nvme_rdma_dev_put(dev);
  }
  
- static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev)
+ static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev, bool pi_support)
  {
- 	return min_t(u32, NVME_RDMA_MAX_SEGMENTS,
- 		     ibdev->attrs.max_fast_reg_page_list_len - 1);
+ 	u32 max_page_list_len;
+ 
+ 	if (pi_support)
+ 		max_page_list_len = ibdev->attrs.max_pi_fast_reg_page_list_len;
+ 	else
+ 		max_page_list_len = ibdev->attrs.max_fast_reg_page_list_len;
+ 
+ 	return min_t(u32, NVME_RDMA_MAX_SEGMENTS, max_page_list_len - 1);
  }
 +static int nvme_rdma_create_cq(struct ib_device *ibdev,
 +		struct nvme_rdma_queue *queue)
 +{
 +	int ret, comp_vector, idx = nvme_rdma_queue_idx(queue);
 +	enum ib_poll_context poll_ctx;
 +
 +	/*
 +	 * Spread I/O queues completion vectors according to their queue
 +	 * index. Admin queues can always go on completion vector 0.
 +	 */
 +	comp_vector = idx == 0 ? idx : idx - 1;
 +
 +	/* Polling queues need direct cq polling context */
 +	if (nvme_rdma_poll_queue(queue)) {
 +		poll_ctx = IB_POLL_DIRECT;
 +		queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size,
 +					   comp_vector, poll_ctx);
 +	} else {
 +		poll_ctx = IB_POLL_SOFTIRQ;
 +		queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size,
 +					      comp_vector, poll_ctx);
 +	}
 +
 +	if (IS_ERR(queue->ib_cq)) {
 +		ret = PTR_ERR(queue->ib_cq);
 +		return ret;
 +	}
 +
 +	return 0;
 +}
 +
  static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
  {
  	struct ib_device	*ibdev;
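
One note on the shared CQ mechanism used above: a CQ obtained from the
pool with ib_cq_pool_get() must be returned with ib_cq_pool_put(),
while a directly allocated polling CQ is freed with ib_free_cq(). A
minimal sketch of the matching teardown (the helper name is assumed
here, not taken from the diff above):

  static void nvme_rdma_free_cq(struct nvme_rdma_queue *queue)
  {
          if (nvme_rdma_poll_queue(queue))
                  ib_free_cq(queue->ib_cq);    /* direct allocation */
          else
                  ib_cq_pool_put(queue->ib_cq, queue->cq_size); /* pool CQ */
  }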