Message-ID: <20190625205701.17849-11-saeedm@mellanox.com>
Date: Tue, 25 Jun 2019 20:57:54 +0000
From: Saeed Mahameed <saeedm@...lanox.com>
To: "David S. Miller" <davem@...emloft.net>,
Doug Ledford <dledford@...hat.com>,
Jason Gunthorpe <jgg@...lanox.com>
CC: Leon Romanovsky <leonro@...lanox.com>,
Or Gerlitz <ogerlitz@...lanox.com>,
Sagi Grimberg <sagi@...mberg.me>,
Tal Gilboa <talgi@...lanox.com>,
"netdev@...r.kernel.org" <netdev@...r.kernel.org>,
"linux-rdma@...r.kernel.org" <linux-rdma@...r.kernel.org>,
Yamin Friedman <yaminf@...lanox.com>,
Max Gurtovoy <maxg@...lanox.com>,
Saeed Mahameed <saeedm@...lanox.com>
Subject: [for-next V2 10/10] RDMA/core: Provide RDMA DIM support for ULPs
From: Yamin Friedman <yaminf@...lanox.com>
Add an interface in the RDMA core that applies rdma_dim adaptive
interrupt moderation to completion queues. A CQ allocated through the
existing ib_alloc_cq() API now sets up rdma_dim automatically when the
device opts in via use_cq_dim and the CQ is not polled directly.
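For illustration (not part of this patch), a ULP needs no new calls to
benefit; a CQ allocated the usual way is moderated by rdma_dim once the
device driver has opted in:

	/* Illustrative ULP-side allocation: the returned CQ is governed
	 * by rdma_dim when ibdev->use_cq_dim is set by the device driver
	 * and the poll context is not IB_POLL_DIRECT.
	 */
	cq = ib_alloc_cq(ibdev, priv, nr_cqe, comp_vector, IB_POLL_SOFTIRQ);
	if (IS_ERR(cq))
		return PTR_ERR(cq);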
Performance improvement (ConnectX-5 100GbE, x86) running a FIO
benchmark over NVMf between two identical 56-core end-hosts across a
Mellanox switch, using a null_blk device:

IO READS without DIM:
blk size | BW        | IOPS  | 99th percentile latency | 99.99th latency
512B     | 3.8GiB/s  | 7.7M  | 1401 usec               | 2442 usec
4k       | 7.0GiB/s  | 1.8M  | 4817 usec               | 6587 usec
64k      | 10.7GiB/s | 175k  | 9896 usec               | 10028 usec

IO WRITES without DIM:
blk size | BW        | IOPS  | 99th percentile latency | 99.99th latency
512B     | 3.6GiB/s  | 7.5M  | 1434 usec               | 2474 usec
4k       | 6.3GiB/s  | 1.6M  | 938 usec                | 1221 usec
64k      | 10.7GiB/s | 175k  | 8979 usec               | 12780 usec

IO READS with DIM:
blk size | BW        | IOPS  | 99th percentile latency | 99.99th latency
512B     | 4GiB/s    | 8.2M  | 816 usec                | 889 usec
4k       | 10.1GiB/s | 2.65M | 3359 usec               | 5080 usec
64k      | 10.7GiB/s | 175k  | 9896 usec               | 10028 usec

IO WRITES with DIM:
blk size | BW        | IOPS  | 99th percentile latency | 99.99th latency
512B     | 3.9GiB/s  | 8.1M  | 799 usec                | 922 usec
4k       | 9.6GiB/s  | 2.5M  | 717 usec                | 1004 usec
64k      | 10.7GiB/s | 176k  | 8586 usec               | 12256 usec
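In other words, with DIM the 4k read bandwidth goes from 7.0GiB/s to
10.1GiB/s (roughly a 44% gain) and the 512B read 99.99th percentile
latency drops from 2442 to 889 usec, while the 64k results are
essentially unchanged, as they appear to be already bandwidth-bound.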
The rdma_dim algorithm measures how effective the current moderation
setting is for a flow in a protocol-agnostic way, so it should be
appropriate for all RDMA storage protocols.

rdma_dim is made the default option based on the performance
improvement seen in extensive testing.
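For other device drivers, opting in would be a one-line change along
these lines (hypothetical driver shown; the only hard requirement is a
working modify_cq op, which the core invokes as
ops.modify_cq(cq, comps, usec) from the dim work item):

	/* Hypothetical driver init hook: let the RDMA core run rdma_dim
	 * on this device's CQs. Requires ops.modify_cq to be implemented,
	 * since that is how a newly chosen moderation profile is applied.
	 */
	static void foodev_init_caps(struct foodev_ib_dev *dev)
	{
		dev->ib_dev.use_cq_dim = true;
	}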
Signed-off-by: Yamin Friedman <yaminf@...lanox.com>
Reviewed-by: Max Gurtovoy <maxg@...lanox.com>
Reviewed-by: Leon Romanovsky <leonro@...lanox.com>
Signed-off-by: Saeed Mahameed <saeedm@...lanox.com>
---
 drivers/infiniband/core/cq.c      | 71 ++++++++++++++++++++++++++++++-
 drivers/infiniband/hw/mlx5/main.c |  2 +
 2 files changed, 71 insertions(+), 2 deletions(-)
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index a4c81992267c..d8a8c466d897 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -26,6 +26,40 @@
 #define IB_POLL_FLAGS \
         (IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)
 
+static void ib_cq_rdma_dim_work(struct work_struct *w)
+{
+        struct dim *dim = container_of(w, struct dim, work);
+        struct ib_cq *cq = (struct ib_cq *)dim->dim_owner;
+
+        u16 usec = rdma_dim_prof[dim->profile_ix].usec;
+        u16 comps = rdma_dim_prof[dim->profile_ix].comps;
+
+        dim->state = DIM_START_MEASURE;
+
+        cq->device->ops.modify_cq(cq, comps, usec);
+}
+
+static void rdma_dim_init(struct ib_cq *cq)
+{
+        struct dim *dim;
+
+        if (!cq->device->ops.modify_cq || !cq->device->use_cq_dim ||
+            cq->poll_ctx == IB_POLL_DIRECT)
+                return;
+
+        dim = kzalloc(sizeof(struct dim), GFP_KERNEL);
+        if (!dim)
+                return;
+
+        dim->state = DIM_START_MEASURE;
+        dim->tune_state = DIM_GOING_RIGHT;
+        dim->profile_ix = RDMA_DIM_START_PROFILE;
+        dim->dim_owner = cq;
+        cq->dim = dim;
+
+        INIT_WORK(&dim->work, ib_cq_rdma_dim_work);
+}
+
 static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,
                            int batch)
 {
@@ -98,6 +132,24 @@ static int ib_poll_handler(struct irq_poll *iop, int budget)
         return completed;
 }
 
+static int ib_poll_dim_handler(struct irq_poll *iop, int budget)
+{
+        struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
+        struct dim *dim = cq->dim;
+        int completed;
+
+        completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH);
+        if (completed < budget) {
+                irq_poll_complete(&cq->iop);
+                if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
+                        irq_poll_sched(&cq->iop);
+        }
+
+        rdma_dim(dim, completed);
+
+        return completed;
+}
+
 static void ib_cq_completion_softirq(struct ib_cq *cq, void *private)
 {
         irq_poll_sched(&cq->iop);
@@ -105,14 +157,18 @@ static void ib_cq_completion_softirq(struct ib_cq *cq, void *private)
 
 static void ib_cq_poll_work(struct work_struct *work)
 {
-        struct ib_cq *cq = container_of(work, struct ib_cq, work);
+        struct ib_cq *cq = container_of(work, struct ib_cq,
+                                        work);
         int completed;
 
         completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc,
                                     IB_POLL_BATCH);
+
         if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
             ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
                 queue_work(cq->comp_wq, &cq->work);
+        else if (cq->dim)
+                rdma_dim(cq->dim, completed);
 }
 
 static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
@@ -166,6 +222,8 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
         rdma_restrack_set_task(&cq->res, caller);
         rdma_restrack_kadd(&cq->res);
 
+        rdma_dim_init(cq);
+
         switch (cq->poll_ctx) {
         case IB_POLL_DIRECT:
                 cq->comp_handler = ib_cq_completion_direct;
@@ -173,7 +231,13 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
         case IB_POLL_SOFTIRQ:
                 cq->comp_handler = ib_cq_completion_softirq;
 
-                irq_poll_init(&cq->iop, IB_POLL_BUDGET_IRQ, ib_poll_handler);
+                if (cq->dim)
+                        irq_poll_init(&cq->iop, IB_POLL_BUDGET_IRQ,
+                                      ib_poll_dim_handler);
+                else
+                        irq_poll_init(&cq->iop, IB_POLL_BUDGET_IRQ,
+                                      ib_poll_handler);
+
                 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
                 break;
         case IB_POLL_WORKQUEUE:
@@ -226,6 +290,9 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
                 WARN_ON_ONCE(1);
         }
 
+        if (cq->dim)
+                cancel_work_sync(&cq->dim->work);
+        kfree(cq->dim);
         kfree(cq->wc);
         rdma_restrack_del(&cq->res);
         ret = cq->device->ops.destroy_cq(cq, udata);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index abac70ad5c7c..b1b45dbe24a5 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -6305,6 +6305,8 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
               MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
                 mutex_init(&dev->lb.mutex);
 
+        dev->ib_dev.use_cq_dim = true;
+
         return 0;
 }
--
2.21.0