Message-Id: <20160818135939.002907410@linuxfoundation.org>
Date: Thu, 18 Aug 2016 15:59:35 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org,
Bart Van Assche <bart.vanassche@...disk.com>,
Sagi Grimberg <sagi@...mberg.me>,
Christoph Hellwig <hch@....de>,
Steve Wise <swise@...ngridcomputing.com>,
Laurence Oberman <loberman@...hat.com>,
Parav Pandit <pandit.parav@...il.com>,
Nicholas Bellinger <nab@...ux-iscsi.org>,
Doug Ledford <dledford@...hat.com>
Subject: [PATCH 4.7 158/186] IB/core: Make rdma_rw_ctx_init() initialize all used fields
4.7-stable review patch. If anyone has any objections, please let me know.
------------------
From: Bart Van Assche <bart.vanassche@...disk.com>
commit eaa74ec7329a48a4b724d8de440b3a2cbaabf7c8 upstream.
Some but not all callers of rdma_rw_ctx_init() zero-initialize
struct rdma_rw_ctx. Hence make rdma_rw_ctx_init() initialize all
work request fields that will be read by ib_post_send().
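To illustrate the failure mode outside the kernel (the struct and function names below are hypothetical stand-ins, not struct rdma_rw_ctx or rdma_rw_ctx_init()): an init routine that increments a counter field or leaves a next pointer untouched only behaves correctly if the caller happened to zero the memory first. A minimal userspace sketch of the buggy and the fixed pattern:

#include <stdio.h>

/* Hypothetical stand-in for a work request embedded in a context struct. */
struct fake_wr {
	struct fake_wr *next;	/* chain walked by the "post" step */
	int num_sge;
};

/* Buggy pattern: silently assumes *wr was already zeroed by the caller. */
static void init_wr_buggy(struct fake_wr *wr, int nr_sge)
{
	for (int i = 0; i < nr_sge; i++)
		wr->num_sge++;	/* starts from stack garbage if not zeroed */
	/* wr->next is never written: it may point anywhere */
}

/* Fixed pattern: write every field that will later be read. */
static void init_wr_fixed(struct fake_wr *wr, int nr_sge)
{
	wr->num_sge = nr_sge;
	wr->next = NULL;
}

int main(void)
{
	struct fake_wr wr;	/* stack allocation, deliberately not memset */

	init_wr_fixed(&wr, 4);
	printf("num_sge=%d next=%p\n", wr.num_sge, (void *)wr.next);
	return 0;
}

With init_wr_buggy() and a non-zeroed wr, num_sge and next hold indeterminate values, which is exactly why the patch assigns wr.num_sge directly and terminates the wr.next chain explicitly instead of relying on zero-initialized memory.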
Fixes: a060b5629ab0 ("IB/core: generic RDMA READ/WRITE API")
Signed-off-by: Bart Van Assche <bart.vanassche@...disk.com>
Reviewed-by: Sagi Grimberg <sagi@...mberg.me>
Reviewed-by: Christoph Hellwig <hch@....de>
Tested-by: Steve Wise <swise@...ngridcomputing.com>
Tested-by: Laurence Oberman <loberman@...hat.com>
Cc: Parav Pandit <pandit.parav@...il.com>
Cc: Nicholas Bellinger <nab@...ux-iscsi.org>
Signed-off-by: Doug Ledford <dledford@...hat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
drivers/infiniband/core/rw.c | 14 +++++++++-----
1 file changed, 9 insertions(+), 5 deletions(-)
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -71,6 +71,7 @@ static inline u32 rdma_rw_fr_page_list_l
 	return min_t(u32, dev->attrs.max_fast_reg_page_list_len, 256);
 }
 
+/* Caller must have zero-initialized *reg. */
 static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
 		struct rdma_rw_reg_ctx *reg, struct scatterlist *sg,
 		u32 sg_cnt, u32 offset)
@@ -114,6 +115,7 @@ static int rdma_rw_init_mr_wrs(struct rd
 		u8 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset,
 		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
 {
+	struct rdma_rw_reg_ctx *prev = NULL;
 	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
 	int i, j, ret = 0, count = 0;
 
@@ -125,7 +127,6 @@ static int rdma_rw_init_mr_wrs(struct rd
 	}
 
 	for (i = 0; i < ctx->nr_ops; i++) {
-		struct rdma_rw_reg_ctx *prev = i ? &ctx->reg[i - 1] : NULL;
 		struct rdma_rw_reg_ctx *reg = &ctx->reg[i];
 		u32 nents = min(sg_cnt, pages_per_mr);
 
@@ -162,9 +163,13 @@ static int rdma_rw_init_mr_wrs(struct rd
 		sg_cnt -= nents;
 		for (j = 0; j < nents; j++)
 			sg = sg_next(sg);
+		prev = reg;
 		offset = 0;
 	}
 
+	if (prev)
+		prev->wr.wr.next = NULL;
+
 	ctx->type = RDMA_RW_MR;
 	return count;
 
@@ -205,11 +210,10 @@ static int rdma_rw_init_map_wrs(struct r
 		rdma_wr->wr.opcode = IB_WR_RDMA_READ;
 		rdma_wr->remote_addr = remote_addr + total_len;
 		rdma_wr->rkey = rkey;
+		rdma_wr->wr.num_sge = nr_sge;
 		rdma_wr->wr.sg_list = sge;
 
 		for (j = 0; j < nr_sge; j++, sg = sg_next(sg)) {
-			rdma_wr->wr.num_sge++;
-
 			sge->addr = ib_sg_dma_address(dev, sg) + offset;
 			sge->length = ib_sg_dma_len(dev, sg) - offset;
 			sge->lkey = qp->pd->local_dma_lkey;
@@ -220,8 +224,8 @@ static int rdma_rw_init_map_wrs(struct r
 			offset = 0;
 		}
 
-		if (i + 1 < ctx->nr_ops)
-			rdma_wr->wr.next = &ctx->map.wrs[i + 1].wr;
+		rdma_wr->wr.next = i + 1 < ctx->nr_ops ?
+			&ctx->map.wrs[i + 1].wr : NULL;
 	}
 
 	ctx->type = RDMA_RW_MULTI_WR;
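For context on what the rdma_rw_init_map_wrs() hunk accomplishes: ib_post_send() walks the wr.next chain until it reaches NULL, so every element of the work-request array must get its next pointer written, including the last one. A minimal userspace sketch of that chaining pattern, using a hypothetical type rather than the kernel structures:

#include <stddef.h>
#include <stdio.h>

struct fake_wr {
	struct fake_wr *next;
};

int main(void)
{
	struct fake_wr wrs[3];	/* deliberately not zero-initialized */
	int i, nr_ops = 3, len = 0;

	/* Chain all requests; the last next is explicitly NULL. */
	for (i = 0; i < nr_ops; i++)
		wrs[i].next = (i + 1 < nr_ops) ? &wrs[i + 1] : NULL;

	/* A poster would walk the chain exactly like this. */
	for (struct fake_wr *wr = &wrs[0]; wr; wr = wr->next)
		len++;
	printf("chain length: %d\n", len);
	return 0;
}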