Message-ID: <20240705085937.1644229-2-huangjunxian6@hisilicon.com>
Date: Fri, 5 Jul 2024 16:59:29 +0800
From: Junxian Huang <huangjunxian6@...ilicon.com>
To: <jgg@...pe.ca>, <leon@...nel.org>
CC: <linux-rdma@...r.kernel.org>, <linuxarm@...wei.com>,
<linux-kernel@...r.kernel.org>, <huangjunxian6@...ilicon.com>
Subject: [PATCH for-rc 1/9] RDMA/hns: Check atomic wr length

8 bytes is the only supported length for atomic operations. Return an
error if the WR length is not 8 bytes.

Fixes: 384f88185112 ("RDMA/hns: Add atomic support")
Signed-off-by: Junxian Huang <huangjunxian6@...ilicon.com>
---
drivers/infiniband/hw/hns/hns_roce_device.h | 2 ++
drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 19 +++++++++++++++----
2 files changed, 17 insertions(+), 4 deletions(-)

diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index ff0b3f68ee3a..05005079258c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -91,6 +91,8 @@
/* Configure to HW for PAGE_SIZE larger than 4KB */
#define PG_SHIFT_OFFSET (PAGE_SHIFT - 12)

+#define ATOMIC_WR_LEN 8
+
#define HNS_ROCE_IDX_QUE_ENTRY_SZ 4
#define SRQ_DB_REG 0x230

diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 4287818a737f..a5d746a5cc68 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -164,15 +164,23 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
hr_reg_clear(fseg, FRMR_BLK_MODE);
}

-static void set_atomic_seg(const struct ib_send_wr *wr,
- struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
- unsigned int valid_num_sge)
+static int set_atomic_seg(struct hns_roce_dev *hr_dev,
+ const struct ib_send_wr *wr,
+ struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
+ unsigned int valid_num_sge, u32 msg_len)
{
struct hns_roce_v2_wqe_data_seg *dseg =
(void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
struct hns_roce_wqe_atomic_seg *aseg =
(void *)dseg + sizeof(struct hns_roce_v2_wqe_data_seg);

+ if (msg_len != ATOMIC_WR_LEN) {
+ ibdev_err_ratelimited(&hr_dev->ib_dev,
+ "invalid atomic wr len, len = %u.\n",
+ msg_len);
+ return -EINVAL;
+ }
+
set_data_seg_v2(dseg, wr->sg_list);

if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
@@ -185,6 +193,8 @@ static void set_atomic_seg(const struct ib_send_wr *wr,
}

hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, valid_num_sge);
+
+ return 0;
}

static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
@@ -592,7 +602,8 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,

if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
- set_atomic_seg(wr, rc_sq_wqe, valid_num_sge);
+ ret = set_atomic_seg(hr_dev, wr, rc_sq_wqe, valid_num_sge,
+ msg_len);
else if (wr->opcode != IB_WR_REG_MR)
ret = set_rwqe_data_seg(&qp->ibqp, wr, rc_sq_wqe,
&curr_idx, valid_num_sge);
--
2.33.0