Message-Id: <20221005174521.63619-1-rohit.sajan.kumar@oracle.com>
Date:   Wed,  5 Oct 2022 10:45:20 -0700
From:   Rohit Nair <rohit.sajan.kumar@...cle.com>
To:     leon@...nel.org, jgg@...pe.ca, saeedm@...dia.com,
        davem@...emloft.net, edumazet@...gle.com, kuba@...nel.org,
        pabeni@...hat.com, linux-rdma@...r.kernel.org,
        linux-kernel@...r.kernel.org, netdev@...r.kernel.org
Cc:     manjunath.b.patil@...cle.com, rama.nichanamatlu@...cle.com,
        rohit.sajan.kumar@...cle.com,
        Michael Guralnik <michaelgur@...dia.com>
Subject: [PATCH 1/1] IB/mlx5: Add a signature check to received EQEs and CQEs

As the PRM defines, the bytewise XOR of all bytes of an EQE together with
the low bytes of the EQE consumer index should equal 0xff. Otherwise, we
can assume the EQE is corrupt. The same rule applies to CQEs.
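
For reference, the validity rule reduces to the check sketched below. This
is a minimal illustration only, not code from this patch: the helper name
is hypothetical, and it uses a plain byte loop rather than the u64-chunked
fold used in verify_cqe()/verify_eqe() further down.

/*
 * Sketch of the PRM validity rule: XOR every byte of the entry together
 * with the low three bytes of the consumer index; a valid entry yields
 * 0xff. Hypothetical helper for illustration only.
 */
static bool entry_xor_is_valid(const u8 *entry, size_t len, u32 cons_index)
{
	u8 x = (cons_index & 0xff) ^
	       ((cons_index >> 8) & 0xff) ^
	       ((cons_index >> 16) & 0xff);
	size_t i;

	for (i = 0; i < len; i++)
		x ^= entry[i];

	return x == 0xff;
}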

Add a check that verifies each EQE and CQE is valid in that respect and,
if not, dumps the offending entry to dmesg for inspection.

This patch does not introduce any significant performance degradation,
as verified with qperf.

Suggested-by: Michael Guralnik <michaelgur@...dia.com>
Signed-off-by: Rohit Nair <rohit.sajan.kumar@...cle.com>
---
 drivers/infiniband/hw/mlx5/cq.c              | 40 ++++++++++++++++++++++++++++
 drivers/net/ethernet/mellanox/mlx5/core/eq.c | 39 +++++++++++++++++++++++++++
 2 files changed, 79 insertions(+)

diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index be189e0..2a6d722 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -441,6 +441,44 @@ static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
 	}
 }
 
+static void verify_cqe(struct mlx5_cqe64 *cqe64, struct mlx5_ib_cq *cq)
+{
+	int i = 0;
+	u64 temp_xor = 0;
+	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
+
+	u32 cons_index = cq->mcq.cons_index;
+	u64 *eight_byte_raw_cqe = (u64 *)cqe64;
+	u8 *temp_bytewise_xor = (u8 *)(&temp_xor);
+	u8 cqe_bytewise_xor = (cons_index & 0xff) ^
+				((cons_index & 0xff00) >> 8) ^
+				((cons_index & 0xff0000) >> 16);
+
+	for (i = 0; i < sizeof(struct mlx5_cqe64); i += 8) {
+		temp_xor ^= *eight_byte_raw_cqe;
+		eight_byte_raw_cqe++;
+	}
+
+	for (i = 0; i < (sizeof(u64)); i++) {
+		cqe_bytewise_xor ^= *temp_bytewise_xor;
+		temp_bytewise_xor++;
+	}
+
+	if (cqe_bytewise_xor == 0xff)
+		return;
+
+	dev_err(&dev->mdev->pdev->dev,
+		"Faulty CQE - checksum failure: cqe=0x%x cqn=0x%x cqe_bytewise_xor=0x%x\n",
+		cq->ibcq.cqe, cq->mcq.cqn, cqe_bytewise_xor);
+	dev_err(&dev->mdev->pdev->dev,
+		"cons_index=%u arm_sn=%u irqn=%u cqe_size=0x%x\n",
+		cq->mcq.cons_index, cq->mcq.arm_sn, cq->mcq.irqn, cq->mcq.cqe_sz);
+
+	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET,
+		       16, 1, cqe64, sizeof(*cqe64), false);
+	BUG();
+}
+
 static int mlx5_poll_one(struct mlx5_ib_cq *cq,
 			 struct mlx5_ib_qp **cur_qp,
 			 struct ib_wc *wc)
@@ -463,6 +501,8 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
 
 	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
 
+	verify_cqe(cqe64, cq);
+
 	++cq->mcq.cons_index;
 
 	/* Make sure we read CQ entry contents after we've checked the
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 229728c..f2a6d8b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -102,6 +102,43 @@ static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
 	return cq;
 }
 
+static void verify_eqe(struct mlx5_eq *eq, struct mlx5_eqe *eqe)
+{
+	u64 *eight_byte_raw_eqe = (u64 *)eqe;
+	u8 eqe_bytewise_xor = (eq->cons_index & 0xff) ^
+			      ((eq->cons_index & 0xff00) >> 8) ^
+			      ((eq->cons_index & 0xff0000) >> 16);
+
+	int i = 0;
+	u64 temp_xor = 0;
+	u8 *temp_bytewise_xor = (u8 *)(&temp_xor);
+
+	for (i = 0; i < sizeof(struct mlx5_eqe); i += 8) {
+		temp_xor ^= *eight_byte_raw_eqe;
+		eight_byte_raw_eqe++;
+	}
+
+	for (i = 0; i < (sizeof(u64)); i++) {
+		eqe_bytewise_xor ^= *temp_bytewise_xor;
+		temp_bytewise_xor++;
+	}
+
+	if (eqe_bytewise_xor == 0xff)
+		return;
+
+	dev_err(&eq->dev->pdev->dev,
+		"Faulty EQE - checksum failure: ci=0x%x eqe_type=0x%x eqe_bytewise_xor=0x%x",
+		eq->cons_index, eqe->type, eqe_bytewise_xor);
+
+	dev_err(&eq->dev->pdev->dev,
+		"EQ addr=%p eqn=%u irqn=%u vec_index=%u",
+		eq, eq->eqn, eq->irqn, eq->vecidx);
+
+	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET,
+		       16, 1, eqe, sizeof(*eqe), false);
+	BUG();
+}
+
 static int mlx5_eq_comp_int(struct notifier_block *nb,
 			    __always_unused unsigned long action,
 			    __always_unused void *data)
@@ -127,6 +164,8 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
 		/* Assume (eqe->type) is always MLX5_EVENT_TYPE_COMP */
 		cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
 
+		verify_eqe(eq, eqe);
+
 		cq = mlx5_eq_cq_get(eq, cqn);
 		if (likely(cq)) {
 			++cq->arm_sn;
-- 
1.8.3.1
