Message-ID: <20241019133908.fy46uasva3tdwtmk@skbuf>
Date: Sat, 19 Oct 2024 16:39:08 +0300
From: Vladimir Oltean <olteanv@...il.com>
To: Jacob Keller <jacob.e.keller@...el.com>
Cc: Andrew Morton <akpm@...ux-foundation.org>,
	Eric Dumazet <edumazet@...gle.com>,
	Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>,
	Tony Nguyen <anthony.l.nguyen@...el.com>,
	Przemek Kitszel <przemyslaw.kitszel@...el.com>,
	linux-kernel@...r.kernel.org, netdev@...r.kernel.org
Subject: Re: [PATCH net-next 5/8] ice: use <linux/packing.h> for Tx and Rx
 queue context data

On Fri, Oct 11, 2024 at 11:48:33AM -0700, Jacob Keller wrote:
> +/**
> + * __ice_pack_rxq_ctx - Pack Rx queue context into a HW buffer
> + * @ctx: the Rx queue context to pack
> + * @buf: the HW buffer to pack into
> + * @len: size of the HW buffer
> + *
> + * Pack the Rx queue context from the CPU-friendly unpacked buffer into its
> + * bit-packed HW layout.
> + */
> +void __ice_pack_rxq_ctx(const struct ice_rlan_ctx *ctx, void *buf, size_t len)
> +{
> +	CHECK_PACKED_FIELDS_20(ice_rlan_ctx_fields, ICE_RXQ_CTX_SZ);
> +	WARN_ON_ONCE(len < ICE_RXQ_CTX_SZ);

Why not warn on the != condition? The buffer shouldn't be larger, either.
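I.e., something along the lines of (same constant as in the patch):

	WARN_ON_ONCE(len != ICE_RXQ_CTX_SZ);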

> +
> +	pack_fields(buf, len, ctx, ice_rlan_ctx_fields,
> +		    QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
> +}
> +/**
> + * __ice_pack_txq_ctx - Pack Tx queue context into a HW buffer
> + * @ctx: the Tx queue context to pack
> + * @buf: the HW buffer to pack into
> + * @len: size of the HW buffer
> + *
> + * Pack the Tx queue context from the CPU-friendly unpacked buffer into its
> + * bit-packed HW layout.
> + */
> +void __ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, void *buf, size_t len)
> +{
> +	CHECK_PACKED_FIELDS_27(ice_tlan_ctx_fields, ICE_TXQ_CTX_SZ);
> +	WARN_ON_ONCE(len < ICE_TXQ_CTX_SZ);

Same question here.

> +
> +	pack_fields(buf, len, ctx, ice_tlan_ctx_fields,
> +		    QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
> +}

In fact, why not write the code in a way in which the _compiler_ errors
out if you mess up how the arguments are passed, rather than introduce
code that only warns at runtime?
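With a dedicated buffer type, a mismatched caller fails the build instead
of warning at runtime. A hypothetical call site (the "raw" array is made
up, the rest uses the names from the diff below):

	u8 raw[ICE_TXQ_CTX_SZ];

	ice_pack_txq_ctx(&tlan_ctx, raw);                       /* compiler complains: incompatible pointer type */
	ice_pack_txq_ctx(&tlan_ctx, &qg_buf->txqs[0].txq_ctx);  /* OK */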

Something like this:

diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 1f01f3501d6b..a0ec9c97c2d7 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -12,6 +12,13 @@
 #define ICE_AQC_TOPO_MAX_LEVEL_NUM	0x9
 #define ICE_AQ_SET_MAC_FRAME_SIZE_MAX	9728
 
+#define ICE_RXQ_CTX_SIZE_DWORDS		8
+#define ICE_RXQ_CTX_SZ			(ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
+#define ICE_TXQ_CTX_SZ			22
+
+typedef struct __packed { u8 buf[ICE_RXQ_CTX_SZ]; } ice_rxq_ctx_buf_t;
+typedef struct __packed { u8 buf[ICE_TXQ_CTX_SZ]; } ice_txq_ctx_buf_t;
+
 struct ice_aqc_generic {
 	__le32 param0;
 	__le32 param1;
@@ -2067,10 +2074,10 @@ struct ice_aqc_add_txqs_perq {
 	__le16 txq_id;
 	u8 rsvd[2];
 	__le32 q_teid;
-	u8 txq_ctx[22];
+	ice_txq_ctx_buf_t txq_ctx;
 	u8 rsvd2[2];
 	struct ice_aqc_txsched_elem info;
-};
+} __packed;
 
 /* The format of the command buffer for Add Tx LAN Queues (0x0C30)
  * is an array of the following structs. Please note that the length of
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index c9b2170a3f5c..f1fbba19e4e4 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -912,7 +912,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
 	ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
 	/* copy context contents into the qg_buf */
 	qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
-	ice_pack_txq_ctx(&tlan_ctx, qg_buf->txqs[0].txq_ctx);
+	ice_pack_txq_ctx(&tlan_ctx, &qg_buf->txqs[0].txq_ctx);
 
 	/* init queue specific tail reg. It is referred as
 	 * transmit comm scheduler queue doorbell.
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index e974290f1801..57a4142a9396 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -1363,12 +1363,13 @@ int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
  * @rxq_ctx: pointer to the packed Rx queue context
  * @rxq_index: the index of the Rx queue
  */
-static void ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *rxq_ctx,
+static void ice_copy_rxq_ctx_to_hw(struct ice_hw *hw,
+				   const ice_rxq_ctx_buf_t *rxq_ctx,
 				   u32 rxq_index)
 {
 	/* Copy each dword separately to HW */
 	for (int i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
-		u32 ctx = ((u32 *)rxq_ctx)[i];
+		u32 ctx = ((const u32 *)rxq_ctx)[i];
 
 		wr32(hw, QRX_CONTEXT(i, rxq_index), ctx);
 
@@ -1405,20 +1406,20 @@ static const struct packed_field_s ice_rlan_ctx_fields[] = {
 };
 
 /**
- * __ice_pack_rxq_ctx - Pack Rx queue context into a HW buffer
+ * ice_pack_rxq_ctx - Pack Rx queue context into a HW buffer
  * @ctx: the Rx queue context to pack
  * @buf: the HW buffer to pack into
- * @len: size of the HW buffer
  *
  * Pack the Rx queue context from the CPU-friendly unpacked buffer into its
  * bit-packed HW layout.
  */
-void __ice_pack_rxq_ctx(const struct ice_rlan_ctx *ctx, void *buf, size_t len)
+static void ice_pack_rxq_ctx(const struct ice_rlan_ctx *ctx,
+			     ice_rxq_ctx_buf_t *buf)
 {
 	CHECK_PACKED_FIELDS_20(ice_rlan_ctx_fields, ICE_RXQ_CTX_SZ);
-	WARN_ON_ONCE(len < ICE_RXQ_CTX_SZ);
+	BUILD_BUG_ON(sizeof(*buf) != ICE_RXQ_CTX_SZ);
 
-	pack_fields(buf, len, ctx, ice_rlan_ctx_fields,
+	pack_fields(buf, sizeof(*buf), ctx, ice_rlan_ctx_fields,
 		    QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
 }
 
@@ -1436,14 +1437,13 @@ void __ice_pack_rxq_ctx(const struct ice_rlan_ctx *ctx, void *buf, size_t len)
 int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
 		      u32 rxq_index)
 {
-	u8 ctx_buf[ICE_RXQ_CTX_SZ] = {};
+	ice_rxq_ctx_buf_t buf = {};
 
 	if (rxq_index > QRX_CTRL_MAX_INDEX)
 		return -EINVAL;
 
-	ice_pack_rxq_ctx(rlan_ctx, ctx_buf);
-
-	ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
+	ice_pack_rxq_ctx(rlan_ctx, &buf);
+	ice_copy_rxq_ctx_to_hw(hw, &buf, rxq_index);
 
 	return 0;
 }
@@ -1481,20 +1481,19 @@ static const struct packed_field_s ice_tlan_ctx_fields[] = {
 };
 
 /**
- * __ice_pack_txq_ctx - Pack Tx queue context into a HW buffer
+ * ice_pack_txq_ctx - Pack Tx queue context into a HW buffer
  * @ctx: the Tx queue context to pack
  * @buf: the HW buffer to pack into
- * @len: size of the HW buffer
  *
  * Pack the Tx queue context from the CPU-friendly unpacked buffer into its
  * bit-packed HW layout.
  */
-void __ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, void *buf, size_t len)
+void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf)
 {
 	CHECK_PACKED_FIELDS_27(ice_tlan_ctx_fields, ICE_TXQ_CTX_SZ);
-	WARN_ON_ONCE(len < ICE_TXQ_CTX_SZ);
+	BUILD_BUG_ON(sizeof(*buf) != ICE_TXQ_CTX_SZ);
 
-	pack_fields(buf, len, ctx, ice_tlan_ctx_fields,
+	pack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields,
 		    QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
 }
 
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 88d1cebcb3dc..a68bea3934e3 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -93,14 +93,7 @@ bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
 int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
 void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
 
-void __ice_pack_rxq_ctx(const struct ice_rlan_ctx *ctx, void *buf, size_t len);
-void __ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, void *buf, size_t len);
-
-#define ice_pack_rxq_ctx(rlan_ctx, buf) \
-	__ice_pack_rxq_ctx((rlan_ctx), (buf), sizeof(buf))
-
-#define ice_pack_txq_ctx(tlan_ctx, buf) \
-	__ice_pack_txq_ctx((tlan_ctx), (buf), sizeof(buf))
+void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf);
 
 extern struct mutex ice_global_cfg_lock_sw;
 
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 618cc39bd397..1479b45738af 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -371,8 +371,6 @@ enum ice_rx_flex_desc_status_error_1_bits {
 	ICE_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
 };
 
-#define ICE_RXQ_CTX_SIZE_DWORDS		8
-#define ICE_RXQ_CTX_SZ			(ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
 #define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS	22
 #define ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS	5
 #define GLTCLAN_CQ_CNTX(i, CQ)		(GLTCLAN_CQ_CNTX0(CQ) + ((i) * 0x0800))
@@ -531,8 +529,6 @@ enum ice_tx_ctx_desc_eipt_offload {
 #define ICE_LAN_TXQ_MAX_QGRPS	127
 #define ICE_LAN_TXQ_MAX_QDIS	1023
 
-#define ICE_TXQ_CTX_SZ		22
-
 /* Tx queue context data */
 struct ice_tlan_ctx {
 #define ICE_TLAN_CTX_BASE_S	7
-- 
2.43.0

