Message-Id: <20241108-packing-pack-fields-and-ice-implementation-v4-5-81a9f42c30e5@intel.com>
Date: Fri, 08 Nov 2024 17:30:18 -0800
From: Jacob Keller <jacob.e.keller@...el.com>
To: Vladimir Oltean <olteanv@...il.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Eric Dumazet <edumazet@...gle.com>, Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>, Tony Nguyen <anthony.l.nguyen@...el.com>,
Przemek Kitszel <przemyslaw.kitszel@...el.com>,
Masahiro Yamada <masahiroy@...nel.org>, netdev <netdev@...r.kernel.org>
Cc: linux-kbuild@...r.kernel.org, Jacob Keller <jacob.e.keller@...el.com>,
Vladimir Oltean <vladimir.oltean@....com>
Subject: [PATCH net-next v4 5/9] ice: use structures to keep track of queue
context size
The ice Tx and Rx queue contexts are currently stored as arrays of bytes
with a defined size (ICE_RXQ_CTX_SZ and ICE_TXQ_CTX_SZ). The packed queue
context is often passed to other functions as a plain u8 * pointer, which
does not carry the size. This makes the queue context API easy to misuse,
since an arbitrary u8 array or pointer can be passed.
Introduce wrapper typedefs which use a __packed structure of the proper
fixed size for the Tx and Rx context buffers. This lets the compiler track
the size of the value, so passing a buffer of the wrong size is caught at
compile time.
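As a minimal illustration (not part of this patch; the helper below is
hypothetical), a function that takes the wrapper type lets the compiler
flag a plain byte buffer passed in its place, whereas the old u8 *
signature silently accepted any buffer of any size:

	/* hypothetical helper, shown only to illustrate the type checking */
	void write_txq_ctx(struct ice_hw *hw, const ice_txq_ctx_buf_t *ctx);

	u8 wrong_buf[10];
	ice_txq_ctx_buf_t ctx_buf = {};

	write_txq_ctx(hw, &ctx_buf);	/* OK: the size is part of the type */
	write_txq_ctx(hw, wrong_buf);	/* flagged: incompatible pointer type */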
The existing APIs do not benefit much from this change; however, the
wrapper structures will be used to simplify the arguments of new packing
functions based on the recently introduced pack_fields API.
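For instance, a future packing helper could take the typedef directly; a
rough sketch of such a signature (illustrative only, not taken from this
patch) might look like:

	/* illustrative only: possible shape of a later packing helper */
	static void ice_pack_rxq_ctx(const struct ice_rlan_ctx *ctx,
				     ice_rxq_ctx_buf_t *buf);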
Co-developed-by: Vladimir Oltean <vladimir.oltean@....com>
Signed-off-by: Vladimir Oltean <vladimir.oltean@....com>
Signed-off-by: Jacob Keller <jacob.e.keller@...el.com>
---
drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 11 +++++++++--
drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 2 --
drivers/net/ethernet/intel/ice/ice_base.c | 2 +-
drivers/net/ethernet/intel/ice/ice_common.c | 24 +++++++++++-------------
4 files changed, 21 insertions(+), 18 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 1f01f3501d6b..a0ec9c97c2d7 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -12,6 +12,13 @@
#define ICE_AQC_TOPO_MAX_LEVEL_NUM 0x9
#define ICE_AQ_SET_MAC_FRAME_SIZE_MAX 9728
+#define ICE_RXQ_CTX_SIZE_DWORDS 8
+#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
+#define ICE_TXQ_CTX_SZ 22
+
+typedef struct __packed { u8 buf[ICE_RXQ_CTX_SZ]; } ice_rxq_ctx_buf_t;
+typedef struct __packed { u8 buf[ICE_TXQ_CTX_SZ]; } ice_txq_ctx_buf_t;
+
struct ice_aqc_generic {
__le32 param0;
__le32 param1;
@@ -2067,10 +2074,10 @@ struct ice_aqc_add_txqs_perq {
__le16 txq_id;
u8 rsvd[2];
__le32 q_teid;
- u8 txq_ctx[22];
+ ice_txq_ctx_buf_t txq_ctx;
u8 rsvd2[2];
struct ice_aqc_txsched_elem info;
-};
+} __packed;
/* The format of the command buffer for Add Tx LAN Queues (0x0C30)
* is an array of the following structs. Please note that the length of
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 0e8ed8c226e6..a76e5b0e7861 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -371,8 +371,6 @@ enum ice_rx_flex_desc_status_error_1_bits {
ICE_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
};
-#define ICE_RXQ_CTX_SIZE_DWORDS 8
-#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
#define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS 22
#define ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS 5
#define GLTCLAN_CQ_CNTX(i, CQ) (GLTCLAN_CQ_CNTX0(CQ) + ((i) * 0x0800))
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 3a8e156d7d86..260942877968 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -909,7 +909,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
/* copy context contents into the qg_buf */
qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
- ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
+ ice_set_ctx(hw, (u8 *)&tlan_ctx, (u8 *)&qg_buf->txqs[0].txq_ctx,
ice_tlan_ctx_info);
/* init queue specific tail reg. It is referred as
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 0f5a80269a7b..48d95cb49864 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -1359,29 +1359,27 @@ int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
/**
* ice_copy_rxq_ctx_to_hw
* @hw: pointer to the hardware structure
- * @ice_rxq_ctx: pointer to the rxq context
+ * @rxq_ctx: pointer to the packed Rx queue context
* @rxq_index: the index of the Rx queue
*
* Copies rxq context from dense structure to HW register space
*/
-static int
-ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
+static int ice_copy_rxq_ctx_to_hw(struct ice_hw *hw,
+ const ice_rxq_ctx_buf_t *rxq_ctx,
+ u32 rxq_index)
{
u8 i;
- if (!ice_rxq_ctx)
- return -EINVAL;
-
if (rxq_index > QRX_CTRL_MAX_INDEX)
return -EINVAL;
/* Copy each dword separately to HW */
for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
- wr32(hw, QRX_CONTEXT(i, rxq_index),
- *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
+ u32 ctx = ((const u32 *)rxq_ctx)[i];
- ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
- *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
+ wr32(hw, QRX_CONTEXT(i, rxq_index), ctx);
+
+ ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, ctx);
}
return 0;
@@ -1426,15 +1424,15 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index)
{
- u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
+ ice_rxq_ctx_buf_t buf = {};
if (!rlan_ctx)
return -EINVAL;
rlan_ctx->prefena = 1;
- ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
- return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
+ ice_set_ctx(hw, (u8 *)rlan_ctx, (u8 *)&buf, ice_rlan_ctx_info);
+ return ice_copy_rxq_ctx_to_hw(hw, &buf, rxq_index);
}
/* LAN Tx Queue Context */
--
2.47.0.265.g4ca455297942