Message-ID: <20250520110823.1937981-7-karol.kolacinski@intel.com>
Date: Tue, 20 May 2025 13:06:26 +0200
From: Karol Kolacinski <karol.kolacinski@...el.com>
To: intel-wired-lan@...ts.osuosl.org
Cc: netdev@...r.kernel.org,
anthony.l.nguyen@...el.com,
przemyslaw.kitszel@...el.com,
richardcochran@...il.com,
Karol Kolacinski <karol.kolacinski@...el.com>,
Milena Olech <milena.olech@...el.com>
Subject: [PATCH iwl-next 1/4] ice: skip completion for sideband queue writes
The sideband queue (SBQ) is a HW queue with a very short completion time.
All SBQ writes were posted by default, meaning the driver did not wait for
a completion from the neighbor device because none was issued. Instead,
fixed delays were the only thing "ensuring" that a command had
"completed", which introduced unnecessary latency and left a potential
race condition.

Add the ability to perform non-posted writes where it is necessary to wait
for a real completion, instead of relying on a fake completion from the FW
with only delays guarding the writes.

Flush the SBQ by reading address 0 on PHY 0 before issuing the SYNC
command, to ensure that writes to all PHYs have completed, and skip
waiting for SBQ message completion when the message is posted.
Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@...el.com>
Reviewed-by: Milena Olech <milena.olech@...el.com>
Signed-off-by: Karol Kolacinski <karol.kolacinski@...el.com>
---
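Note (not part of the commit message): for readers unfamiliar with the
posted vs. non-posted distinction, below is a minimal, self-contained C
sketch of the dispatch this patch introduces. It is not the driver code;
apart from the three opcode values, every name here is hypothetical.

/* Sketch: the opcode decides whether the send path waits for a
 * completion. Posted writes skip the wait; reads and non-posted
 * writes still poll for the FW write-back.
 */
#include <stdbool.h>
#include <stdio.h>

enum sbq_msg_opcode {
	sbq_msg_rd    = 0x00,	/* read, data comes back in the completion */
	sbq_msg_wr_p  = 0x01,	/* posted write, no completion */
	sbq_msg_wr_np = 0x02,	/* non-posted write, wait for completion */
};

/* Hypothetical stand-in for the command details passed down the send path. */
struct sq_cmd_details {
	bool postpone;		/* true: ring the doorbell and return immediately */
};

static int sbq_send(const struct sq_cmd_details *cd)
{
	/* ... descriptor is written and the queue tail is bumped here ... */
	if (cd->postpone)
		return 0;	/* posted: do not poll for a completion */
	/* ... otherwise poll the queue head until the FW writes back ... */
	return 0;
}

static int sbq_rw_reg(enum sbq_msg_opcode opcode)
{
	struct sq_cmd_details cd = {
		/* Only posted writes skip the completion wait. */
		.postpone = (opcode == sbq_msg_wr_p),
	};

	return sbq_send(&cd);
}

int main(void)
{
	printf("posted write:     %d\n", sbq_rw_reg(sbq_msg_wr_p));
	printf("non-posted write: %d\n", sbq_rw_reg(sbq_msg_wr_np));
	return 0;
}

Reads never set postpone because their data is only delivered in the
completion, which is why the diff below keeps waiting for them.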
drivers/net/ethernet/intel/ice/ice_common.c | 13 ++--
drivers/net/ethernet/intel/ice/ice_controlq.c | 4 ++
drivers/net/ethernet/intel/ice/ice_controlq.h | 1 +
drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 62 +++++++++++--------
drivers/net/ethernet/intel/ice/ice_sbq_cmd.h | 5 +-
5 files changed, 52 insertions(+), 33 deletions(-)
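A second, equally hypothetical sketch of the flush-before-SYNC ordering
described above: a read of address 0 on PHY 0 cannot complete until every
earlier posted write on the sideband queue has drained, so the dummy read
doubles as a write barrier ahead of the timer SYNC command. The function
names are placeholders, not the driver's API.

#include <stdio.h>

/* Stubs standing in for the real sideband accessors. */
static int sbq_posted_write(unsigned int addr, unsigned int val)
{
	printf("posted write 0x%x <- 0x%x (no completion awaited)\n", addr, val);
	return 0;
}

static int sbq_read(unsigned int addr, unsigned int *val)
{
	/* The FW handles queue entries in order, so once this read
	 * returns, all writes queued before it have been executed.
	 */
	*val = 0;
	printf("flush read of 0x%x completed\n", addr);
	return 0;
}

static int exec_tmr_cmd(void)
{
	printf("SYNC issued after flush\n");
	return 0;
}

int main(void)
{
	unsigned int dummy;

	sbq_posted_write(0x100, 0x1);	/* queued, not waited for */
	if (!sbq_read(0, &dummy))	/* barrier: drain the posted writes */
		exec_tmr_cmd();
	return 0;
}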
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 11a954e8dc62..53b9b5b54187 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -1523,6 +1523,7 @@ int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flags)
{
struct ice_sbq_cmd_desc desc = {0};
struct ice_sbq_msg_req msg = {0};
+ struct ice_sq_cd cd = {};
u16 msg_len;
int status;
@@ -1535,7 +1536,7 @@ int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flags)
msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);
- if (in->opcode)
+ if (in->opcode == ice_sbq_msg_wr_p || in->opcode == ice_sbq_msg_wr_np)
msg.data = cpu_to_le32(in->data);
else
/* data read comes back in completion, so shorten the struct by
@@ -1543,10 +1544,12 @@ int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flags)
*/
msg_len -= sizeof(msg.data);
+ cd.postpone = in->opcode == ice_sbq_msg_wr_p;
+
desc.flags = cpu_to_le16(flags);
desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
desc.param0.cmd_len = cpu_to_le16(msg_len);
- status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
+ status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, &cd);
if (!status && !in->opcode)
in->data = le32_to_cpu
(((struct ice_sbq_msg_cmpl *)&msg)->data);
@@ -6260,7 +6263,7 @@ int ice_read_cgu_reg(struct ice_hw *hw, u32 addr, u32 *val)
struct ice_sbq_msg_input cgu_msg = {
.opcode = ice_sbq_msg_rd,
.dest_dev = ice_sbq_dev_cgu,
- .msg_addr_low = addr
+ .msg_addr_low = addr,
};
int err;
@@ -6290,10 +6293,10 @@ int ice_read_cgu_reg(struct ice_hw *hw, u32 addr, u32 *val)
int ice_write_cgu_reg(struct ice_hw *hw, u32 addr, u32 val)
{
struct ice_sbq_msg_input cgu_msg = {
- .opcode = ice_sbq_msg_wr,
+ .opcode = ice_sbq_msg_wr_np,
.dest_dev = ice_sbq_dev_cgu,
.msg_addr_low = addr,
- .data = val
+ .data = val,
};
int err;
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index dcb837cadd18..5fb3a8441beb 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -1086,6 +1086,10 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
wr32(hw, cq->sq.tail, cq->sq.next_to_use);
ice_flush(hw);
+ /* If the message is posted, don't wait for completion. */
+ if (cd && cd->postpone)
+ goto sq_send_command_error;
+
/* Wait for the command to complete. If it finishes within the
* timeout, copy the descriptor back to temp.
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index 788040dd662e..7c98d3a0314e 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -77,6 +77,7 @@ struct ice_ctl_q_ring {
/* sq transaction details */
struct ice_sq_cd {
struct libie_aq_desc *wb_desc;
+ u8 postpone : 1;
};
/* rq event information */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 523f95271f35..9a4ecf1249ee 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -352,6 +352,13 @@ void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
{
struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
+ struct ice_sbq_msg_input msg = {
+ .dest_dev = ice_sbq_dev_phy_0,
+ .opcode = ice_sbq_msg_rd,
+ };
+
+ /* Flush SBQ by reading address 0 on PHY 0 */
+ ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (!ice_is_primary(hw))
hw = ice_get_primary_hw(pf);
@@ -417,10 +424,10 @@ static int ice_write_phy_eth56g(struct ice_hw *hw, u8 port, u32 addr, u32 val)
{
struct ice_sbq_msg_input msg = {
.dest_dev = ice_ptp_get_dest_dev_e825(hw, port),
- .opcode = ice_sbq_msg_wr,
+ .opcode = ice_sbq_msg_wr_p,
.msg_addr_low = lower_16_bits(addr),
.msg_addr_high = upper_16_bits(addr),
- .data = val
+ .data = val,
};
int err;
@@ -2342,11 +2349,12 @@ static bool ice_is_40b_phy_reg_e82x(u16 low_addr, u16 *high_addr)
static int
ice_read_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
{
- struct ice_sbq_msg_input msg = {0};
+ struct ice_sbq_msg_input msg = {
+ .opcode = ice_sbq_msg_rd,
+ };
int err;
ice_fill_phy_msg_e82x(hw, &msg, port, offset);
- msg.opcode = ice_sbq_msg_rd;
err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err) {
@@ -2419,12 +2427,13 @@ ice_read_64b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
static int
ice_write_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 val)
{
- struct ice_sbq_msg_input msg = {0};
+ struct ice_sbq_msg_input msg = {
+ .opcode = ice_sbq_msg_wr_p,
+ .data = val,
+ };
int err;
ice_fill_phy_msg_e82x(hw, &msg, port, offset);
- msg.opcode = ice_sbq_msg_wr;
- msg.data = val;
err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err) {
@@ -2578,15 +2587,15 @@ static int ice_fill_quad_msg_e82x(struct ice_hw *hw,
int
ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
{
- struct ice_sbq_msg_input msg = {0};
+ struct ice_sbq_msg_input msg = {
+ .opcode = ice_sbq_msg_rd,
+ };
int err;
err = ice_fill_quad_msg_e82x(hw, &msg, quad, offset);
if (err)
return err;
- msg.opcode = ice_sbq_msg_rd;
-
err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
@@ -2612,16 +2621,16 @@ ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
int
ice_write_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
{
- struct ice_sbq_msg_input msg = {0};
+ struct ice_sbq_msg_input msg = {
+ .opcode = ice_sbq_msg_wr_p,
+ .data = val,
+ };
int err;
err = ice_fill_quad_msg_e82x(hw, &msg, quad, offset);
if (err)
return err;
- msg.opcode = ice_sbq_msg_wr;
- msg.data = val;
-
err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
@@ -4259,13 +4268,14 @@ static void ice_ptp_init_phy_e82x(struct ice_ptp_hw *ptp)
*/
static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
{
- struct ice_sbq_msg_input msg = {0};
+ struct ice_sbq_msg_input msg = {
+ .dest_dev = ice_sbq_dev_phy_0,
+ .opcode = ice_sbq_msg_rd,
+ .msg_addr_low = lower_16_bits(addr),
+ .msg_addr_high = upper_16_bits(addr),
+ };
int err;
- msg.msg_addr_low = lower_16_bits(addr);
- msg.msg_addr_high = upper_16_bits(addr);
- msg.opcode = ice_sbq_msg_rd;
- msg.dest_dev = ice_sbq_dev_phy_0;
err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err) {
@@ -4289,15 +4299,15 @@ static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
*/
static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
{
- struct ice_sbq_msg_input msg = {0};
+ struct ice_sbq_msg_input msg = {
+ .dest_dev = ice_sbq_dev_phy_0,
+ .opcode = ice_sbq_msg_wr_p,
+ .msg_addr_low = lower_16_bits(addr),
+ .msg_addr_high = upper_16_bits(addr),
+ .data = val,
+ };
int err;
- msg.msg_addr_low = lower_16_bits(addr);
- msg.msg_addr_high = upper_16_bits(addr);
- msg.opcode = ice_sbq_msg_wr;
- msg.dest_dev = ice_sbq_dev_phy_0;
- msg.data = val;
-
err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
diff --git a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
index 183dd5457d6a..7960f888a655 100644
--- a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
@@ -53,8 +53,9 @@ enum ice_sbq_dev_id {
};
enum ice_sbq_msg_opcode {
- ice_sbq_msg_rd = 0x00,
- ice_sbq_msg_wr = 0x01
+ ice_sbq_msg_rd = 0x00,
+ ice_sbq_msg_wr_p = 0x01,
+ ice_sbq_msg_wr_np = 0x02,
};
#define ICE_SBQ_MSG_FLAGS 0x40
--
2.49.0