Message-Id: <20220919221853.4095491-6-andrew@lunn.ch>
Date: Tue, 20 Sep 2022 00:18:49 +0200
From: Andrew Lunn <andrew@...n.ch>
To: mattias.forsblad@...il.com
Cc: netdev <netdev@...r.kernel.org>,
Florian Fainelli <f.fainelli@...il.com>,
Vladimir Oltean <vladimir.oltean@....com>,
Christian Marangi <ansuelsmth@...il.com>,
Andrew Lunn <andrew@...n.ch>
Subject: [PATCH rfc v0 5/9] net: dsa: qca8k: Move request sequence number handling into core

Each request/reply frame is likely to carry a sequence number so that
a request and its reply can be matched together. Move this sequence
number into the inband structure. The driver must provide a helper to
insert the sequence number into the skb; the core performs the
increment.
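
For illustration, such a helper might look like the following sketch.
The foo_ structure and field names are hypothetical; qca8k's real
helper, qca8k_mdio_header_fill_seq_num(), does the equivalent for its
management Ethernet header:

    /* Called by the DSA core from dsa_inband_request() with the
     * already incremented and masked sequence number. The driver
     * only has to place it in its device-specific header.
     */
    static void foo_header_fill_seq_num(struct sk_buff *skb, u32 seqno)
    {
            struct foo_mgmt_hdr *hdr = (struct foo_mgmt_hdr *)skb->data;

            hdr->seq = FIELD_PREP(FOO_HDR_MGMT_SEQ_NUM, seqno);
    }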

To allow different devices to use different sequence number widths, a
mask is provided. This can be used, for example, to reduce the u32
sequence number down to a u8.
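
As a hypothetical example, a device whose header only carries an 8-bit
sequence field would pass U8_MAX as the mask, so the core's u32
counter is truncated before being handed to the driver helper:

    /* Hypothetical 8-bit device: the sequence number wraps at 255 */
    dsa_inband_init(&priv->mgmt_data.inband, U8_MAX);

qca8k keeps the full 32 bits by passing U32_MAX below.
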
Signed-off-by: Andrew Lunn <andrew@...n.ch>
---
drivers/net/dsa/qca/qca8k-8xxx.c | 35 +++++++++-----------------------
drivers/net/dsa/qca/qca8k.h | 1 -
include/net/dsa.h | 6 +++++-
net/dsa/dsa.c | 16 ++++++++++++++-
4 files changed, 30 insertions(+), 28 deletions(-)
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index 9481a248273a..a354ba070d33 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -146,7 +146,7 @@ static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
len = FIELD_GET(QCA_HDR_MGMT_LENGTH, mgmt_ethhdr->command);
/* Make sure the seq matches the requested packet */
- if (mgmt_ethhdr->seq == mgmt_eth_data->seq)
+ if (mgmt_ethhdr->seq == dsa_inband_seqno(&mgmt_eth_data->inband))
mgmt_eth_data->ack = true;
if (cmd == MDIO_READ) {
@@ -247,14 +247,11 @@ static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
}
skb->dev = priv->mgmt_master;
-
- /* Increment seq_num and set it in the mdio pkt */
- mgmt_eth_data->seq++;
- qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
ret = dsa_inband_request(&mgmt_eth_data->inband, skb,
- QCA8K_ETHERNET_TIMEOUT);
+ qca8k_mdio_header_fill_seq_num,
+ QCA8K_ETHERNET_TIMEOUT);
*val = mgmt_eth_data->data[0];
if (len > QCA_HDR_MGMT_DATA1_LEN)
@@ -295,13 +292,10 @@ static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
}
skb->dev = priv->mgmt_master;
-
- /* Increment seq_num and set it in the mdio pkt */
- mgmt_eth_data->seq++;
- qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
ret = dsa_inband_request(&mgmt_eth_data->inband, skb,
+ qca8k_mdio_header_fill_seq_num,
QCA8K_ETHERNET_TIMEOUT);
ack = mgmt_eth_data->ack;
@@ -440,12 +434,10 @@ qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
bool ack;
int ret;
- /* Increment seq_num and set it in the copy pkt */
- mgmt_eth_data->seq++;
- qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
ret = dsa_inband_request(&mgmt_eth_data->inband, skb,
+ qca8k_mdio_header_fill_seq_num,
QCA8K_ETHERNET_TIMEOUT);
ack = mgmt_eth_data->ack;
@@ -527,13 +519,10 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
read_skb->dev = mgmt_master;
clear_skb->dev = mgmt_master;
write_skb->dev = mgmt_master;
-
- /* Increment seq_num and set it in the write pkt */
- mgmt_eth_data->seq++;
- qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
ret = dsa_inband_request(&mgmt_eth_data->inband, write_skb,
+ qca8k_mdio_header_fill_seq_num,
QCA8K_ETHERNET_TIMEOUT);
ack = mgmt_eth_data->ack;
@@ -560,12 +549,10 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
}
if (read) {
- /* Increment seq_num and set it in the read pkt */
- mgmt_eth_data->seq++;
- qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
ret = dsa_inband_request(&mgmt_eth_data->inband, read_skb,
+ qca8k_mdio_header_fill_seq_num,
QCA8K_ETHERNET_TIMEOUT);
ack = mgmt_eth_data->ack;
@@ -583,12 +570,10 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
kfree_skb(read_skb);
}
exit:
- /* Increment seq_num and set it in the clear pkt */
- mgmt_eth_data->seq++;
- qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
ret = dsa_inband_request(&mgmt_eth_data->inband, clear_skb,
+ qca8k_mdio_header_fill_seq_num,
QCA8K_ETHERNET_TIMEOUT);
mutex_unlock(&mgmt_eth_data->mutex);
@@ -1901,10 +1886,10 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
return -ENOMEM;
mutex_init(&priv->mgmt_eth_data.mutex);
- dsa_inband_init(&priv->mgmt_eth_data.inband);
+ dsa_inband_init(&priv->mgmt_eth_data.inband, U32_MAX);
mutex_init(&priv->mib_eth_data.mutex);
- dsa_inband_init(&priv->mib_eth_data.inband);
+ dsa_inband_init(&priv->mib_eth_data.inband, U32_MAX);
priv->ds->dev = &mdiodev->dev;
priv->ds->num_ports = QCA8K_NUM_PORTS;
diff --git a/drivers/net/dsa/qca/qca8k.h b/drivers/net/dsa/qca/qca8k.h
index 685628716ed2..a5abc340471c 100644
--- a/drivers/net/dsa/qca/qca8k.h
+++ b/drivers/net/dsa/qca/qca8k.h
@@ -349,7 +349,6 @@ struct qca8k_mgmt_eth_data {
struct dsa_inband inband;
struct mutex mutex; /* Enforce one mdio read/write at time */
bool ack;
- u32 seq;
u32 data[4];
};
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 50c319832939..2d6b7c7f158b 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -1282,13 +1282,17 @@ bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port,
*/
struct dsa_inband {
struct completion completion;
+ u32 seqno;
+ u32 seqno_mask;
};
-void dsa_inband_init(struct dsa_inband *inband);
+void dsa_inband_init(struct dsa_inband *inband, u32 seqno_mask);
void dsa_inband_complete(struct dsa_inband *inband);
int dsa_inband_request(struct dsa_inband *inband, struct sk_buff *skb,
+ void (*insert_seqno)(struct sk_buff *skb, u32 seqno),
int timeout_ms);
int dsa_inband_wait_for_completion(struct dsa_inband *inband, int timeout_ms);
+u32 dsa_inband_seqno(struct dsa_inband *inband);
/* Keep inline for faster access in hot path */
static inline bool netdev_uses_dsa(const struct net_device *dev)
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 68576f1c5b02..5a8d95f8acec 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -518,9 +518,11 @@ bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port,
}
EXPORT_SYMBOL_GPL(dsa_mdb_present_in_other_db);
-void dsa_inband_init(struct dsa_inband *inband)
+void dsa_inband_init(struct dsa_inband *inband, u32 seqno_mask)
{
init_completion(&inband->completion);
+ inband->seqno_mask = seqno_mask;
+ inband->seqno = 0;
}
EXPORT_SYMBOL_GPL(dsa_inband_init);
@@ -544,6 +546,7 @@ EXPORT_SYMBOL_GPL(dsa_inband_wait_for_completion);
* reinitialized before the skb is queued to avoid races.
*/
int dsa_inband_request(struct dsa_inband *inband, struct sk_buff *skb,
+ void (*insert_seqno)(struct sk_buff *skb, u32 seqno),
int timeout_ms)
{
unsigned long jiffies = msecs_to_jiffies(timeout_ms);
@@ -551,6 +554,11 @@ int dsa_inband_request(struct dsa_inband *inband, struct sk_buff *skb,
reinit_completion(&inband->completion);
+ if (insert_seqno) {
+ inband->seqno++;
+ insert_seqno(skb, inband->seqno & inband->seqno_mask);
+ }
+
dev_queue_xmit(skb);
ret = wait_for_completion_timeout(&inband->completion, jiffies);
@@ -562,6 +570,12 @@ int dsa_inband_request(struct dsa_inband *inband, struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(dsa_inband_request);
+u32 dsa_inband_seqno(struct dsa_inband *inband)
+{
+ return inband->seqno & inband->seqno_mask;
+}
+EXPORT_SYMBOL_GPL(dsa_inband_seqno);
+
static int __init dsa_init_module(void)
{
int rc;
--
2.37.2