Message-Id: <20220914053041.1615876-7-mattias.forsblad@gmail.com>
Date: Wed, 14 Sep 2022 07:30:41 +0200
From: Mattias Forsblad <mattias.forsblad@...il.com>
To: netdev@...r.kernel.org
Cc: Andrew Lunn <andrew@...n.ch>,
Vivien Didelot <vivien.didelot@...il.com>,
Florian Fainelli <f.fainelli@...il.com>,
Vladimir Oltean <olteanv@...il.com>,
"David S . Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>, linux@...linux.org.uk,
Mattias Forsblad <mattias.forsblad@...il.com>
Subject: [PATCH net-next v11 6/6] net: dsa: qca8k: Use new convenience functions
Use the new common DSA convenience functions, dsa_switch_inband_tx() and
dsa_switch_inband_complete(), for sending management frames and waiting
for the switch's response, instead of open-coding the
reinit_completion()/dev_queue_xmit()/wait_for_completion_timeout()
sequence at every call site.

Signed-off-by: Mattias Forsblad <mattias.forsblad@...il.com>
---
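For reviewers skimming this patch on its own: the helpers come from the
DSA core patch earlier in this series. Below is a minimal sketch of the
calling convention the conversions here rely on; it is inferred purely
from the call sites in this patch, not copied from the core patch, so
the internals (the msecs_to_jiffies() conversion, the NULL-skb "wait
only" case used by the MIB path) are assumptions.

static inline int dsa_switch_inband_tx(struct dsa_switch *ds,
				       struct sk_buff *skb,
				       struct completion *completion,
				       unsigned long timeout_ms)
{
	/* Arm the completion before the frame can possibly be answered. */
	reinit_completion(completion);

	/* A NULL skb means "only wait"; qca8k_get_ethtool_stats_eth()
	 * uses this after triggering the MIB autocast separately.
	 */
	if (skb)
		dev_queue_xmit(skb);

	/* Returns 0 on timeout, remaining jiffies otherwise. */
	return wait_for_completion_timeout(completion,
					   msecs_to_jiffies(timeout_ms));
}

static inline void dsa_switch_inband_complete(struct dsa_switch *ds,
					      struct completion *completion)
{
	complete(completion);
}

Besides removing the per-caller reinit/xmit/wait boilerplate, this also
makes the timeout handling uniform: before this patch, qca8k passed
msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT) in the read/write paths but
the raw QCA8K_ETHERNET_TIMEOUT value in qca8k_phy_eth_busy_wait() and
the PHY command paths.
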
drivers/net/dsa/qca/qca8k-8xxx.c | 61 +++++++++-----------------------
1 file changed, 17 insertions(+), 44 deletions(-)
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index c181346388a4..4e9bc103c0a5 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -160,7 +160,7 @@ static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
QCA_HDR_MGMT_DATA2_LEN);
}
- complete(&mgmt_eth_data->rw_done);
+ dsa_switch_inband_complete(ds, &mgmt_eth_data->rw_done);
}
static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
@@ -228,6 +228,7 @@ static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
{
struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
+ struct dsa_switch *ds = priv->ds;
struct sk_buff *skb;
bool ack;
int ret;
@@ -248,17 +249,12 @@ static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
skb->dev = priv->mgmt_master;
- reinit_completion(&mgmt_eth_data->rw_done);
-
/* Increment seq_num and set it in the mdio pkt */
mgmt_eth_data->seq++;
qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
- dev_queue_xmit(skb);
-
- ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
- msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
+ ret = dsa_switch_inband_tx(ds, skb, &mgmt_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
*val = mgmt_eth_data->data[0];
if (len > QCA_HDR_MGMT_DATA1_LEN)
@@ -280,6 +276,7 @@ static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
{
struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
+ struct dsa_switch *ds = priv->ds;
struct sk_buff *skb;
bool ack;
int ret;
@@ -300,17 +297,12 @@ static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
skb->dev = priv->mgmt_master;
- reinit_completion(&mgmt_eth_data->rw_done);
-
/* Increment seq_num and set it in the mdio pkt */
mgmt_eth_data->seq++;
qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
- dev_queue_xmit(skb);
-
- ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
- msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
+ ret = dsa_switch_inband_tx(ds, skb, &mgmt_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
ack = mgmt_eth_data->ack;
@@ -441,24 +433,21 @@ static struct regmap_config qca8k_regmap_config = {
};
static int
-qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
+qca8k_phy_eth_busy_wait(struct qca8k_priv *priv,
struct sk_buff *read_skb, u32 *val)
{
+ struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
+ struct dsa_switch *ds = priv->ds;
bool ack;
int ret;
- reinit_completion(&mgmt_eth_data->rw_done);
-
/* Increment seq_num and set it in the copy pkt */
mgmt_eth_data->seq++;
qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
- dev_queue_xmit(skb);
-
- ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
- QCA8K_ETHERNET_TIMEOUT);
+ ret = dsa_switch_inband_tx(ds, skb, &mgmt_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
ack = mgmt_eth_data->ack;
@@ -480,6 +469,7 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
struct sk_buff *write_skb, *clear_skb, *read_skb;
struct qca8k_mgmt_eth_data *mgmt_eth_data;
u32 write_val, clear_val = 0, val;
+ struct dsa_switch *ds = priv->ds;
struct net_device *mgmt_master;
int ret, ret1;
bool ack;
@@ -540,17 +530,12 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
clear_skb->dev = mgmt_master;
write_skb->dev = mgmt_master;
- reinit_completion(&mgmt_eth_data->rw_done);
-
/* Increment seq_num and set it in the write pkt */
mgmt_eth_data->seq++;
qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
- dev_queue_xmit(write_skb);
-
- ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
- QCA8K_ETHERNET_TIMEOUT);
+ ret = dsa_switch_inband_tx(ds, write_skb, &mgmt_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
ack = mgmt_eth_data->ack;
@@ -569,7 +554,7 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
!(val & QCA8K_MDIO_MASTER_BUSY), 0,
QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
- mgmt_eth_data, read_skb, &val);
+ priv, read_skb, &val);
if (ret < 0 && ret1 < 0) {
ret = ret1;
@@ -577,17 +562,13 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
}
if (read) {
- reinit_completion(&mgmt_eth_data->rw_done);
-
/* Increment seq_num and set it in the read pkt */
mgmt_eth_data->seq++;
qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
- dev_queue_xmit(read_skb);
-
- ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
- QCA8K_ETHERNET_TIMEOUT);
+ ret = dsa_switch_inband_tx(ds, read_skb, &mgmt_eth_data->rw_done,
+ QCA8K_ETHERNET_TIMEOUT);
ack = mgmt_eth_data->ack;
@@ -606,17 +587,12 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
kfree_skb(read_skb);
}
exit:
- reinit_completion(&mgmt_eth_data->rw_done);
-
/* Increment seq_num and set it in the clear pkt */
mgmt_eth_data->seq++;
qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
- dev_queue_xmit(clear_skb);
-
- wait_for_completion_timeout(&mgmt_eth_data->rw_done,
- QCA8K_ETHERNET_TIMEOUT);
+ ret = dsa_switch_inband_tx(ds, clear_skb, &mgmt_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
mutex_unlock(&mgmt_eth_data->mutex);
@@ -1528,7 +1504,7 @@ static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *sk
exit:
/* Complete on receiving all the mib packet */
if (refcount_dec_and_test(&mib_eth_data->port_parsed))
- complete(&mib_eth_data->rw_done);
+ dsa_switch_inband_complete(ds, &mib_eth_data->rw_done);
}
static int
@@ -1543,8 +1519,6 @@ qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
mutex_lock(&mib_eth_data->mutex);
- reinit_completion(&mib_eth_data->rw_done);
-
mib_eth_data->req_port = dp->index;
mib_eth_data->data = data;
refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);
@@ -1562,8 +1536,7 @@ qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
if (ret)
goto exit;
- ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
-
+ ret = dsa_switch_inband_tx(ds, NULL, &mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
exit:
mutex_unlock(&mib_eth_data->mutex);
--
2.25.1