lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Tue, 20 Sep 2022 00:18:47 +0200
From:   Andrew Lunn <andrew@...n.ch>
To:     mattias.forsblad@...il.com
Cc:     netdev <netdev@...r.kernel.org>,
        Florian Fainelli <f.fainelli@...il.com>,
        Vladimir Oltean <vladimir.oltean@....com>,
        Christian Marangi <ansuelsmth@...il.com>,
        Andrew Lunn <andrew@...n.ch>
Subject: [PATCH rfc v0 3/9] net: dsa: qca8K: Move queuing for request frame into the core

Combine the queuing of the request and waiting for the completion into
one core helper. Add the function dsa_inband_request() to perform this.

Access to statistics is not a strict request/reply, so the
dsa_inband_wait_for_completion needs to be kept.

It is also not possible to combine dsa_inband_request() and
dsa_inband_wait_for_completion() since we need to avoid the race of
sending the request, receiving a reply, and the completion not having
been reinitialised because the scheduler decided to do other things.

Signed-off-by: Andrew Lunn <andrew@...n.ch>
---
 drivers/net/dsa/qca/qca8k-8xxx.c | 32 ++++++++++----------------------
 include/net/dsa.h                |  2 ++
 net/dsa/dsa.c                    | 16 ++++++++++++++++
 3 files changed, 28 insertions(+), 22 deletions(-)

diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index f4e92156bd32..9c44a09590a6 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -253,10 +253,8 @@ static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
 	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
 	mgmt_eth_data->ack = false;
 
-	dev_queue_xmit(skb);
-
-	ret = dsa_inband_wait_for_completion(&mgmt_eth_data->inband,
-					     QCA8K_ETHERNET_TIMEOUT);
+	ret = dsa_inband_request(&mgmt_eth_data->inband, skb,
+				 QCA8K_ETHERNET_TIMEOUT);
 
 	*val = mgmt_eth_data->data[0];
 	if (len > QCA_HDR_MGMT_DATA1_LEN)
@@ -303,10 +301,8 @@ static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
 	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
 	mgmt_eth_data->ack = false;
 
-	dev_queue_xmit(skb);
-
-	ret = dsa_inband_wait_for_completion(&mgmt_eth_data->inband,
-					     QCA8K_ETHERNET_TIMEOUT);
+	ret = dsa_inband_request(&mgmt_eth_data->inband, skb,
+				 QCA8K_ETHERNET_TIMEOUT);
 
 	ack = mgmt_eth_data->ack;
 
@@ -449,10 +445,8 @@ qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
 	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
 	mgmt_eth_data->ack = false;
 
-	dev_queue_xmit(skb);
-
-	ret = dsa_inband_wait_for_completion(&mgmt_eth_data->inband,
-					     QCA8K_ETHERNET_TIMEOUT);
+	ret = dsa_inband_request(&mgmt_eth_data->inband, skb,
+				 QCA8K_ETHERNET_TIMEOUT);
 
 	ack = mgmt_eth_data->ack;
 
@@ -539,10 +533,8 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
 	qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
 	mgmt_eth_data->ack = false;
 
-	dev_queue_xmit(write_skb);
-
-	ret = dsa_inband_wait_for_completion(&mgmt_eth_data->inband,
-					     QCA8K_ETHERNET_TIMEOUT);
+	ret = dsa_inband_request(&mgmt_eth_data->inband, write_skb,
+				 QCA8K_ETHERNET_TIMEOUT);
 
 	ack = mgmt_eth_data->ack;
 
@@ -574,10 +566,8 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
 		qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
 		mgmt_eth_data->ack = false;
 
-		dev_queue_xmit(read_skb);
-
-		ret = dsa_inband_wait_for_completion(&mgmt_eth_data->inband,
-						     QCA8K_ETHERNET_TIMEOUT);
+		ret = dsa_inband_request(&mgmt_eth_data->inband, read_skb,
+					 QCA8K_ETHERNET_TIMEOUT);
 
 		ack = mgmt_eth_data->ack;
 
@@ -601,8 +591,6 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
 	qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
 	mgmt_eth_data->ack = false;
 
-	dev_queue_xmit(clear_skb);
-
 	dsa_inband_wait_for_completion(&mgmt_eth_data->inband,
 				       QCA8K_ETHERNET_TIMEOUT);
 
diff --git a/include/net/dsa.h b/include/net/dsa.h
index ca81541703f4..50c319832939 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -1286,6 +1286,8 @@ struct dsa_inband {
 
 void dsa_inband_init(struct dsa_inband *inband);
 void dsa_inband_complete(struct dsa_inband *inband);
+int dsa_inband_request(struct dsa_inband *inband, struct sk_buff *skb,
+		       int timeout_ms);
 int dsa_inband_wait_for_completion(struct dsa_inband *inband, int timeout_ms);
 
 /* Keep inline for faster access in hot path */
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 382dbb9e921a..8de0c3124abf 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -540,6 +540,22 @@ int dsa_inband_wait_for_completion(struct dsa_inband *inband, int timeout_ms)
 }
 EXPORT_SYMBOL_GPL(dsa_inband_wait_for_completion);
 
+/* Cannot use dsa_inband_wait_for_completion() since the completion needs
+ * to be reinitialized before the skb is queued, to avoid races.
+ */
+int dsa_inband_request(struct dsa_inband *inband, struct sk_buff *skb,
+		       int timeout_ms)
+{
+	unsigned long jiffies = msecs_to_jiffies(timeout_ms);
+
+	reinit_completion(&inband->completion);
+
+	dev_queue_xmit(skb);
+
+	return wait_for_completion_timeout(&inband->completion, jiffies);
+}
+EXPORT_SYMBOL_GPL(dsa_inband_request);
+
 static int __init dsa_init_module(void)
 {
 	int rc;
-- 
2.37.2

Powered by blists - more mailing lists