Message-Id: <01d0284d68157baf6dafdaa22056380b96b96076.1592942138.git.rahul.lakkireddy@chelsio.com>
Date: Wed, 24 Jun 2020 01:51:32 +0530
From: Rahul Lakkireddy <rahul.lakkireddy@...lsio.com>
To: netdev@...r.kernel.org
Cc: davem@...emloft.net, nirranjan@...lsio.com, vishal@...lsio.com,
dt@...lsio.com
Subject: [PATCH net 02/12] cxgb4: move PTP lock and unlock to caller in Tx path
Check whether PTP is enabled at the caller and perform the locking and
unlocking there.

Fixes the following sparse warning:
sge.c:1641:26: warning: context imbalance in 'cxgb4_eth_xmit' -
different lock contexts for basic block
Fixes: a456950445a0 ("cxgb4: time stamping interface for PTP")
Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@...lsio.com>
---
drivers/net/ethernet/chelsio/cxgb4/sge.c | 23 +++++++++++------------
1 file changed, 11 insertions(+), 12 deletions(-)
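
For context, the sketch below is a minimal userspace analogue of the rework,
not driver code: a pthread mutex stands in for adap->ptp_lock, and the
hypothetical helpers xmit_one()/start_xmit() stand in for
cxgb4_eth_xmit()/t4_start_xmit(). A helper that conditionally locks and
unlocks on several exit paths is exactly the shape sparse flags; hoisting
the lock into the caller leaves every path through the helper lock-neutral.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t ptp_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Before: the helper locked/unlocked itself, but only when PTP was on, so
 * different exit paths left the lock in different states -- the shape
 * sparse reports as "different lock contexts for basic block".
 */
static int xmit_one_before(bool ptp_enabled, bool ring_full)
{
	if (ptp_enabled)
		pthread_mutex_lock(&ptp_lock);

	if (ring_full) {
		if (ptp_enabled)
			pthread_mutex_unlock(&ptp_lock);
		return -1;		/* early exit, conditional unlock */
	}

	/* ... build and post the descriptor ... */

	if (ptp_enabled)
		pthread_mutex_unlock(&ptp_lock);
	return 0;
}

/* After: the helper is lock-neutral on every path... */
static int xmit_one(bool ring_full)
{
	if (ring_full)
		return -1;
	/* ... build and post the descriptor ... */
	return 0;
}

/*
 * ...and the caller decides whether the whole call runs under the lock,
 * mirroring the new is_ptp_enabled() branch in t4_start_xmit().
 */
static int start_xmit(bool ptp_enabled, bool ring_full)
{
	int ret;

	if (ptp_enabled) {
		pthread_mutex_lock(&ptp_lock);
		ret = xmit_one(ring_full);
		pthread_mutex_unlock(&ptp_lock);
		return ret;
	}

	return xmit_one(ring_full);
}

int main(void)
{
	/* Exercise both variants so the sketch builds without warnings. */
	printf("before: %d, after: %d %d\n",
	       xmit_one_before(true, true),
	       start_xmit(true, false),
	       start_xmit(false, false));
	return 0;
}
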
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 1359158652b7..f7b5a2f11001 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1425,12 +1425,10 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
qidx = skb_get_queue_mapping(skb);
if (ptp_enabled) {
- spin_lock(&adap->ptp_lock);
if (!(adap->ptp_tx_skb)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
adap->ptp_tx_skb = skb_get(skb);
} else {
- spin_unlock(&adap->ptp_lock);
goto out_free;
}
q = &adap->sge.ptptxq;
@@ -1444,11 +1442,8 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
#ifdef CONFIG_CHELSIO_T4_FCOE
ret = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
- if (unlikely(ret == -ENOTSUPP)) {
- if (ptp_enabled)
- spin_unlock(&adap->ptp_lock);
+ if (unlikely(ret == -EOPNOTSUPP))
goto out_free;
- }
#endif /* CONFIG_CHELSIO_T4_FCOE */
chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
@@ -1461,8 +1456,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
dev_err(adap->pdev_dev,
"%s: Tx ring %u full while queue awake!\n",
dev->name, qidx);
- if (ptp_enabled)
- spin_unlock(&adap->ptp_lock);
return NETDEV_TX_BUSY;
}
@@ -1481,8 +1474,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
q->mapping_err++;
- if (ptp_enabled)
- spin_unlock(&adap->ptp_lock);
goto out_free;
}
@@ -1630,8 +1621,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
txq_advance(&q->q, ndesc);
cxgb4_ring_tx_db(adap, &q->q, ndesc);
- if (ptp_enabled)
- spin_unlock(&adap->ptp_lock);
return NETDEV_TX_OK;
out_free:
@@ -2377,6 +2366,16 @@ netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(qid >= pi->nqsets))
return cxgb4_ethofld_xmit(skb, dev);
+ if (is_ptp_enabled(skb, dev)) {
+ struct adapter *adap = netdev2adap(dev);
+ netdev_tx_t ret;
+
+ spin_lock(&adap->ptp_lock);
+ ret = cxgb4_eth_xmit(skb, dev);
+ spin_unlock(&adap->ptp_lock);
+ return ret;
+ }
+
return cxgb4_eth_xmit(skb, dev);
}
--
2.24.0
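
The warning quoted in the changelog comes from running sparse over the
driver (e.g. "make C=2 drivers/net/ethernet/chelsio/cxgb4/sge.o"). sparse
tracks lock context per function, so a function that can return with a lock
in a different state on different paths is reported as imbalanced. When
such an asymmetry is intentional rather than restructured as done in this
patch, it can be documented with the kernel's
__acquires()/__releases()/__must_hold() annotations; the fragment below is
a generic, hypothetical illustration (struct foo and its helpers are not
cxgb4 code).

#include <linux/spinlock.h>

struct foo {
	spinlock_t lock;
	int stat;
};

/* Caller must already hold f->lock; sparse checks the call sites. */
static void foo_update_locked(struct foo *f)
	__must_hold(&f->lock)
{
	f->stat++;
}

/* Returns with f->lock held -- an intentional, annotated imbalance. */
static void foo_lock(struct foo *f)
	__acquires(&f->lock)
{
	spin_lock(&f->lock);
}

/* Releases the lock taken by foo_lock(). */
static void foo_unlock(struct foo *f)
	__releases(&f->lock)
{
	spin_unlock(&f->lock);
}

/* Balanced user of the annotated helpers. */
static int foo_bump(struct foo *f)
{
	foo_lock(f);
	foo_update_locked(f);
	foo_unlock(f);
	return f->stat;
}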