Message-Id: <20230104194132.24637-2-gerhard@engleder-embedded.com>
Date: Wed, 4 Jan 2023 20:41:24 +0100
From: Gerhard Engleder <gerhard@...leder-embedded.com>
To: netdev@...r.kernel.org
Cc: davem@...emloft.net, kuba@...nel.org, edumazet@...gle.com,
pabeni@...hat.com, Gerhard Engleder <gerhard@...leder-embedded.com>
Subject: [PATCH net-next v3 1/9] tsnep: Use spin_lock_bh for TX

TX processing is done only in process or BH context. Therefore, the
interrupt-safe _irqsave variant is not necessary.

Signed-off-by: Gerhard Engleder <gerhard@...leder-embedded.com>
---
drivers/net/ethernet/engleder/tsnep_main.c | 19 ++++++++-----------
1 file changed, 8 insertions(+), 11 deletions(-)
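
For context, a minimal sketch of the locking pattern used after this
change, assuming (as the commit message states) that the TX ring is only
accessed from process context (ndo_start_xmit) and BH context (NAPI
poll); the names my_tx, my_tx_produce and my_tx_consume are illustrative
only and are not part of the driver:

/* spin_lock_bh() disables bottom halves on the local CPU, which is
 * enough to serialize against NAPI poll. Disabling hard interrupts via
 * spin_lock_irqsave() is only needed if the lock is also taken from
 * hard-IRQ context, which is not the case here.
 */
#include <linux/spinlock.h>

struct my_tx {
	spinlock_t lock;
	int read;
	int write;
};

/* Producer side, e.g. called from ndo_start_xmit (process context,
 * with BHs already disabled by the networking core).
 */
static void my_tx_produce(struct my_tx *tx)
{
	spin_lock_bh(&tx->lock);
	tx->write++;
	spin_unlock_bh(&tx->lock);
}

/* Consumer side, e.g. called from NAPI poll (BH context). */
static void my_tx_consume(struct my_tx *tx)
{
	spin_lock_bh(&tx->lock);
	if (tx->read != tx->write)
		tx->read++;
	spin_unlock_bh(&tx->lock);
}
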
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index bf0190e1d2ea..7cc5e2407809 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -434,7 +434,6 @@ static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
struct tsnep_tx *tx)
{
- unsigned long flags;
int count = 1;
struct tsnep_tx_entry *entry;
int length;
@@ -444,7 +443,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
if (skb_shinfo(skb)->nr_frags > 0)
count += skb_shinfo(skb)->nr_frags;
- spin_lock_irqsave(&tx->lock, flags);
+ spin_lock_bh(&tx->lock);
if (tsnep_tx_desc_available(tx) < count) {
/* ring full, shall not happen because queue is stopped if full
@@ -452,7 +451,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
*/
netif_stop_queue(tx->adapter->netdev);
- spin_unlock_irqrestore(&tx->lock, flags);
+ spin_unlock_bh(&tx->lock);
return NETDEV_TX_BUSY;
}
@@ -468,7 +467,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
tx->dropped++;
- spin_unlock_irqrestore(&tx->lock, flags);
+ spin_unlock_bh(&tx->lock);
netdev_err(tx->adapter->netdev, "TX DMA map failed\n");
@@ -496,20 +495,19 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
netif_stop_queue(tx->adapter->netdev);
}
- spin_unlock_irqrestore(&tx->lock, flags);
+ spin_unlock_bh(&tx->lock);
return NETDEV_TX_OK;
}
static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
{
- unsigned long flags;
int budget = 128;
struct tsnep_tx_entry *entry;
int count;
int length;
- spin_lock_irqsave(&tx->lock, flags);
+ spin_lock_bh(&tx->lock);
do {
if (tx->read == tx->write)
@@ -568,18 +566,17 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
netif_wake_queue(tx->adapter->netdev);
}
- spin_unlock_irqrestore(&tx->lock, flags);
+ spin_unlock_bh(&tx->lock);
return (budget != 0);
}
static bool tsnep_tx_pending(struct tsnep_tx *tx)
{
- unsigned long flags;
struct tsnep_tx_entry *entry;
bool pending = false;
- spin_lock_irqsave(&tx->lock, flags);
+ spin_lock_bh(&tx->lock);
if (tx->read != tx->write) {
entry = &tx->entry[tx->read];
@@ -589,7 +586,7 @@ static bool tsnep_tx_pending(struct tsnep_tx *tx)
pending = true;
}
- spin_unlock_irqrestore(&tx->lock, flags);
+ spin_unlock_bh(&tx->lock);
return pending;
}
--
2.30.2