Message-Id: <20220131162838.1C48CC340ED@smtp.kernel.org>
Date: Thu, 13 Jan 2022 17:12:15 -0600
From: Clark Williams <williams@...hat.com>
To: Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Thomas Gleixner <tglx@...utronix.de>,
linux-rt-users@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: [RFC PATCH PREEMPT_RT 4.19 STABLE] net: Fix compiler warnings on 4.19 PREEMPT_RT with xmit_lock_owner

GCC 11 has started complaining about the PREEMPT_RT changes to commit
2fd949365fe6628fb2, which change xmit_lock_owner from an integer (a CPU
number) to a pointer to struct task_struct. These changes come from the
-RT patch:

  net: move xmit_recursion to per-task variable on -RT
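For context, with that -RT patch applied the owner field in struct
netdev_queue ends up looking roughly like this (a sketch from memory of
the -RT series, not a verbatim quote of the tree):

	#ifdef CONFIG_PREEMPT_RT_FULL
		/* On RT the lock owner is the task, not a CPU number */
		struct task_struct	*xmit_lock_owner;
	#else
		int			xmit_lock_owner;
	#endif

so on RT builds the existing WRITE_ONCE(..., cpu) assignments store an
int into a task_struct pointer, which is what GCC 11 objects to.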
The patch below is ugly, but it silences the compiler warnings.
I'd be happy to entertain a better/cleaner solution.
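One cleaner shape might be to hide the ifdef behind a pair of small
helpers next to the existing inlines in include/linux/netdevice.h and
keep the lock/unlock paths identical. Untested sketch only; the helper
names below are made up for illustration, not existing kernel API:

	#ifdef CONFIG_PREEMPT_RT_FULL
	/* cpu is ignored on RT; the owner is the locking task */
	static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
	{
		WRITE_ONCE(txq->xmit_lock_owner, current);
	}

	static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
	{
		WRITE_ONCE(txq->xmit_lock_owner, NULL);
	}
	#else
	static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
	{
		WRITE_ONCE(txq->xmit_lock_owner, cpu);
	}

	static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
	{
		WRITE_ONCE(txq->xmit_lock_owner, -1);
	}
	#endif

__netif_tx_lock(), __netif_tx_trylock() and the unlock helpers would
then call these instead of open-coding the WRITE_ONCE()s as done below.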
Signed-off-by: Clark Williams <williams@...hat.com>
---
include/linux/netdevice.h | 20 ++++++++++++++++++++
1 file changed, 20 insertions(+)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 7b34ce34114a..fa2a52d24218 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3921,7 +3921,11 @@ static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
spin_lock(&txq->_xmit_lock);
/* Pairs with READ_ONCE() in __dev_queue_xmit() */
+#ifdef CONFIG_PREEMPT_RT_FULL
+ WRITE_ONCE(txq->xmit_lock_owner, current);
+#else
WRITE_ONCE(txq->xmit_lock_owner, cpu);
+#endif
}

static inline bool __netif_tx_acquire(struct netdev_queue *txq)
@@ -3939,7 +3943,11 @@ static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
spin_lock_bh(&txq->_xmit_lock);
/* Pairs with READ_ONCE() in __dev_queue_xmit() */
+#ifdef CONFIG_PREEMPT_RT_FULL
+ WRITE_ONCE(txq->xmit_lock_owner, current);
+#else
WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
+#endif
}

static inline bool __netif_tx_trylock(struct netdev_queue *txq)
@@ -3948,7 +3956,11 @@ static inline bool __netif_tx_trylock(struct netdev_queue *txq)
if (likely(ok)) {
/* Pairs with READ_ONCE() in __dev_queue_xmit() */
+#ifdef CONFIG_PREEMPT_RT_FULL
+ WRITE_ONCE(txq->xmit_lock_owner, current);
+#else
WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
+#endif
}
return ok;
}
@@ -3956,14 +3968,22 @@ static inline bool __netif_tx_trylock(struct netdev_queue *txq)
static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
/* Pairs with READ_ONCE() in __dev_queue_xmit() */
+#ifdef CONFIG_PREEMPT_RT_FULL
+ WRITE_ONCE(txq->xmit_lock_owner, NULL);
+#else
WRITE_ONCE(txq->xmit_lock_owner, -1);
+#endif
spin_unlock(&txq->_xmit_lock);
}

static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
/* Pairs with READ_ONCE() in __dev_queue_xmit() */
+#ifdef CONFIG_PREEMPT_RT_FULL
+ WRITE_ONCE(txq->xmit_lock_owner, NULL);
+#else
WRITE_ONCE(txq->xmit_lock_owner, -1);
+#endif
spin_unlock_bh(&txq->_xmit_lock);
}
--
2.34.1