Message-ID: <de516124-ffd7-d159-2848-00c65a8573a8@ya.ru>
Date: Sun, 31 Jul 2022 23:39:39 +0300
From: Kirill Tkhai <tkhai@...ru>
To: "David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Linux Kernel Network Developers <netdev@...r.kernel.org>,
tkhai@...ru
Subject: [PATCH] net: skb content must be visible for lockless skb_peek() and
its variations
From: Kirill Tkhai <tkhai@...ru>

Currently there are no barriers, so an skb->xxx update may be invisible on cpu2.
In the example below, var2 may point at initial_val0 instead of the expected var1:

    [cpu1]                                  [cpu2]

    skb->xxx = initial_val0;
    ...
    skb->xxx = var1;                        skb = READ_ONCE(prev_skb->next);
    <no barrier>                            <no barrier>
    WRITE_ONCE(prev_skb->next, skb);        var2 = skb->xxx;

This patch adds the barriers and fixes the problem. Note that __skb_peek() is not
patched: it is a low-level function, and its callers have to understand what it
does (also, in some places __skb_peek() is already called under the queue lock).
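
To illustrate the intended pairing, here is a minimal userspace sketch, with C11
atomics standing in for the kernel primitives (memory_order_release below plays
the role of smp_mb__after_spinlock() plus the linking store; all names are made
up for illustration, this is not kernel code):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	struct node {
		int payload;                 /* stands in for skb->xxx */
		_Atomic(struct node *) next; /* stands in for skb->next */
	};

	static struct node head;
	static struct node item;

	static void *producer(void *arg)
	{
		(void)arg;
		item.payload = 42;           /* skb->xxx = var1; */
		/* Release store: the payload store above is guaranteed to be
		 * visible before the link is. */
		atomic_store_explicit(&head.next, &item, memory_order_release);
		return NULL;
	}

	static void *consumer(void *arg)
	{
		struct node *n;

		(void)arg;
		/* Acquire load, as in the patched skb_peek(): once the link
		 * is seen, the payload written before it is visible too.
		 * A plain or relaxed load would allow payload == 0 here. */
		while (!(n = atomic_load_explicit(&head.next,
						  memory_order_acquire)))
			;
		printf("payload = %d\n", n->payload); /* always 42 */
		return NULL;
	}

	int main(void)
	{
		pthread_t p, c;

		pthread_create(&p, NULL, producer, NULL);
		pthread_create(&c, NULL, consumer, NULL);
		pthread_join(p, NULL);
		pthread_join(c, NULL);
		return 0;
	}
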
Signed-off-by: Kirill Tkhai <tkhai@...ru>
---

Hi David, Eric and other developers,

while reading the unix sockets code I found this problem, and to me it looks
real. If there are arguments that everything is OK and this behaviour is
expected, please explain.

Best wishes,
Kirill

 include/linux/skbuff.h | 9 ++++++---
 net/core/skbuff.c      | 6 ++++++
 2 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index ca8afa382bf2..2939a5dc0ad7 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2018,7 +2018,8 @@ static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
  */
 static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
 {
-	struct sk_buff *skb = list_->next;
+	/* Pairs with smp_mb__after_spinlock() in skb_queue_tail() and variations */
+	struct sk_buff *skb = smp_load_acquire(&list_->next);
 
 	if (skb == (struct sk_buff *)list_)
 		skb = NULL;
@@ -2048,7 +2049,8 @@ static inline struct sk_buff *__skb_peek(const struct sk_buff_head *list_)
 static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
 		const struct sk_buff_head *list_)
 {
-	struct sk_buff *next = skb->next;
+	/* Pairs with smp_mb__after_spinlock() in skb_queue_tail() and variations */
+	struct sk_buff *next = smp_load_acquire(&skb->next);
 
 	if (next == (struct sk_buff *)list_)
 		next = NULL;
@@ -2070,7 +2072,8 @@ static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
  */
 static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
 {
-	struct sk_buff *skb = READ_ONCE(list_->prev);
+	/* Pairs with smp_mb__after_spinlock() in skb_queue_tail() and variations */
+	struct sk_buff *skb = smp_load_acquire(&list_->prev);
 
 	if (skb == (struct sk_buff *)list_)
 		skb = NULL;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 974bbbbe7138..1de46eb91405 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3373,6 +3373,8 @@ void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
 	unsigned long flags;
 
 	spin_lock_irqsave(&list->lock, flags);
+	/* Pairs with smp_load_acquire() in skb_peek() and variations */
+	smp_mb__after_spinlock();
 	__skb_queue_head(list, newsk);
 	spin_unlock_irqrestore(&list->lock, flags);
 }
@@ -3394,6 +3396,8 @@ void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
 	unsigned long flags;
 
 	spin_lock_irqsave(&list->lock, flags);
+	/* Pairs with smp_load_acquire() in skb_peek() and variations */
+	smp_mb__after_spinlock();
 	__skb_queue_tail(list, newsk);
 	spin_unlock_irqrestore(&list->lock, flags);
 }
@@ -3434,6 +3438,8 @@ void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head
 	unsigned long flags;
 
 	spin_lock_irqsave(&list->lock, flags);
+	/* Pairs with smp_load_acquire() in skb_peek() and variations */
+	smp_mb__after_spinlock();
 	__skb_queue_after(list, old, newsk);
 	spin_unlock_irqrestore(&list->lock, flags);
 }
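
As a further illustration of the queueing side, here is a rough userspace
analogue where a C11 seq_cst fence plays the role of smp_mb__after_spinlock()
(all names are hypothetical, this is not kernel code):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	struct node {
		int payload;                 /* stands in for skb->xxx */
		_Atomic(struct node *) next; /* stands in for skb->next */
	};

	static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct node head;

	static void queue_tail(struct node *n)
	{
		pthread_mutex_lock(&queue_lock);
		/* Stand-in for smp_mb__after_spinlock(): taking the lock is
		 * only an acquire, so without a full fence the caller's
		 * earlier payload stores could still be reordered after the
		 * linking store below. */
		atomic_thread_fence(memory_order_seq_cst);
		atomic_store_explicit(&head.next, n, memory_order_relaxed);
		pthread_mutex_unlock(&queue_lock);
	}

	static struct node *peek(void)
	{
		/* Acquire load, as in the patched skb_peek(). */
		return atomic_load_explicit(&head.next, memory_order_acquire);
	}

	int main(void)
	{
		static struct node item = { .payload = 42 };
		struct node *n;

		queue_tail(&item);
		n = peek();
		printf("payload = %d\n", n ? n->payload : -1);
		return 0;
	}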