Message-ID: <20241001132702.3122709-2-dhowells@redhat.com>
Date: Tue, 1 Oct 2024 14:26:58 +0100
From: David Howells <dhowells@...hat.com>
To: netdev@...r.kernel.org
Cc: David Howells <dhowells@...hat.com>,
Marc Dionne <marc.dionne@...istor.com>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
linux-afs@...ts.infradead.org,
linux-kernel@...r.kernel.org,
yuxuanzhe@...look.com,
Simon Horman <horms@...nel.org>
Subject: [PATCH net 1/2] rxrpc: Fix a race between socket set up and I/O thread creation

rxrpc_open_socket() sets up the socket and then sets up the I/O thread that
will handle it.  This is a problem, however, as there's a gap between the
two phases in which a packet may come into rxrpc_encap_rcv() from the UDP
socket, but we will oops when trying to wake the not-yet-created I/O
thread.

As a quick fix, just make rxrpc_encap_rcv() discard the packet if there's
no I/O thread yet.

A better, but more intrusive, fix would perhaps be to rearrange things such
that the socket creation is done by the I/O thread.

Fixes: a275da62e8c1 ("rxrpc: Create a per-local endpoint receive queue and I/O thread")
Signed-off-by: David Howells <dhowells@...hat.com>
cc: yuxuanzhe@...look.com
cc: Marc Dionne <marc.dionne@...istor.com>
cc: "David S. Miller" <davem@...emloft.net>
cc: Eric Dumazet <edumazet@...gle.com>
cc: Jakub Kicinski <kuba@...nel.org>
cc: Paolo Abeni <pabeni@...hat.com>
cc: Simon Horman <horms@...nel.org>
cc: linux-afs@...ts.infradead.org
cc: netdev@...r.kernel.org
---
 net/rxrpc/ar-internal.h  |  2 +-
 net/rxrpc/io_thread.c    | 10 ++++++++--
 net/rxrpc/local_object.c |  2 +-
 3 files changed, 10 insertions(+), 4 deletions(-)
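
For reviewers who want to see the ordering outside the kernel, below is a
minimal userspace sketch of the same publish/consume pattern.  It is an
illustration only, not the kernel code: C11 acquire/release atomics stand
in for the kernel's READ_ONCE()/WRITE_ONCE() (stronger than strictly
needed here), and all of the names (struct local, encap_rcv(),
io_thread_fn()) are hypothetical.

/* Userspace sketch: the receive path must tolerate running before the
 * worker thread pointer has been published, and just drops the work.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

struct local {
	_Atomic(pthread_t *) io_thread;	/* NULL until published */
};

static struct local the_local;

/* Analogue of rxrpc_encap_rcv(): may run in the setup gap. */
static void encap_rcv(struct local *local, int pkt)
{
	/* Snapshot the pointer once, as the patch does with READ_ONCE(). */
	pthread_t *io = atomic_load_explicit(&local->io_thread,
					     memory_order_acquire);

	if (!io) {
		printf("pkt %d discarded: no I/O thread yet\n", pkt);
		return;		/* the quick fix: drop, don't oops */
	}
	printf("pkt %d handed to I/O thread\n", pkt);
}

static void *io_thread_fn(void *arg)
{
	(void)arg;
	sleep(1);		/* pretend to service the rx queue */
	return NULL;
}

int main(void)
{
	static pthread_t io;

	encap_rcv(&the_local, 1);	/* arrives in the gap: dropped */

	if (pthread_create(&io, NULL, io_thread_fn, NULL))
		return 1;
	/* Publish only once the thread exists, as with WRITE_ONCE(). */
	atomic_store_explicit(&the_local.io_thread, &io,
			      memory_order_release);

	encap_rcv(&the_local, 2);	/* after publication: handled */
	return pthread_join(io, NULL);
}

Build with "cc -pthread": the first packet is dropped rather than
dereferencing a NULL thread pointer, which is the behaviour the patch
gives rxrpc_encap_rcv().
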
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 80d682f89b23..d0fd37bdcfe9 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -1056,7 +1056,7 @@ bool rxrpc_direct_abort(struct sk_buff *skb, enum rxrpc_abort_reason why,
 int rxrpc_io_thread(void *data);
 
 static inline void rxrpc_wake_up_io_thread(struct rxrpc_local *local)
 {
-	wake_up_process(local->io_thread);
+	wake_up_process(READ_ONCE(local->io_thread));
 }
 
 static inline bool rxrpc_protocol_error(struct sk_buff *skb, enum rxrpc_abort_reason why)
diff --git a/net/rxrpc/io_thread.c b/net/rxrpc/io_thread.c
index 0300baa9afcd..07c74c77d802 100644
--- a/net/rxrpc/io_thread.c
+++ b/net/rxrpc/io_thread.c
@@ -27,11 +27,17 @@ int rxrpc_encap_rcv(struct sock *udp_sk, struct sk_buff *skb)
 {
 	struct sk_buff_head *rx_queue;
 	struct rxrpc_local *local = rcu_dereference_sk_user_data(udp_sk);
+	struct task_struct *io_thread;
 
 	if (unlikely(!local)) {
 		kfree_skb(skb);
 		return 0;
 	}
+	io_thread = READ_ONCE(local->io_thread);
+	if (!io_thread) {
+		kfree_skb(skb);
+		return 0;
+	}
 
 	if (skb->tstamp == 0)
 		skb->tstamp = ktime_get_real();
@@ -47,7 +53,7 @@ int rxrpc_encap_rcv(struct sock *udp_sk, struct sk_buff *skb)
 #endif
 
 	skb_queue_tail(rx_queue, skb);
-	rxrpc_wake_up_io_thread(local);
+	wake_up_process(io_thread);
 	return 0;
 }
 
@@ -565,7 +571,7 @@ int rxrpc_io_thread(void *data)
 	__set_current_state(TASK_RUNNING);
 	rxrpc_see_local(local, rxrpc_local_stop);
 	rxrpc_destroy_local(local);
-	local->io_thread = NULL;
+	WRITE_ONCE(local->io_thread, NULL);
 	rxrpc_see_local(local, rxrpc_local_stopped);
 	return 0;
 }
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 504453c688d7..f9623ace2201 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -232,7 +232,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
 	}
 
 	wait_for_completion(&local->io_thread_ready);
-	local->io_thread = io_thread;
+	WRITE_ONCE(local->io_thread, io_thread);
 
 	_leave(" = 0");
 	return 0;