Message-ID: <166919852169.1258552.10370784990641295051.stgit@warthog.procyon.org.uk>
Date: Wed, 23 Nov 2022 10:15:21 +0000
From: David Howells <dhowells@...hat.com>
To: netdev@...r.kernel.org
Cc: Marc Dionne <marc.dionne@...istor.com>,
linux-afs@...ts.infradead.org, dhowells@...hat.com,
linux-afs@...ts.infradead.org, linux-kernel@...r.kernel.org
Subject: [PATCH net-next 07/17] rxrpc: Don't take spinlocks in the RCU
callback functions

Don't take spinlocks in the RCU callback functions, as these callbacks run
in softirq context, which then requires all other takers of those locks to
use the _bh-marked variants (e.g. spin_lock_bh()).
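
(An illustrative sketch of why this matters; the "foo" names below are
hypothetical and this is not rxrpc code.  If an RCU callback, which runs in
softirq context, takes a plain spinlock, then every process-context taker
of that lock has to use spin_lock_bh(), otherwise the callback could run on
the same CPU while the lock is held and deadlock against it.)

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(foo_stats_lock);        /* protects nr_foos */
static unsigned int nr_foos;

struct foo {
        struct rcu_head rcu;
};

static void foo_rcu_free(struct rcu_head *rcu)
{
        struct foo *f = container_of(rcu, struct foo, rcu);

        spin_lock(&foo_stats_lock);     /* taken in softirq context... */
        nr_foos--;
        spin_unlock(&foo_stats_lock);
        kfree(f);
}

static void foo_report(void)
{
        unsigned int n;

        /* ...so process-context takers must disable BHs here. */
        spin_lock_bh(&foo_stats_lock);
        n = nr_foos;
        spin_unlock_bh(&foo_stats_lock);
        pr_info("%u foos in use\n", n);
}
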
Signed-off-by: David Howells <dhowells@...hat.com>
cc: Marc Dionne <marc.dionne@...istor.com>
cc: linux-afs@...ts.infradead.org
---
net/rxrpc/call_object.c | 30 +++++++-----------------------
net/rxrpc/conn_object.c | 18 +++++++++---------
2 files changed, 16 insertions(+), 32 deletions(-)

diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 01ffe99516b8..d77b65bf3273 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -613,36 +613,16 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace why)
/*
* Final call destruction - but must be done in process context.
*/
-static void rxrpc_destroy_call(struct work_struct *work)
+static void rxrpc_destroy_call(struct rcu_head *rcu)
{
- struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor);
+ struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
struct rxrpc_net *rxnet = call->rxnet;
- rxrpc_delete_call_timer(call);
-
- rxrpc_put_connection(call->conn, rxrpc_conn_put_call);
- rxrpc_put_peer(call->peer, rxrpc_peer_put_call);
kmem_cache_free(rxrpc_call_jar, call);
if (atomic_dec_and_test(&rxnet->nr_calls))
wake_up_var(&rxnet->nr_calls);
}
-/*
- * Final call destruction under RCU.
- */
-static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
-{
- struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
-
- if (rcu_read_lock_held()) {
- INIT_WORK(&call->processor, rxrpc_destroy_call);
- if (!rxrpc_queue_work(&call->processor))
- BUG();
- } else {
- rxrpc_destroy_call(&call->processor);
- }
-}
-
/*
* clean up a call
*/
@@ -663,8 +643,12 @@ void rxrpc_cleanup_call(struct rxrpc_call *call)
}
rxrpc_put_txbuf(call->tx_pending, rxrpc_txbuf_put_cleaned);
rxrpc_free_skb(call->acks_soft_tbl, rxrpc_skb_put_ack);
+ rxrpc_delete_call_timer(call);
+
+ rxrpc_put_connection(call->conn, rxrpc_conn_put_call);
+ rxrpc_put_peer(call->peer, rxrpc_peer_put_call);
- call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
+ call_rcu(&call->rcu, rxrpc_destroy_call);
}
/*
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index f7c271a740ed..54821c2f6d89 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -249,6 +249,15 @@ void rxrpc_kill_connection(struct rxrpc_connection *conn)
*/
rxrpc_purge_queue(&conn->rx_queue);
+ del_timer_sync(&conn->timer);
+ rxrpc_purge_queue(&conn->rx_queue);
+
+ conn->security->clear(conn);
+ key_put(conn->key);
+ rxrpc_put_bundle(conn->bundle, rxrpc_bundle_put_conn);
+ rxrpc_put_peer(conn->peer, rxrpc_peer_put_conn);
+ rxrpc_put_local(conn->local, rxrpc_local_put_kill_conn);
+
/* Leave final destruction to RCU. The connection processor work item
* must carry a ref on the connection to prevent us getting here whilst
* it is queued or running.
@@ -358,17 +367,8 @@ static void rxrpc_destroy_connection(struct rcu_head *rcu)
ASSERTCMP(refcount_read(&conn->ref), ==, 0);
- del_timer_sync(&conn->timer);
- rxrpc_purge_queue(&conn->rx_queue);
-
- conn->security->clear(conn);
- key_put(conn->key);
- rxrpc_put_bundle(conn->bundle, rxrpc_bundle_put_conn);
- rxrpc_put_peer(conn->peer, rxrpc_peer_put_conn);
-
if (atomic_dec_and_test(&conn->local->rxnet->nr_conns))
wake_up_var(&conn->local->rxnet->nr_conns);
- rxrpc_put_local(conn->local, rxrpc_local_put_kill_conn);
kfree(conn);
_leave("");
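
(For reference, a minimal sketch of the pattern both objects are moved to
by this patch; the "foo" names are hypothetical and this is not the actual
rxrpc code.  Teardown that needs ordinary locks or may sleep is done in
process context before call_rcu(), leaving the RCU callback, which runs in
softirq context, with nothing but the free and atomic accounting.)

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/wait_bit.h>

static atomic_t nr_foos = ATOMIC_INIT(0); /* incremented at allocation (not shown) */

struct foo {
        struct timer_list       timer;    /* set up at allocation (not shown) */
        struct rcu_head         rcu;
};

/* RCU callback: runs in softirq context, so it takes no plain spinlocks;
 * it only frees the memory and does atomic accounting.
 */
static void foo_rcu_free(struct rcu_head *rcu)
{
        struct foo *f = container_of(rcu, struct foo, rcu);

        kfree(f);
        if (atomic_dec_and_test(&nr_foos))
                wake_up_var(&nr_foos);
}

/* Process context: anything that takes ordinary locks or sleeps happens
 * here, before the object is handed over to RCU.
 */
static void foo_cleanup(struct foo *f)
{
        del_timer_sync(&f->timer);
        /* drop references to any connection/peer-like objects here */

        call_rcu(&f->rcu, foo_rcu_free);
}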