Message-ID: <20250521152920.1116756-18-lee@kernel.org>
Date: Wed, 21 May 2025 16:27:16 +0100
From: Lee Jones <lee@...nel.org>
To: lee@...nel.org,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Christian Brauner <brauner@...nel.org>,
Kuniyuki Iwashima <kuniyu@...zon.com>,
Alexander Mikhalitsyn <aleksandr.mikhalitsyn@...onical.com>,
Jens Axboe <axboe@...nel.dk>,
Sasha Levin <sashal@...nel.org>,
Michal Luczaj <mhal@...x.co>,
Rao Shoaib <Rao.Shoaib@...cle.com>,
Simon Horman <horms@...nel.org>,
linux-kernel@...r.kernel.org,
netdev@...r.kernel.org
Cc: stable@...r.kernel.org
Subject: [PATCH v6.1 17/27] af_unix: Skip GC if no cycle exists.
From: Kuniyuki Iwashima <kuniyu@...zon.com>
[ Upstream commit 77e5593aebba823bcbcf2c4b58b07efcd63933b8 ]

We do not need to run GC if there is no possible cyclic reference.
We use unix_graph_maybe_cyclic to decide if we should run GC.

If an fd of an AF_UNIX socket is passed to an already inflight AF_UNIX
socket, they could form a cyclic reference.  Then, we set
unix_graph_maybe_cyclic to true and later run Tarjan's algorithm to
group the sockets into SCCs.
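
For illustration only (not part of this patch), such a cycle can be
created from userspace by cross-passing the fds of two socketpairs with
SCM_RIGHTS and then closing every fd.  The send_fd() helper below is a
hypothetical name used just for this sketch:

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

static void send_fd(int via, int fd)
{
        char c = 0;
        struct iovec iov = { .iov_base = &c, .iov_len = 1 };
        union {
                char buf[CMSG_SPACE(sizeof(int))];
                struct cmsghdr align;
        } u;
        struct msghdr msg = {
                .msg_iov = &iov, .msg_iovlen = 1,
                .msg_control = u.buf, .msg_controllen = sizeof(u.buf),
        };
        struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));
        sendmsg(via, &msg, 0);
}

int main(void)
{
        int a[2], b[2];

        socketpair(AF_UNIX, SOCK_DGRAM, 0, a);
        socketpair(AF_UNIX, SOCK_DGRAM, 0, b);

        /* Cross-pass the fds: each socket ends up queued in the
         * other's receive queue, so after the close()s only the
         * in-flight references remain and form a cycle that only
         * the GC can break.
         */
        send_fd(b[1], a[0]);    /* a[0] lands in b[0]'s receive queue */
        send_fd(a[1], b[0]);    /* b[0] lands in a[0]'s receive queue */

        close(a[0]); close(a[1]);
        close(b[0]); close(b[1]);
        return 0;
}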

Once we run Tarjan's algorithm, we know for sure whether cyclic
references exist.  If there is no cycle, we set unix_graph_maybe_cyclic
to false and can skip the entire garbage collection next time.
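
Schematically, the flag behaves like a conservative dirty bit that only
the full SCC walk can clear.  A simplified sketch with illustrative
helper names (not the functions added by this patch):

#include <stdbool.h>

static bool graph_maybe_cyclic;

/* Called whenever an edge towards an in-flight receiver is added. */
static void note_possible_cycle(void)
{
        graph_maybe_cyclic = true;      /* conservative: a cycle may now exist */
}

/* Stand-in for the SCC walk (Tarjan's algorithm in the real code). */
static bool scc_walk_found_cycle(void)
{
        return false;
}

static void gc(void)
{
        if (!graph_maybe_cyclic)
                return;                 /* fast path: provably no cycle */

        graph_maybe_cyclic = scc_walk_found_cycle();    /* exact answer */
        /* ... reclaim any cyclic garbage here ... */
}

int main(void)
{
        note_possible_cycle();
        gc();
        return 0;
}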

When finalising an SCC, we set unix_graph_maybe_cyclic to true if the
SCC consists of multiple vertices.

Even if an SCC is a single vertex, a cycle can still exist via self-fd
passing.  Given that this corner case is rare, we detect it by checking
all edges of the vertex and set unix_graph_maybe_cyclic to true if an
edge points back to the vertex itself.
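
A rough userspace sketch of that corner case, reusing the hypothetical
send_fd() helper from the earlier example (again illustrative only):

int main(void)
{
        int s[2];

        socketpair(AF_UNIX, SOCK_DGRAM, 0, s);

        /* s[1]'s peer is s[0], so s[0]'s own fd is queued on s[0]. */
        send_fd(s[1], s[0]);

        close(s[0]);
        close(s[1]);    /* s[0] now keeps itself alive: a one-vertex SCC */
        return 0;
}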

With this change, __unix_gc() is just a spin_lock() dance in normal
usage.

Signed-off-by: Kuniyuki Iwashima <kuniyu@...zon.com>
Acked-by: Paolo Abeni <pabeni@...hat.com>
Link: https://lore.kernel.org/r/20240325202425.60930-11-kuniyu@amazon.com
Signed-off-by: Jakub Kicinski <kuba@...nel.org>
(cherry picked from commit 77e5593aebba823bcbcf2c4b58b07efcd63933b8)
Signed-off-by: Lee Jones <lee@...nel.org>
---
net/unix/garbage.c | 48 +++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 47 insertions(+), 1 deletion(-)
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index feae6c17b291..8f0dc39bb72f 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -112,6 +112,19 @@ static struct unix_vertex *unix_edge_successor(struct unix_edge *edge)
return edge->successor->vertex;
}
+static bool unix_graph_maybe_cyclic;
+
+static void unix_update_graph(struct unix_vertex *vertex)
+{
+ /* If the receiver socket is not inflight, no cyclic
+ * reference could be formed.
+ */
+ if (!vertex)
+ return;
+
+ unix_graph_maybe_cyclic = true;
+}
+
static LIST_HEAD(unix_unvisited_vertices);
enum unix_vertex_index {
@@ -138,12 +151,16 @@ static void unix_add_edge(struct scm_fp_list *fpl, struct unix_edge *edge)
vertex->out_degree++;
list_add_tail(&edge->vertex_entry, &vertex->edges);
+
+ unix_update_graph(unix_edge_successor(edge));
}
static void unix_del_edge(struct scm_fp_list *fpl, struct unix_edge *edge)
{
struct unix_vertex *vertex = edge->predecessor->vertex;
+ unix_update_graph(unix_edge_successor(edge));
+
list_del(&edge->vertex_entry);
vertex->out_degree--;
@@ -227,6 +244,7 @@ void unix_del_edges(struct scm_fp_list *fpl)
void unix_update_edges(struct unix_sock *receiver)
{
spin_lock(&unix_gc_lock);
+ unix_update_graph(unix_sk(receiver->listener)->vertex);
receiver->listener = NULL;
spin_unlock(&unix_gc_lock);
}
@@ -268,6 +286,26 @@ void unix_destroy_fpl(struct scm_fp_list *fpl)
unix_free_vertices(fpl);
}
+static bool unix_scc_cyclic(struct list_head *scc)
+{
+ struct unix_vertex *vertex;
+ struct unix_edge *edge;
+
+ /* SCC containing multiple vertices ? */
+ if (!list_is_singular(scc))
+ return true;
+
+ vertex = list_first_entry(scc, typeof(*vertex), scc_entry);
+
+ /* Self-reference or a embryo-listener circle ? */
+ list_for_each_entry(edge, &vertex->edges, vertex_entry) {
+ if (unix_edge_successor(edge) == vertex)
+ return true;
+ }
+
+ return false;
+}
+
static LIST_HEAD(unix_visited_vertices);
static unsigned long unix_vertex_grouped_index = UNIX_VERTEX_INDEX_MARK2;
@@ -353,6 +391,9 @@ static void __unix_walk_scc(struct unix_vertex *vertex)
vertex->index = unix_vertex_grouped_index;
}
+ if (!unix_graph_maybe_cyclic)
+ unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);
+
list_del(&scc);
}
@@ -363,6 +404,8 @@ static void __unix_walk_scc(struct unix_vertex *vertex)
static void unix_walk_scc(void)
{
+ unix_graph_maybe_cyclic = false;
+
/* Visit every vertex exactly once.
* __unix_walk_scc() moves visited vertices to unix_visited_vertices.
*/
@@ -524,6 +567,9 @@ static void __unix_gc(struct work_struct *work)
spin_lock(&unix_gc_lock);
+ if (!unix_graph_maybe_cyclic)
+ goto skip_gc;
+
unix_walk_scc();
/* First, select candidates for garbage collection. Only
@@ -633,7 +679,7 @@ static void __unix_gc(struct work_struct *work)
/* All candidates should have been detached by now. */
WARN_ON_ONCE(!list_empty(&gc_candidates));
-
+skip_gc:
/* Paired with READ_ONCE() in wait_for_unix_gc(). */
WRITE_ONCE(gc_in_progress, false);
--
2.49.0.1143.g0be31eac6b-goog