Message-ID: <20240216210556.65913-11-kuniyu@amazon.com>
Date: Fri, 16 Feb 2024 13:05:52 -0800
From: Kuniyuki Iwashima <kuniyu@...zon.com>
To: "David S. Miller" <davem@...emloft.net>, Eric Dumazet
<edumazet@...gle.com>, Jakub Kicinski <kuba@...nel.org>, Paolo Abeni
<pabeni@...hat.com>
CC: Kuniyuki Iwashima <kuniyu@...zon.com>, Kuniyuki Iwashima
<kuni1840@...il.com>, <netdev@...r.kernel.org>
Subject: [PATCH v2 net-next 10/14] af_unix: Avoid Tarjan's algorithm if unnecessary.
Once a cyclic reference is formed, we need to run GC to check if
there is a dead SCC.

However, we do not need to run Tarjan's algorithm if we know that
the shape of the inflight graph has not changed.

If an edge is added/updated/deleted and the edge's successor is
inflight, we set unix_graph_grouped to false, which means we need
to re-classify SCCs.

Once we finalise SCCs, we set unix_graph_grouped to true.

While unix_graph_grouped is true, we can iterate the already-grouped
SCCs using vertex->scc_entry in unix_walk_scc_fast().

The list_add() and list_for_each_entry_reverse() uses may look odd,
but they keep the vertex order consistent and make tests easier to
write; see the sketch below.
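
To illustrate that ordering, here is a minimal userspace sketch; it
is not part of the patch and uses hand-rolled equivalents of the
<linux/list.h> helpers, assuming a single three-vertex SCC for
brevity.  It shows that the slow path's reverse walk over the Tarjan
stack and the fast path's reverse walk behind a temporary head both
visit the vertices in the same order.

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD(name) struct list_head name = { &(name), &(name) }

/* Insert new right after head, as list_add() does in the kernel. */
static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

/* Unlink entry; its neighbours stay linked to each other. */
static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry_reverse(pos, head, member)			\
	for (pos = container_of((head)->prev, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = container_of(pos->member.prev, typeof(*pos), member))

struct vertex {
	const char *name;
	struct list_head scc_entry;
};

int main(void)
{
	struct vertex a = { "A" }, b = { "B" }, c = { "C" };
	LIST_HEAD(stack);
	struct list_head scc;
	struct vertex *v;

	/* Tarjan's DFS pushes A, B, C; list_add() prepends, so the
	 * stack reads C, B, A when walked forward.
	 */
	list_add(&a.scc_entry, &stack);
	list_add(&b.scc_entry, &stack);
	list_add(&c.scc_entry, &stack);

	/* Slow path: the reverse walk restores the push order. */
	printf("slow path:");
	list_for_each_entry_reverse(v, &stack, scc_entry)
		printf(" %s", v->name);
	printf("\n");

	/* Drop the head; the vertices stay linked to each other in
	 * a headless circular list via scc_entry.
	 */
	list_del(&stack);

	/* Fast path: re-attach a temporary head right after the
	 * first vertex, then walk in reverse again.
	 */
	list_add(&scc, &a.scc_entry);
	printf("fast path:");
	list_for_each_entry_reverse(v, &scc, scc_entry)
		printf(" %s", v->name);
	printf("\n");

	list_del(&scc);
	return 0;		/* both lines print: A B C */
}

The key point is that list_add() places the temporary head right
after the first vertex, so the reverse walk starts at that vertex
and then follows the circular list backwards, reproducing the order
in which the SCC was grouped.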
Signed-off-by: Kuniyuki Iwashima <kuniyu@...zon.com>
---
net/unix/garbage.c | 27 ++++++++++++++++++++++++++-
1 file changed, 26 insertions(+), 1 deletion(-)
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 90f04d786dae..1e919fe65737 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -108,9 +108,11 @@ void unix_init_vertex(struct unix_sock *u)
vertex->out_degree = 0;
vertex->self_degree = 0;
INIT_LIST_HEAD(&vertex->edges);
+ INIT_LIST_HEAD(&vertex->scc_entry);
}
static bool unix_graph_maybe_cyclic;
+static bool unix_graph_grouped;
static void unix_graph_update(struct unix_edge *edge)
{
@@ -121,6 +123,7 @@ static void unix_graph_update(struct unix_edge *edge)
return;
unix_graph_maybe_cyclic = true;
+ unix_graph_grouped = false;
}
DEFINE_SPINLOCK(unix_gc_lock);
@@ -339,6 +342,25 @@ static void unix_walk_scc(void)
list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
swap(unix_vertex_unvisited_index, unix_vertex_grouped_index);
+ unix_graph_grouped = true;
+}
+
+static void unix_walk_scc_fast(void)
+{
+ while (!list_empty(&unix_unvisited_vertices)) {
+ struct unix_vertex *vertex;
+ struct list_head scc;
+
+ vertex = list_first_entry(&unix_unvisited_vertices, typeof(*vertex), entry);
+ list_add(&scc, &vertex->scc_entry);
+
+ list_for_each_entry_reverse(vertex, &scc, scc_entry)
+ list_move_tail(&vertex->entry, &unix_visited_vertices);
+
+ list_del(&scc);
+ }
+
+ list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
}
static LIST_HEAD(gc_candidates);
@@ -491,7 +513,10 @@ static void __unix_gc(struct work_struct *work)
if (!unix_graph_maybe_cyclic)
goto skip_gc;
- unix_walk_scc();
+ if (unix_graph_grouped)
+ unix_walk_scc_fast();
+ else
+ unix_walk_scc();
/* First, select candidates for garbage collection. Only
* in-flight sockets are considered, and from those only ones
--
2.30.2