Message-ID: <20240216210556.65913-10-kuniyu@amazon.com>
Date: Fri, 16 Feb 2024 13:05:51 -0800
From: Kuniyuki Iwashima <kuniyu@...zon.com>
To: "David S. Miller" <davem@...emloft.net>, Eric Dumazet
<edumazet@...gle.com>, Jakub Kicinski <kuba@...nel.org>, Paolo Abeni
<pabeni@...hat.com>
CC: Kuniyuki Iwashima <kuniyu@...zon.com>, Kuniyuki Iwashima
<kuni1840@...il.com>, <netdev@...r.kernel.org>
Subject: [PATCH v2 net-next 09/14] af_unix: Skip GC if no cycle exists.

We do not need to run GC if there is no possible cyclic reference.
We use unix_graph_maybe_cyclic to decide whether we should run GC.
If the fd of an AF_UNIX socket is passed to an already inflight AF_UNIX
socket, they could form a cyclic reference.  Then, we set
unix_graph_maybe_cyclic to true and later run Tarjan's algorithm to
group the sockets into an SCC.
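
For illustration only (not part of this patch), such a cycle can be
built from userspace with plain SCM_RIGHTS passing; send_fd() below is
a hypothetical helper, not a kernel or libc function:

	#include <string.h>
	#include <sys/socket.h>
	#include <sys/uio.h>
	#include <unistd.h>

	/* Pass fd as ancillary data over the AF_UNIX socket via. */
	static void send_fd(int via, int fd)
	{
		char data = 'x';
		struct iovec iov = { .iov_base = &data, .iov_len = 1 };
		union {	/* aligned control buffer, as in man 3 cmsg */
			char buf[CMSG_SPACE(sizeof(int))];
			struct cmsghdr align;
		} u;
		struct msghdr mh = {
			.msg_iov = &iov,
			.msg_iovlen = 1,
			.msg_control = u.buf,
			.msg_controllen = sizeof(u.buf),
		};
		struct cmsghdr *cm = CMSG_FIRSTHDR(&mh);

		cm->cmsg_level = SOL_SOCKET;
		cm->cmsg_type = SCM_RIGHTS;
		cm->cmsg_len = CMSG_LEN(sizeof(int));
		memcpy(CMSG_DATA(cm), &fd, sizeof(int));

		sendmsg(via, &mh, 0);
	}

	int main(void)
	{
		int sv[2];

		socketpair(AF_UNIX, SOCK_DGRAM, 0, sv);

		send_fd(sv[0], sv[0]);	/* sv[0] is inflight in sv[1]'s queue */
		send_fd(sv[1], sv[1]);	/* sv[1] is inflight in sv[0]'s queue */

		close(sv[0]);		/* the cycle is now unreachable from */
		close(sv[1]);		/* userspace; only GC can reclaim it */

		return 0;
	}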
Once we run Tarjan's algorithm, we are 100% sure whether cyclic
references exist or not.  If there is no cycle, we set
unix_graph_maybe_cyclic to false and can skip the entire garbage
collection next time.
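
The answer is definitive because every cycle is fully contained in a
single SCC, so it suffices to test each SCC.  A minimal userspace model
of that per-SCC test (illustrative only; the names here are made up,
the in-kernel counterpart is unix_scc_cyclic() in the diff below):

	#include <stdbool.h>

	struct vertex {
		unsigned long self_degree;	/* edges back to itself */
	};

	/* An SCC is cyclic iff it has two or more vertices, or its
	 * single vertex references itself.
	 */
	static bool scc_cyclic(const struct vertex *vertices, unsigned long nr)
	{
		if (nr > 1)
			return true;

		return vertices[0].self_degree != 0;
	}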
When finalising an SCC, we set unix_graph_maybe_cyclic to true if the
SCC consists of multiple vertices.

Even if the SCC is a single vertex, a cycle can still exist due to
self-fd passing.  To detect that corner case, we could check all edges
of the vertex, but instead we add a new field that counts the number
of self-references, so that the decision finishes in O(1) time.
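
Again for illustration only, the self-fd-passing corner case can be hit
with a datagram socket whose peer is itself, reusing the hypothetical
send_fd() helper from the sketch above:

	#include <sys/socket.h>
	#include <sys/un.h>
	#include <unistd.h>

	/* send_fd() as defined in the earlier sketch */

	int main(void)
	{
		struct sockaddr_un addr = { .sun_family = AF_UNIX };
		int s = socket(AF_UNIX, SOCK_DGRAM, 0);

		addr.sun_path[1] = 'x';	/* abstract address, no fs cleanup */

		bind(s, (struct sockaddr *)&addr, sizeof(addr));
		connect(s, (struct sockaddr *)&addr, sizeof(addr));

		send_fd(s, s);	/* s sits in its own receive queue */
		close(s);	/* single-vertex SCC, self_degree == 1 */

		return 0;
	}

Here the edge's predecessor and successor are the same vertex, so
self_degree catches the cycle without walking the edge list.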
With this change, __unix_gc() is just a spin_lock() dance in normal
usage.
Signed-off-by: Kuniyuki Iwashima <kuniyu@...zon.com>
---
 include/net/af_unix.h |  1 +
 net/unix/garbage.c    | 56 +++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 55 insertions(+), 2 deletions(-)
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index b3ba5e949d62..59ec8d7880ce 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -36,6 +36,7 @@ struct unix_vertex {
 	struct list_head entry;
 	struct list_head scc_entry;
 	unsigned long out_degree;
+	unsigned long self_degree;
 	unsigned long index;
 	unsigned long lowlink;
 };
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index c4b0cc438c64..90f04d786dae 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -106,9 +106,23 @@ void unix_init_vertex(struct unix_sock *u)
 	struct unix_vertex *vertex = &u->vertex;
 
 	vertex->out_degree = 0;
+	vertex->self_degree = 0;
 	INIT_LIST_HEAD(&vertex->edges);
 }
 
+static bool unix_graph_maybe_cyclic;
+
+static void unix_graph_update(struct unix_edge *edge)
+{
+	if (unix_graph_maybe_cyclic)
+		return;
+
+	if (!edge->successor->out_degree)
+		return;
+
+	unix_graph_maybe_cyclic = true;
+}
+
 DEFINE_SPINLOCK(unix_gc_lock);
 static LIST_HEAD(unix_unvisited_vertices);
 unsigned int unix_tot_inflight;
@@ -144,6 +158,9 @@ void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver)
 		edge->predecessor = &inflight->vertex;
 		edge->successor = successor;
 
+		if (edge->predecessor == edge->successor)
+			edge->predecessor->self_degree++;
+
 		if (!edge->predecessor->out_degree++) {
 			edge->predecessor->index = unix_vertex_unvisited_index;
@@ -154,6 +171,8 @@ void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver)
 		if (receiver->listener)
 			list_add_tail(&edge->embryo_entry, &receiver->vertex.edges);
+
+		unix_graph_update(edge);
 	}
 
 	WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + fpl->count_unix);
@@ -173,10 +192,15 @@ void unix_del_edges(struct scm_fp_list *fpl)
 	while (i < fpl->count_unix) {
 		struct unix_edge *edge = fpl->edges + i++;
 
+		unix_graph_update(edge);
+
 		list_del(&edge->entry);
 
 		if (!--edge->predecessor->out_degree)
 			list_del_init(&edge->predecessor->entry);
+
+		if (edge->predecessor == edge->successor)
+			edge->predecessor->self_degree--;
 	}
 
 	WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - fpl->count_unix);
@@ -193,8 +217,14 @@ void unix_update_edges(struct unix_sock *receiver)
 	spin_lock(&unix_gc_lock);
 
-	list_for_each_entry(edge, &receiver->vertex.edges, embryo_entry)
+	list_for_each_entry(edge, &receiver->vertex.edges, embryo_entry) {
+		unix_graph_update(edge);
+
+		if (edge->predecessor == edge->successor)
+			edge->predecessor->self_degree--;
+
 		edge->successor = &receiver->vertex;
+	}
 
 	list_del_init(&receiver->vertex.edges);
@@ -224,6 +254,20 @@ void unix_free_edges(struct scm_fp_list *fpl)
 	kvfree(fpl->edges);
 }
 
+static bool unix_scc_cyclic(struct list_head *scc)
+{
+	struct unix_vertex *vertex;
+
+	if (!list_is_singular(scc))
+		return true;
+
+	vertex = list_first_entry(scc, typeof(*vertex), scc_entry);
+	if (vertex->self_degree)
+		return true;
+
+	return false;
+}
+
 static LIST_HEAD(unix_visited_vertices);
 static unsigned long unix_vertex_grouped_index = UNIX_VERTEX_INDEX_MARK2;
@@ -272,6 +316,9 @@ static void __unix_walk_scc(struct unix_vertex *vertex)
 			vertex->index = unix_vertex_grouped_index;
 		}
 
+		if (!unix_graph_maybe_cyclic)
+			unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);
+
 		list_del(&scc);
 	}
@@ -281,6 +328,8 @@ static void __unix_walk_scc(struct unix_vertex *vertex)
 
 static void unix_walk_scc(void)
 {
+	unix_graph_maybe_cyclic = false;
+
 	while (!list_empty(&unix_unvisited_vertices)) {
 		struct unix_vertex *vertex;
@@ -439,6 +488,9 @@ static void __unix_gc(struct work_struct *work)
 	spin_lock(&unix_gc_lock);
 
+	if (!unix_graph_maybe_cyclic)
+		goto skip_gc;
+
 	unix_walk_scc();
 
 	/* First, select candidates for garbage collection. Only
@@ -536,7 +588,7 @@ static void __unix_gc(struct work_struct *work)
 
 	/* All candidates should have been detached by now. */
 	WARN_ON_ONCE(!list_empty(&gc_candidates));
-
+skip_gc:
 	/* Paired with READ_ONCE() in wait_for_unix_gc(). */
 	WRITE_ONCE(gc_in_progress, false);
--
2.30.2