Message-ID: <20240216210556.65913-6-kuniyu@amazon.com>
Date: Fri, 16 Feb 2024 13:05:47 -0800
From: Kuniyuki Iwashima <kuniyu@amazon.com>
To: "David S. Miller" <davem@davemloft.net>, Eric Dumazet <edumazet@google.com>,
    Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>
CC: Kuniyuki Iwashima <kuniyu@amazon.com>, Kuniyuki Iwashima <kuni1840@gmail.com>,
    <netdev@vger.kernel.org>
Subject: [PATCH v2 net-next 05/14] af_unix: Fix up unix_edge.successor for embryo socket.

To garbage collect inflight AF_UNIX sockets, we must define the
cyclic reference appropriately. This is a bit tricky if the loop
consists of embryo sockets.

Suppose that the fd of AF_UNIX socket A is passed to D and the fd of
B to C, and that C and D are embryo sockets of A and B, respectively.
It may appear that there are two separate graphs, A (-> D) and
B (-> C), but this is not correct.

  A --. .-- B
       X
  C <-' `-> D
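
For example, such a loop can be set up from user space roughly as
follows (an untested sketch; the helper names and socket paths are
arbitrary). Each listener gets one connection that is never accept()ed,
and each client then passes the other listener's fd with SCM_RIGHTS, so
the fd ends up queued on the embryo:

  #include <string.h>
  #include <sys/socket.h>
  #include <sys/un.h>
  #include <unistd.h>

  /* Send fd as SCM_RIGHTS over the connected socket 'via'. */
  static void send_fd(int via, int fd)
  {
          char byte = 0;
          struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
          union {
                  char buf[CMSG_SPACE(sizeof(int))];
                  struct cmsghdr align;
          } u = { 0 };
          struct msghdr msg = {
                  .msg_iov = &iov, .msg_iovlen = 1,
                  .msg_control = u.buf, .msg_controllen = sizeof(u.buf),
          };
          struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

          cmsg->cmsg_level = SOL_SOCKET;
          cmsg->cmsg_type = SCM_RIGHTS;
          cmsg->cmsg_len = CMSG_LEN(sizeof(int));
          memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));
          sendmsg(via, &msg, 0);
  }

  static int listener(const char *path)
  {
          struct sockaddr_un addr = { .sun_family = AF_UNIX };
          int fd = socket(AF_UNIX, SOCK_STREAM, 0);

          strncpy(addr.sun_path, path, sizeof(addr.sun_path) - 1);
          unlink(path);
          bind(fd, (struct sockaddr *)&addr, sizeof(addr));
          listen(fd, 1);
          return fd;
  }

  static int client(const char *path)
  {
          struct sockaddr_un addr = { .sun_family = AF_UNIX };
          int fd = socket(AF_UNIX, SOCK_STREAM, 0);

          strncpy(addr.sun_path, path, sizeof(addr.sun_path) - 1);
          connect(fd, (struct sockaddr *)&addr, sizeof(addr));
          return fd;
  }

  int main(void)
  {
          int a = listener("/tmp/sock_a");        /* A */
          int b = listener("/tmp/sock_b");        /* B */
          int to_a = client("/tmp/sock_a");       /* peer of A's embryo C */
          int to_b = client("/tmp/sock_b");       /* peer of B's embryo D */

          send_fd(to_b, a);                       /* fd of A is queued on D */
          send_fd(to_a, b);                       /* fd of B is queued on C */

          return 0;
  }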

Now, D holds A's refcount, and C holds B's refcount, so unix_release()
will never be called for A and B when we close() them. However, no
one can call close() for D and C to free the skbs holding the refcounts
of A and B because C/D is in A/B's receive queue, which would only be
purged by unix_release() for A and B.
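
Continuing the sketch above (again, only an illustration), main() can
now close() every fd it still holds before returning, and none of the
four sockets A, B, C, and D is freed:

          close(to_a);    /* C's peer goes away, but C stays queued on A */
          close(to_b);    /* likewise, D stays queued on B */
          close(a);       /* A's file survives: the skb queued on D holds a reference */
          close(b);       /* B's file survives: the skb queued on C holds a reference */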

So, here's a new type of cyclic reference. When a fd of an AF_UNIX
socket is passed to an embryo socket, the reference is indirectly
held by its parent listening socket.

  .-> A                            .-> B
  |   `- sk_receive_queue          |   `- sk_receive_queue
  |      `- skb                    |      `- skb
  |         `- sk == C             |         `- sk == D
  |            `- sk_receive_queue |            `- sk_receive_queue
  |               `- skb +---------'               `- skb +--.
  |                                                          |
  `----------------------------------------------------------'

Technically, the graph must be denoted as A <-> B instead of A (-> D)
and B (-> C) to find such a cyclic reference without touching each
socket's receive queue.

  .-> A --. .-- B <-.
  |        X        |  ==  A <-> B
  `-- C <-' `-> D --'

We apply this fixup in unix_add_edges() if the receiver is an embryo
socket.

We also link such edges to the embryo socket using another list_head
field, embryo_entry, because we need to restore the original separate
graphs A (-> D) and B (-> C) in unix_update_edges() once accept() is
called.

Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
---
 include/net/af_unix.h |  2 ++
 net/unix/af_unix.c    |  2 +-
 net/unix/garbage.c    | 27 ++++++++++++++++++++++++++-
 3 files changed, 29 insertions(+), 2 deletions(-)

diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 438d2a18ba2e..2d8e93775e61 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -25,6 +25,7 @@ void unix_notinflight(struct user_struct *user, struct file *fp);
 void unix_init_vertex(struct unix_sock *u);
 void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver);
 void unix_del_edges(struct scm_fp_list *fpl);
+void unix_update_edges(struct unix_sock *receiver);
 int unix_alloc_edges(struct scm_fp_list *fpl);
 void unix_free_edges(struct scm_fp_list *fpl);
 void unix_gc(void);
@@ -40,6 +41,7 @@ struct unix_edge {
         struct unix_vertex *predecessor;
         struct unix_vertex *successor;
         struct list_head entry;
+        struct list_head embryo_entry;
 };
 
 struct sock *unix_peer_get(struct sock *sk);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 1ebc3c15f972..dab5d8d96e87 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1734,7 +1734,7 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
         }
 
         tsk = skb->sk;
-        unix_sk(tsk)->listener = NULL;
+        unix_update_edges(unix_sk(tsk));
         skb_free_datagram(sk, skb);
         wake_up_interruptible(&unix_sk(sk)->peer_wait);
 
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 353416f38738..97a43f8ec5a5 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -114,10 +114,16 @@ static LIST_HEAD(unix_unvisited_vertices);
 
 void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver)
 {
+        struct unix_vertex *successor;
         int i = 0, j = 0;
 
         spin_lock(&unix_gc_lock);
 
+        if (receiver->listener)
+                successor = &unix_sk(receiver->listener)->vertex;
+        else
+                successor = &receiver->vertex;
+
         while (i < fpl->count_unix) {
                 struct unix_sock *inflight = unix_get_socket(fpl->fp[j++]);
                 struct unix_edge *edge;
@@ -127,12 +133,15 @@ void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver)
 
                 edge = fpl->edges + i++;
                 edge->predecessor = &inflight->vertex;
-                edge->successor = &receiver->vertex;
+                edge->successor = successor;
 
                 if (!edge->predecessor->out_degree++)
                         list_add_tail(&edge->predecessor->entry, &unix_unvisited_vertices);
 
                 list_add_tail(&edge->entry, &edge->predecessor->edges);
+
+                if (receiver->listener)
+                        list_add_tail(&edge->embryo_entry, &receiver->vertex.edges);
         }
 
         spin_unlock(&unix_gc_lock);
@@ -160,6 +169,22 @@ void unix_del_edges(struct scm_fp_list *fpl)
         fpl->inflight = false;
 }
 
+void unix_update_edges(struct unix_sock *receiver)
+{
+        struct unix_edge *edge;
+
+        spin_lock(&unix_gc_lock);
+
+        list_for_each_entry(edge, &receiver->vertex.edges, embryo_entry)
+                edge->successor = &receiver->vertex;
+
+        list_del_init(&receiver->vertex.edges);
+
+        receiver->listener = NULL;
+
+        spin_unlock(&unix_gc_lock);
+}
+
 int unix_alloc_edges(struct scm_fp_list *fpl)
 {
         if (!fpl->count_unix)
--
2.30.2