Message-ID: <20231218075020.60826-4-kuniyu@amazon.com>
Date: Mon, 18 Dec 2023 16:50:19 +0900
From: Kuniyuki Iwashima <kuniyu@...zon.com>
To: "David S. Miller" <davem@...emloft.net>, Eric Dumazet
<edumazet@...gle.com>, Jakub Kicinski <kuba@...nel.org>, Paolo Abeni
<pabeni@...hat.com>
CC: Ivan Babrou <ivan@...udflare.com>, Kuniyuki Iwashima <kuniyu@...zon.com>,
Kuniyuki Iwashima <kuni1840@...il.com>, <netdev@...r.kernel.org>
Subject: [PATCH v3 net-next 3/4] af_unix: Run GC on only one CPU.

If more than 16000 inflight AF_UNIX sockets exist and the garbage
collector is not running, unix_(dgram|stream)_sendmsg() call unix_gc()
and wait for it to complete.

In unix_gc(), all inflight AF_UNIX sockets are traversed at least once,
and more than once if they are GC candidates.  Thus, sendmsg()
significantly slows down with too many inflight AF_UNIX sockets.
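
As background, a socket becomes "inflight" when its file descriptor is
passed over another AF_UNIX socket via SCM_RIGHTS and has not been
received yet.  A minimal userspace sketch (illustrative only;
make_inflight() is a hypothetical helper, not code from this series)
that accumulates such inflight, cyclically referenced sockets:

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

static void make_inflight(void)
{
        char data = 'x';
        struct iovec iov = { .iov_base = &data, .iov_len = 1 };
        union { /* Properly aligned buffer for one fd-passing cmsg. */
                char buf[CMSG_SPACE(sizeof(int))];
                struct cmsghdr align;
        } u = {0};
        struct msghdr msg = {
                .msg_iov = &iov,
                .msg_iovlen = 1,
                .msg_control = u.buf,
                .msg_controllen = sizeof(u.buf),
        };
        struct cmsghdr *cmsg;
        int sv[2];

        socketpair(AF_UNIX, SOCK_DGRAM, 0, sv);

        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));

        /* Queue sv[0]'s fd into sv[0]'s own receive queue by sending
         * it from the peer.  After both fds are closed below, the
         * socket is kept alive only by this in-queue reference, so it
         * stays inflight until the garbage collector breaks the cycle.
         */
        memcpy(CMSG_DATA(cmsg), &sv[0], sizeof(int));
        sendmsg(sv[1], &msg, 0);

        close(sv[0]);
        close(sv[1]);
}

int main(void)
{
        /* Exceed UNIX_INFLIGHT_TRIGGER_GC (16000) so that the next
         * AF_UNIX sendmsg() has to kick and wait for the GC.
         */
        for (int i = 0; i < 17000; i++)
                make_inflight();
        return 0;
}

Each iteration leaves one AF_UNIX socket referenced only from its own
receive queue, so unix_tot_inflight keeps growing until unix_gc() runs.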
There is also a small race window during which multiple unix_gc()
instances can be invoked; all but one are then blocked on the same
spinlock.

Let's convert unix_gc() to use a work item (struct work_struct) so
that it will not consume CPUs unnecessarily.
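
For reference, the deferral pattern this relies on, as a minimal
sketch with placeholder names (gc_work, do_gc, and the callers are
illustrative, not the identifiers added by this patch):

#include <linux/workqueue.h>

static void do_gc(struct work_struct *work);
static DECLARE_WORK(gc_work, do_gc);

static void do_gc(struct work_struct *work)
{
        /* Runs in process context on a workqueue kthread.  A given
         * work item never runs concurrently with itself, so at most
         * one CPU executes the GC at any time.
         */
}

static void trigger_gc(void)
{
        /* No-op if gc_work is already queued, so concurrent callers
         * cannot pile up GC instances; system_unbound_wq lets the
         * scheduler run the work on any CPU.
         */
        queue_work(system_unbound_wq, &gc_work);
}

static void wait_gc(void)
{
        /* Sleeps until the last queued instance of gc_work finishes. */
        flush_work(&gc_work);
}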

Note that WRITE_ONCE(gc_in_progress, true) is moved to
wait_for_unix_gc().  If we left the WRITE_ONCE() in __unix_gc() and
used the following test to decide whether to call flush_work(), a
process could miss the flush:

  CPU 0                                 CPU 1
  ---                                   ---
                                        start work and call __unix_gc()
  if (work_pending(&unix_gc_work) ||    <-- false
      READ_ONCE(gc_in_progress))        <-- false
          flush_work();                 <-- missed!
                                        WRITE_ONCE(gc_in_progress, true)
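
With the WRITE_ONCE() performed in wait_for_unix_gc() before
queue_work(), the flag is visible before the work can start, so the
same interleaving no longer misses the flush (an illustrative sketch,
not part of the original note):

  CPU 0                                 CPU 1
  ---                                   ---
  WRITE_ONCE(gc_in_progress, true)
  queue_work(system_unbound_wq, ...)
                                        if (READ_ONCE(gc_in_progress)) <-- true
                                                flush_work();  <-- waits for __unix_gc()
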
Signed-off-by: Kuniyuki Iwashima <kuniyu@...zon.com>
---
 net/unix/garbage.c | 37 +++++++++++++++++++------------------
 1 file changed, 19 insertions(+), 18 deletions(-)

diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 4d634f5f6a55..0dba36b0bb95 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -86,7 +86,9 @@
 /* Internal data structures and random procedures: */
 
 static LIST_HEAD(gc_candidates);
-static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);
+
+static void __unix_gc(struct work_struct *work);
+static DECLARE_WORK(unix_gc_work, __unix_gc);
 
 static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
                           struct sk_buff_head *hitlist)
@@ -188,17 +190,21 @@ void wait_for_unix_gc(void)
 {
         /* If number of inflight sockets is insane,
          * force a garbage collect right now.
+         *
          * Paired with the WRITE_ONCE() in unix_inflight(),
-         * unix_notinflight() and gc_in_progress().
+         * unix_notinflight(), and __unix_gc().
          */
         if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
-            !READ_ONCE(gc_in_progress))
-                unix_gc();
-        wait_event(unix_gc_wait, gc_in_progress == false);
+            !READ_ONCE(gc_in_progress)) {
+                WRITE_ONCE(gc_in_progress, true);
+                queue_work(system_unbound_wq, &unix_gc_work);
+        }
+
+        if (READ_ONCE(gc_in_progress))
+                flush_work(&unix_gc_work);
 }
 
-/* The external entry point: unix_gc() */
-void unix_gc(void)
+static void __unix_gc(struct work_struct *work)
 {
         struct sk_buff *next_skb, *skb;
         struct unix_sock *u;
@@ -209,13 +215,6 @@ void unix_gc(void)
 
         spin_lock(&unix_gc_lock);
 
-        /* Avoid a recursive GC. */
-        if (gc_in_progress)
-                goto out;
-
-        /* Paired with READ_ONCE() in wait_for_unix_gc(). */
-        WRITE_ONCE(gc_in_progress, true);
-
         /* First, select candidates for garbage collection. Only
          * in-flight sockets are considered, and from those only ones
          * which don't have any external reference.
@@ -319,11 +318,13 @@ void unix_gc(void)
         /* All candidates should have been detached by now. */
         BUG_ON(!list_empty(&gc_candidates));
 
+        spin_unlock(&unix_gc_lock);
+
         /* Paired with READ_ONCE() in wait_for_unix_gc(). */
         WRITE_ONCE(gc_in_progress, false);
+}
 
-        wake_up(&unix_gc_wait);
-
- out:
-        spin_unlock(&unix_gc_lock);
+void unix_gc(void)
+{
+        queue_work(system_unbound_wq, &unix_gc_work);
 }
--
2.30.2