Message-ID: <20250521144803.2050504-11-lee@kernel.org>
Date: Wed, 21 May 2025 14:45:18 +0000
From: Lee Jones <lee@...nel.org>
To: lee@...nel.org,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Kuniyuki Iwashima <kuniyu@...zon.com>,
Jens Axboe <axboe@...nel.dk>,
Sasha Levin <sashal@...nel.org>,
Michal Luczaj <mhal@...x.co>,
Rao Shoaib <Rao.Shoaib@...cle.com>,
Pavel Begunkov <asml.silence@...il.com>,
linux-kernel@...r.kernel.org,
netdev@...r.kernel.org
Cc: stable@...r.kernel.org
Subject: [PATCH v6.6 10/26] af_unix: Bulk update unix_tot_inflight/unix_inflight when queuing skb.

From: Kuniyuki Iwashima <kuniyu@...zon.com>

[ Upstream commit 22c3c0c52d32f41cc38cd936ea0c93f22ced3315 ]
Currently, we track the number of inflight sockets in two variables.
unix_tot_inflight is the total number of inflight AF_UNIX sockets on
the host, and user->unix_inflight is the number of inflight fds per
user.

We update them one by one in unix_inflight(), although this can be
done once per batch. Also, sendmsg() can fail even after
unix_inflight(), and then we need to acquire unix_gc_lock again only
to decrement the counters.

Let's bulk-update the counters in unix_add_edges() and unix_del_edges(),
which are called only for successfully passed fds.

Signed-off-by: Kuniyuki Iwashima <kuniyu@...zon.com>
Acked-by: Paolo Abeni <pabeni@...hat.com>
Link: https://lore.kernel.org/r/20240325202425.60930-5-kuniyu@amazon.com
Signed-off-by: Jakub Kicinski <kuba@...nel.org>
(cherry picked from commit 22c3c0c52d32f41cc38cd936ea0c93f22ced3315)
Signed-off-by: Lee Jones <lee@...nel.org>
---
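
For reviewers who want the wider context: a file is "inflight" while its
fd sits in a receiver's queue after being passed over an AF_UNIX socket
with an SCM_RIGHTS control message, and those transfers are exactly what
the two counters track. Below is a minimal userspace sketch of such a
transfer (illustration only, not part of the patch; the send_fd() helper
name is invented for this sketch):

/* scm_send.c - build with: cc -o scm_send scm_send.c */
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

/* Invented helper: queue one fd on an AF_UNIX socket via SCM_RIGHTS. */
static ssize_t send_fd(int sock, int fd)
{
	char data = 'x';
	struct iovec iov = { .iov_base = &data, .iov_len = 1 };
	union {			/* guarantees cmsg buffer alignment */
		struct cmsghdr align;
		char buf[CMSG_SPACE(sizeof(int))];
	} u;
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = u.buf,
		.msg_controllen = sizeof(u.buf),
	};
	struct cmsghdr *cmsg;

	memset(u.buf, 0, sizeof(u.buf));
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;	/* pass a file descriptor */
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));

	/* From here until the receiver calls recvmsg(), the kernel counts
	 * this fd in unix_tot_inflight and user->unix_inflight. */
	return sendmsg(sock, &msg, 0);
}

int main(void)
{
	int sk[2];

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sk) < 0)
		return 1;

	if (send_fd(sk[0], STDIN_FILENO) < 0)	/* pass our own stdin */
		return 1;

	close(sk[0]);
	close(sk[1]);
	return 0;
}
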
net/unix/garbage.c | 18 +++++++-----------
1 file changed, 7 insertions(+), 11 deletions(-)
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index b5b4a200dbf3b..f7041fc230008 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -144,6 +144,7 @@ static void unix_free_vertices(struct scm_fp_list *fpl)
 }
 
 DEFINE_SPINLOCK(unix_gc_lock);
+unsigned int unix_tot_inflight;
 
 void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver)
 {
@@ -168,7 +169,10 @@ void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver)
 		unix_add_edge(fpl, edge);
 	} while (i < fpl->count_unix);
 
+	WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + fpl->count_unix);
 out:
+	WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight + fpl->count);
+
 	spin_unlock(&unix_gc_lock);
 
 	fpl->inflight = true;
@@ -191,7 +195,10 @@ void unix_del_edges(struct scm_fp_list *fpl)
 		unix_del_edge(fpl, edge);
 	} while (i < fpl->count_unix);
 
+	WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - fpl->count_unix);
 out:
+	WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight - fpl->count);
+
 	spin_unlock(&unix_gc_lock);
 
 	fpl->inflight = false;
@@ -234,7 +241,6 @@ void unix_destroy_fpl(struct scm_fp_list *fpl)
 	unix_free_vertices(fpl);
 }
 
-unsigned int unix_tot_inflight;
 static LIST_HEAD(gc_candidates);
 static LIST_HEAD(gc_inflight_list);
 
@@ -255,13 +261,8 @@ void unix_inflight(struct user_struct *user, struct file *filp)
 			WARN_ON_ONCE(list_empty(&u->link));
 		}
 		u->inflight++;
-
-		/* Paired with READ_ONCE() in wait_for_unix_gc() */
-		WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1);
 	}
 
-	WRITE_ONCE(user->unix_inflight, user->unix_inflight + 1);
-
 	spin_unlock(&unix_gc_lock);
 }
 
@@ -278,13 +279,8 @@ void unix_notinflight(struct user_struct *user, struct file *filp)
 		u->inflight--;
 		if (!u->inflight)
 			list_del_init(&u->link);
-
-		/* Paired with READ_ONCE() in wait_for_unix_gc() */
-		WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1);
 	}
 
-	WRITE_ONCE(user->unix_inflight, user->unix_inflight - 1);
-
 	spin_unlock(&unix_gc_lock);
 }
 
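
As a rough standalone sketch of what the change buys (userspace analogy
only: a pthread mutex stands in for unix_gc_lock, plain arithmetic for
WRITE_ONCE(), and all names are invented), the counters go from one
locked update per fd to one locked update per batch:

/* batch_counter.c - build with: cc -pthread -o batch_counter batch_counter.c */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t gc_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int tot_inflight;

/* Old shape: one lock round-trip and one counter bump per fd. */
static void inflight_one(void)
{
	pthread_mutex_lock(&gc_lock);
	tot_inflight++;
	pthread_mutex_unlock(&gc_lock);
}

/* New shape: a single locked section covers the whole batch, and it
 * only runs for fds that were actually queued. */
static void add_edges(unsigned int count_unix)
{
	pthread_mutex_lock(&gc_lock);
	tot_inflight += count_unix;
	pthread_mutex_unlock(&gc_lock);
}

int main(void)
{
	for (unsigned int i = 0; i < 3; i++)
		inflight_one();		/* three lock round-trips */

	add_edges(3);			/* one lock round-trip */

	printf("tot_inflight = %u\n", tot_inflight);	/* prints 6 */
	return 0;
}
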
--
2.49.0.1112.g889b7c5bd8-goog