[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250428081744.52375-3-jiayuan.chen@linux.dev>
Date: Mon, 28 Apr 2025 16:16:53 +0800
From: Jiayuan Chen <jiayuan.chen@...ux.dev>
To: bpf@...r.kernel.org
Cc: mrpre@....com,
Jiayuan Chen <jiayuan.chen@...ux.dev>,
Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Andrii Nakryiko <andrii@...nel.org>,
Martin KaFai Lau <martin.lau@...ux.dev>,
Eduard Zingerman <eddyz87@...il.com>,
Song Liu <song@...nel.org>,
Yonghong Song <yonghong.song@...ux.dev>,
John Fastabend <john.fastabend@...il.com>,
KP Singh <kpsingh@...nel.org>,
Stanislav Fomichev <sdf@...ichev.me>,
Hao Luo <haoluo@...gle.com>,
Jiri Olsa <jolsa@...nel.org>,
Jonathan Corbet <corbet@....net>,
Jakub Sitnicki <jakub@...udflare.com>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Simon Horman <horms@...nel.org>,
Kuniyuki Iwashima <kuniyu@...zon.com>,
Willem de Bruijn <willemb@...gle.com>,
Mykola Lysenko <mykolal@...com>,
Shuah Khan <shuah@...nel.org>,
Jiapeng Chong <jiapeng.chong@...ux.alibaba.com>,
linux-doc@...r.kernel.org,
linux-kernel@...r.kernel.org,
netdev@...r.kernel.org,
linux-kselftest@...r.kernel.org
Subject: [PATCH bpf-next v1 2/3] bpf, sockmap: Affinitize workqueue to a specific CPU
Introduce an sk_psock_schedule_delayed_work() wrapper which, when the BPF
program has selected a redirect CPU via bpf_sk_skb_set_redirect_cpu(),
calls schedule_delayed_work_on() so the psock's work item runs on that
specific CPU. Otherwise, it falls back to the original
schedule_delayed_work() behavior.
Signed-off-by: Jiayuan Chen <jiayuan.chen@...ux.dev>
---
include/linux/skmsg.h | 12 ++++++++++++
net/core/skmsg.c | 9 +++++----
2 files changed, 17 insertions(+), 4 deletions(-)
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index b888481a845d..21c7dd47186f 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -396,6 +396,18 @@ static inline void sk_psock_report_error(struct sk_psock *psock, int err)
sk_error_report(sk);
}
+static inline void sk_psock_schedule_delayed_work(struct sk_psock *psock,
+ int delay)
+{
+ s32 redir_cpu = psock->redir_cpu;
+
+ if (redir_cpu != BPF_SK_REDIR_CPU_UNSET)
+ schedule_delayed_work_on(redir_cpu, &psock->work,
+ delay);
+ else
+ schedule_delayed_work(&psock->work, delay);
+}
+
struct sk_psock *sk_psock_init(struct sock *sk, int node);
void sk_psock_stop(struct sk_psock *psock);
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 292752c783b5..af00c09263a8 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -689,7 +689,7 @@ static void sk_psock_backlog(struct work_struct *work)
* other work that might be here.
*/
if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
- schedule_delayed_work(&psock->work, 1);
+ sk_psock_schedule_delayed_work(psock, 1);
goto end;
}
/* Hard errors break pipe and stop xmit. */
@@ -940,6 +940,7 @@ static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
sock_drop(from->sk, skb);
return -EIO;
}
+ psock_other->redir_cpu = from->redir_cpu;
spin_lock_bh(&psock_other->ingress_lock);
if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
spin_unlock_bh(&psock_other->ingress_lock);
@@ -949,7 +950,7 @@ static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
}
skb_queue_tail(&psock_other->ingress_skb, skb);
- schedule_delayed_work(&psock_other->work, 0);
+ sk_psock_schedule_delayed_work(psock_other, 0);
spin_unlock_bh(&psock_other->ingress_lock);
return 0;
}
@@ -1027,7 +1028,7 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
spin_lock_bh(&psock->ingress_lock);
if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
skb_queue_tail(&psock->ingress_skb, skb);
- schedule_delayed_work(&psock->work, 0);
+ sk_psock_schedule_delayed_work(psock, 0);
err = 0;
}
spin_unlock_bh(&psock->ingress_lock);
@@ -1059,7 +1060,7 @@ static void sk_psock_write_space(struct sock *sk)
psock = sk_psock(sk);
if (likely(psock)) {
if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
- schedule_delayed_work(&psock->work, 0);
+ sk_psock_schedule_delayed_work(psock, 0);
write_space = psock->saved_write_space;
}
rcu_read_unlock();
--
2.47.1
Powered by blists - more mailing lists