Message-Id: <20210310053222.41371-5-xiyou.wangcong@gmail.com>
Date: Tue, 9 Mar 2021 21:32:15 -0800
From: Cong Wang <xiyou.wangcong@...il.com>
To: netdev@...r.kernel.org
Cc: bpf@...r.kernel.org, duanxiongchun@...edance.com,
wangdongdong.6@...edance.com, jiang.wang@...edance.com,
Cong Wang <cong.wang@...edance.com>,
John Fastabend <john.fastabend@...il.com>,
Daniel Borkmann <daniel@...earbox.net>,
Jakub Sitnicki <jakub@...udflare.com>,
Lorenz Bauer <lmb@...udflare.com>
Subject: [Patch bpf-next v4 04/11] skmsg: avoid lock_sock() in sk_psock_backlog()

From: Cong Wang <cong.wang@...edance.com>

We do not have to lock the sock to avoid losing sk_socket;
instead, we can purge all the ingress queues when we close the
socket. Sending or receiving packets after orphaning the socket
makes no sense.

We do purge these queues when the psock refcnt reaches 0, but
here we want to purge them explicitly in sock_map_close().
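
For intuition, here is a tiny standalone userspace sketch of the
pattern this moves to (illustrative only, not kernel code; the
fake_psock/backlog_worker/fake_purge names are made up): the close
path marks the socket dead, waits for any in-flight worker instead
of having the worker pin the socket with lock_sock(), and then
drains whatever is left.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_psock {
	pthread_t worker;
	pthread_mutex_t lock;
	bool dead;	/* models sock_flag(sk, SOCK_DEAD) */
	int queued;	/* models the ingress skb queue */
};

static void *backlog_worker(void *arg)
{
	struct fake_psock *p = arg;

	for (;;) {
		pthread_mutex_lock(&p->lock);
		if (p->dead || !p->queued) {	/* like the SOCK_DEAD test */
			pthread_mutex_unlock(&p->lock);
			return NULL;
		}
		p->queued--;	/* "transmit" one queued buffer */
		pthread_mutex_unlock(&p->lock);
	}
}

static void fake_purge(struct fake_psock *p)
{
	pthread_mutex_lock(&p->lock);
	p->dead = true;			/* stop the worker's loop */
	pthread_mutex_unlock(&p->lock);

	pthread_join(p->worker, NULL);	/* models cancel_work_sync() */
	p->queued = 0;			/* models sk_psock_zap_ingress() */
}

int main(void)
{
	struct fake_psock p = { .dead = false, .queued = 1000 };

	pthread_mutex_init(&p.lock, NULL);
	pthread_create(&p.worker, NULL, backlog_worker, &p);
	fake_purge(&p);	/* like sock_map_close() calling sk_psock_purge() */
	printf("drained, %d left in queue\n", p.queued);
	return 0;
}

After fake_purge() returns (as after sk_psock_purge()) the worker can
not run again, so nothing touches the queues after close; the dead
check in the worker presumably covers the window while teardown is
still in progress.
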
Cc: John Fastabend <john.fastabend@...il.com>
Cc: Daniel Borkmann <daniel@...earbox.net>
Cc: Jakub Sitnicki <jakub@...udflare.com>
Cc: Lorenz Bauer <lmb@...udflare.com>
Signed-off-by: Cong Wang <cong.wang@...edance.com>
---
 include/linux/skmsg.h |  1 +
 net/core/skmsg.c      | 22 ++++++++++++++--------
 net/core/sock_map.c   |  1 +
 3 files changed, 16 insertions(+), 8 deletions(-)

diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index 7333bf881b81..91b357817bb8 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -347,6 +347,7 @@ static inline void sk_psock_report_error(struct sk_psock *psock, int err)
 }
 
 struct sk_psock *sk_psock_init(struct sock *sk, int node);
+void sk_psock_purge(struct sk_psock *psock);
 
 #if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
 int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock);
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 41a5f82c53e6..bf0f874780c1 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -497,7 +497,7 @@ static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
 	if (!ingress) {
 		if (!sock_writeable(psock->sk))
 			return -EAGAIN;
-		return skb_send_sock_locked(psock->sk, skb, off, len);
+		return skb_send_sock(psock->sk, skb, off, len);
 	}
 	return sk_psock_skb_ingress(psock, skb);
 }
@@ -511,8 +511,6 @@ static void sk_psock_backlog(struct work_struct *work)
 	u32 len, off;
 	int ret;
 
-	/* Lock sock to avoid losing sk_socket during loop. */
-	lock_sock(psock->sk);
 	if (state->skb) {
 		skb = state->skb;
 		len = state->len;
@@ -529,7 +527,7 @@ static void sk_psock_backlog(struct work_struct *work)
 		skb_bpf_redirect_clear(skb);
 		do {
 			ret = -EIO;
-			if (likely(psock->sk->sk_socket))
+			if (!sock_flag(psock->sk, SOCK_DEAD))
 				ret = sk_psock_handle_skb(psock, skb, off,
 							  len, ingress);
 			if (ret <= 0) {
@@ -537,13 +535,13 @@ static void sk_psock_backlog(struct work_struct *work)
 					state->skb = skb;
 					state->len = len;
 					state->off = off;
-					goto end;
+					return;
 				}
 				/* Hard errors break pipe and stop xmit. */
 				sk_psock_report_error(psock, ret ? -ret : EPIPE);
 				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
 				kfree_skb(skb);
-				goto end;
+				return;
 			}
 			off += ret;
 			len -= ret;
@@ -552,8 +550,6 @@ static void sk_psock_backlog(struct work_struct *work)
 		if (!ingress)
 			kfree_skb(skb);
 	}
-end:
-	release_sock(psock->sk);
 }
 
 struct sk_psock *sk_psock_init(struct sock *sk, int node)
@@ -654,6 +650,16 @@ static void sk_psock_link_destroy(struct sk_psock *psock)
 	}
 }
 
+void sk_psock_purge(struct sk_psock *psock)
+{
+	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
+
+	cancel_work_sync(&psock->work);
+
+	sk_psock_cork_free(psock);
+	sk_psock_zap_ingress(psock);
+}
+
 static void sk_psock_done_strp(struct sk_psock *psock);
 
 static void sk_psock_destroy_deferred(struct work_struct *gc)
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index dd53a7771d7e..26ba47b099f1 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -1540,6 +1540,7 @@ void sock_map_close(struct sock *sk, long timeout)
 	saved_close = psock->saved_close;
 	sock_map_remove_links(sk, psock);
 	rcu_read_unlock();
+	sk_psock_purge(psock);
 	release_sock(sk);
 	saved_close(sk, timeout);
 }
--
2.25.1