Message-Id: <20240204104601.55760-4-kerneljasonxing@gmail.com>
Date: Sun, 4 Feb 2024 18:46:01 +0800
From: Jason Xing <kerneljasonxing@...il.com>
To: davem@...emloft.net,
edumazet@...gle.com,
kuba@...nel.org,
pabeni@...hat.com,
dsahern@...nel.org
Cc: netdev@...r.kernel.org,
kerneljasonxing@...il.com,
Jason Xing <kernelxing@...cent.com>
Subject: [PATCH net-next 2/2] tcp: add more drop reasons in the tcp_child_process() path

From: Jason Xing <kernelxing@...cent.com>

As the subject says, add more drop reasons to narrow down why an skb is
dropped in tcp_rcv_state_process(): pass a drop_reason pointer down from
the IPv4/IPv6 receive paths through tcp_child_process(), and set TCP_FLAGS
plus the new TCP_CONNREQNOTACCEPTABLE and TCP_ABORTONDATA reasons on the
paths that drop the skb and ask the caller to send a reset.

Signed-off-by: Jason Xing <kernelxing@...cent.com>
---
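Note for reviewers, not part of the change itself: below is a minimal
sketch (made-up example_do_rcv() name, locking and the MD5/AO checks
omitted, tcp_v4_send_reset() standing in for the existing static helper)
of how a caller such as tcp_v4_do_rcv() is expected to thread the new
out-parameter through after this patch. The point is only that the reason
filled in by tcp_rcv_state_process()/tcp_child_process() reaches
kfree_skb_reason(), so the finer-grained values show up in the
skb:kfree_skb tracepoint instead of NOT_SPECIFIED.

static int example_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;

	if (tcp_rcv_state_process(sk, skb, &reason)) {
		/* the callee narrowed the reason down before asking for a reset */
		tcp_v4_send_reset(sk, skb);
		goto discard;
	}
	return 0;

discard:
	/* the fine-grained reason is what the kfree_skb tracepoint will report */
	kfree_skb_reason(skb, reason);
	return 0;
}
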
include/net/dropreason-core.h | 6 ++++++
include/net/tcp.h | 5 +++--
net/ipv4/tcp_input.c | 19 +++++++++++++++----
net/ipv4/tcp_ipv4.c | 6 +++---
net/ipv4/tcp_minisocks.c | 4 ++--
net/ipv6/tcp_ipv6.c | 6 +++---
6 files changed, 32 insertions(+), 14 deletions(-)

diff --git a/include/net/dropreason-core.h b/include/net/dropreason-core.h
index 85a19b883dee..980fd4442b7c 100644
--- a/include/net/dropreason-core.h
+++ b/include/net/dropreason-core.h
@@ -31,6 +31,8 @@
FN(TCP_AOFAILURE) \
FN(SOCKET_BACKLOG) \
FN(TCP_FLAGS) \
+ FN(TCP_CONNREQNOTACCEPTABLE) \
+ FN(TCP_ABORTONDATA) \
FN(TCP_ZEROWINDOW) \
FN(TCP_OLD_DATA) \
FN(TCP_OVERWINDOW) \
@@ -203,6 +205,10 @@ enum skb_drop_reason {
SKB_DROP_REASON_SOCKET_BACKLOG,
/** @SKB_DROP_REASON_TCP_FLAGS: TCP flags invalid */
SKB_DROP_REASON_TCP_FLAGS,
+ /** @SKB_DROP_REASON_TCP_CONNREQNOTACCEPTABLE: connection request not acceptable */
+ SKB_DROP_REASON_TCP_CONNREQNOTACCEPTABLE,
+ /** @SKB_DROP_REASON_TCP_ABORTONDATA: abort on data, see LINUX_MIB_TCPABORTONDATA */
+ SKB_DROP_REASON_TCP_ABORTONDATA,
/**
* @SKB_DROP_REASON_TCP_ZEROWINDOW: TCP receive window size is zero,
* see LINUX_MIB_TCPZEROWINDOWDROP
diff --git a/include/net/tcp.h b/include/net/tcp.h
index e3b07d2790c4..8c87170cb0b5 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -348,7 +348,8 @@ void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, int *karg);
-int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
+int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ enum skb_drop_reason *drop_reason);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
@@ -397,7 +398,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
struct request_sock *req, bool fastopen,
bool *lost_race);
int tcp_child_process(struct sock *parent, struct sock *child,
- struct sk_buff *skb);
+ struct sk_buff *skb, enum skb_drop_reason *reason);
void tcp_enter_loss(struct sock *sk);
void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
void tcp_clear_retrans(struct tcp_sock *tp);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2d20edf652e6..bacb1140dab3 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -6616,7 +6616,8 @@ static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
* address independent.
*/
-int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
+int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ enum skb_drop_reason *drop_reason)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
@@ -6632,8 +6633,10 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
goto discard;
case TCP_LISTEN:
- if (th->ack)
+ if (th->ack) {
+ SKB_DR_SET(*drop_reason, TCP_FLAGS);
return 1;
+ }
if (th->rst) {
SKB_DR_SET(reason, TCP_RESET);
@@ -6653,8 +6656,10 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
local_bh_enable();
rcu_read_unlock();
- if (!acceptable)
+ if (!acceptable) {
+ SKB_DR_SET(*drop_reason, TCP_CONNREQNOTACCEPTABLE);
return 1;
+ }
consume_skb(skb);
return 0;
}
@@ -6704,8 +6709,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
FLAG_NO_CHALLENGE_ACK);
if ((int)reason <= 0) {
- if (sk->sk_state == TCP_SYN_RECV)
+ if (sk->sk_state == TCP_SYN_RECV) {
+ if ((int)reason < 0)
+ *drop_reason = -reason;
return 1; /* send one RST */
+ }
/* accept old ack during closing */
if ((int)reason < 0) {
tcp_send_challenge_ack(sk);
@@ -6781,6 +6789,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
if (READ_ONCE(tp->linger2) < 0) {
tcp_done(sk);
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
+ SKB_DR_SET(*drop_reason, TCP_ABORTONDATA);
return 1;
}
if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
@@ -6790,6 +6799,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
tcp_fastopen_active_disable(sk);
tcp_done(sk);
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
+ SKB_DR_SET(*drop_reason, TCP_ABORTONDATA);
return 1;
}
@@ -6855,6 +6865,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
tcp_reset(sk, skb);
+ SKB_DR_SET(*drop_reason, TCP_ABORTONDATA);
return 1;
}
}
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index b63b0efa111d..7da62af0d890 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1918,7 +1918,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
if (!nsk)
goto discard;
if (nsk != sk) {
- if (tcp_child_process(sk, nsk, skb)) {
+ if (tcp_child_process(sk, nsk, skb, &reason)) {
rsk = nsk;
goto reset;
}
@@ -1927,7 +1927,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
} else
sock_rps_save_rxhash(sk, skb);
- if (tcp_rcv_state_process(sk, skb)) {
+ if (tcp_rcv_state_process(sk, skb, &reason)) {
rsk = sk;
goto reset;
}
@@ -2276,7 +2276,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
if (nsk == sk) {
reqsk_put(req);
tcp_v4_restore_cb(skb);
- } else if (tcp_child_process(sk, nsk, skb)) {
+ } else if (tcp_child_process(sk, nsk, skb, &drop_reason)) {
tcp_v4_send_reset(nsk, skb);
goto discard_and_relse;
} else {
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 9e85f2a0bddd..49a88bf47b79 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -912,7 +912,7 @@ EXPORT_SYMBOL(tcp_check_req);
*/
int tcp_child_process(struct sock *parent, struct sock *child,
- struct sk_buff *skb)
+ struct sk_buff *skb, enum skb_drop_reason *reason)
__releases(&((child)->sk_lock.slock))
{
int ret = 0;
@@ -923,7 +923,7 @@ int tcp_child_process(struct sock *parent, struct sock *child,
tcp_segs_in(tcp_sk(child), skb);
if (!sock_owned_by_user(child)) {
- ret = tcp_rcv_state_process(child, skb);
+ ret = tcp_rcv_state_process(child, skb, reason);
/* Wakeup parent, send SIGIO */
if (state == TCP_SYN_RECV && child->sk_state != state)
parent->sk_data_ready(parent);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 57b25b1fc9d9..ced52f7aa5bb 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1657,7 +1657,7 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
goto discard;
if (nsk != sk) {
- if (tcp_child_process(sk, nsk, skb))
+ if (tcp_child_process(sk, nsk, skb, &reason))
goto reset;
if (opt_skb)
__kfree_skb(opt_skb);
@@ -1666,7 +1666,7 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
} else
sock_rps_save_rxhash(sk, skb);
- if (tcp_rcv_state_process(sk, skb))
+ if (tcp_rcv_state_process(sk, skb, &reason))
goto reset;
if (opt_skb)
goto ipv6_pktoptions;
@@ -1856,7 +1856,7 @@ INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
if (nsk == sk) {
reqsk_put(req);
tcp_v6_restore_cb(skb);
- } else if (tcp_child_process(sk, nsk, skb)) {
+ } else if (tcp_child_process(sk, nsk, skb, &drop_reason)) {
tcp_v6_send_reset(nsk, skb);
goto discard_and_relse;
} else {
--
2.37.3