Message-ID: <1366341367.3205.85.camel@edumazet-glaptop>
Date: Thu, 18 Apr 2013 20:16:07 -0700
From: Eric Dumazet <eric.dumazet@...il.com>
To: David Miller <davem@...emloft.net>
Cc: netdev <netdev@...r.kernel.org>, Yuchung Cheng <ycheng@...gle.com>
Subject: [PATCH net-next] tcp: remove one indentation level in tcp_rcv_state_process()
From: Eric Dumazet <edumazet@...gle.com>

Remove one level of indentation 'introduced' in commit
c3ae62af8e75 ("tcp: should drop incoming frames without ACK flag set").

The @acceptable variable becomes a boolean instead of an int.

This patch is a pure cleanup.

Signed-off-by: Eric Dumazet <edumazet@...gle.com>
Cc: Yuchung Cheng <ycheng@...gle.com>
---
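Note for reviewers, below the fold so it stays out of git history: a
condensed before/after sketch of the shape of the change. The identifiers
are the ones from the patch, but the switch bodies are elided, so this is
illustrative rather than compilable kernel code.

	/* Before: the switch sat inside an artificial "if (true)" scope,
	 * left behind by commit c3ae62af8e75, apparently to keep that
	 * fix's diff minimal.
	 */
	if (true) {
		int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;

		switch (sk->sk_state) {
		case TCP_SYN_RECV:
			/* ... */
		}
	}

	/* After: @acceptable is hoisted to the top of
	 * tcp_rcv_state_process() as a bool and the wrapper is gone,
	 * so the whole switch loses one level of indentation.
	 */
	acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;

	switch (sk->sk_state) {
	case TCP_SYN_RECV:
		/* ... */
	}
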
net/ipv4/tcp_input.c | 267 ++++++++++++++++++++---------------------
1 file changed, 132 insertions(+), 135 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 6d9ca35..08f58c1 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5595,6 +5595,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
struct inet_connection_sock *icsk = inet_csk(sk);
struct request_sock *req;
int queued = 0;
+ bool acceptable;
tp->rx_opt.saw_tstamp = 0;
@@ -5665,156 +5666,152 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
return 0;
/* step 5: check the ACK field */
- if (true) {
- int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
-
- switch (sk->sk_state) {
- case TCP_SYN_RECV:
- if (acceptable) {
- /* Once we leave TCP_SYN_RECV, we no longer
- * need req so release it.
- */
- if (req) {
- tcp_synack_rtt_meas(sk, req);
- tp->total_retrans = req->num_retrans;
-
- reqsk_fastopen_remove(sk, req, false);
- } else {
- /* Make sure socket is routed, for
- * correct metrics.
- */
- icsk->icsk_af_ops->rebuild_header(sk);
- tcp_init_congestion_control(sk);
+ acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
- tcp_mtup_init(sk);
- tcp_init_buffer_space(sk);
- tp->copied_seq = tp->rcv_nxt;
- }
- smp_mb();
- tcp_set_state(sk, TCP_ESTABLISHED);
- sk->sk_state_change(sk);
+ switch (sk->sk_state) {
+ case TCP_SYN_RECV:
+ if (acceptable) {
+ /* Once we leave TCP_SYN_RECV, we no longer
+ * need req so release it.
+ */
+ if (req) {
+ tcp_synack_rtt_meas(sk, req);
+ tp->total_retrans = req->num_retrans;
- /* Note, that this wakeup is only for marginal
- * crossed SYN case. Passively open sockets
- * are not waked up, because sk->sk_sleep ==
- * NULL and sk->sk_socket == NULL.
+ reqsk_fastopen_remove(sk, req, false);
+ } else {
+ /* Make sure socket is routed, for
+ * correct metrics.
*/
- if (sk->sk_socket)
- sk_wake_async(sk,
- SOCK_WAKE_IO, POLL_OUT);
-
- tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
- tp->snd_wnd = ntohs(th->window) <<
- tp->rx_opt.snd_wscale;
- tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
-
- if (tp->rx_opt.tstamp_ok)
- tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
-
- if (req) {
- /* Re-arm the timer because data may
- * have been sent out. This is similar
- * to the regular data transmission case
- * when new data has just been ack'ed.
- *
- * (TFO) - we could try to be more
- * aggressive and retranmitting any data
- * sooner based on when they were sent
- * out.
- */
- tcp_rearm_rto(sk);
- } else
- tcp_init_metrics(sk);
+ icsk->icsk_af_ops->rebuild_header(sk);
+ tcp_init_congestion_control(sk);
- /* Prevent spurious tcp_cwnd_restart() on
- * first data packet.
+ tcp_mtup_init(sk);
+ tcp_init_buffer_space(sk);
+ tp->copied_seq = tp->rcv_nxt;
+ }
+ smp_mb();
+ tcp_set_state(sk, TCP_ESTABLISHED);
+ sk->sk_state_change(sk);
+
+ /* Note, that this wakeup is only for marginal
+ * crossed SYN case. Passively open sockets
+ * are not waked up, because sk->sk_sleep ==
+ * NULL and sk->sk_socket == NULL.
+ */
+ if (sk->sk_socket)
+ sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
+
+ tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
+ tp->snd_wnd = ntohs(th->window) <<
+ tp->rx_opt.snd_wscale;
+ tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
+
+ if (tp->rx_opt.tstamp_ok)
+ tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
+
+ if (req) {
+ /* Re-arm the timer because data may
+ * have been sent out. This is similar
+ * to the regular data transmission case
+ * when new data has just been ack'ed.
+ *
+ * (TFO) - we could try to be more
+ * aggressive and retranmitting any data
+ * sooner based on when they were sent out.
*/
- tp->lsndtime = tcp_time_stamp;
+ tcp_rearm_rto(sk);
+ } else
+ tcp_init_metrics(sk);
- tcp_initialize_rcv_mss(sk);
- tcp_fast_path_on(tp);
- } else {
- return 1;
- }
- break;
+ /* Prevent spurious tcp_cwnd_restart() on
+ * first data packet.
+ */
+ tp->lsndtime = tcp_time_stamp;
- case TCP_FIN_WAIT1:
- /* If we enter the TCP_FIN_WAIT1 state and we are a
- * Fast Open socket and this is the first acceptable
- * ACK we have received, this would have acknowledged
- * our SYNACK so stop the SYNACK timer.
+ tcp_initialize_rcv_mss(sk);
+ tcp_fast_path_on(tp);
+ } else {
+ return 1;
+ }
+ break;
+
+ case TCP_FIN_WAIT1:
+ /* If we enter the TCP_FIN_WAIT1 state and we are a
+ * Fast Open socket and this is the first acceptable
+ * ACK we have received, this would have acknowledged
+ * our SYNACK so stop the SYNACK timer.
+ */
+ if (req != NULL) {
+ /* Return RST if ack_seq is invalid.
+ * Note that RFC793 only says to generate a
+ * DUPACK for it but for TCP Fast Open it seems
+ * better to treat this case like TCP_SYN_RECV
+ * above.
*/
- if (req != NULL) {
- /* Return RST if ack_seq is invalid.
- * Note that RFC793 only says to generate a
- * DUPACK for it but for TCP Fast Open it seems
- * better to treat this case like TCP_SYN_RECV
- * above.
- */
- if (!acceptable)
+ if (!acceptable)
+ return 1;
+ /* We no longer need the request sock. */
+ reqsk_fastopen_remove(sk, req, false);
+ tcp_rearm_rto(sk);
+ }
+ if (tp->snd_una == tp->write_seq) {
+ struct dst_entry *dst;
+
+ tcp_set_state(sk, TCP_FIN_WAIT2);
+ sk->sk_shutdown |= SEND_SHUTDOWN;
+
+ dst = __sk_dst_get(sk);
+ if (dst)
+ dst_confirm(dst);
+
+ if (!sock_flag(sk, SOCK_DEAD))
+ /* Wake up lingering close() */
+ sk->sk_state_change(sk);
+ else {
+ int tmo;
+
+ if (tp->linger2 < 0 ||
+ (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
+ after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
+ tcp_done(sk);
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
return 1;
- /* We no longer need the request sock. */
- reqsk_fastopen_remove(sk, req, false);
- tcp_rearm_rto(sk);
- }
- if (tp->snd_una == tp->write_seq) {
- struct dst_entry *dst;
-
- tcp_set_state(sk, TCP_FIN_WAIT2);
- sk->sk_shutdown |= SEND_SHUTDOWN;
-
- dst = __sk_dst_get(sk);
- if (dst)
- dst_confirm(dst);
-
- if (!sock_flag(sk, SOCK_DEAD))
- /* Wake up lingering close() */
- sk->sk_state_change(sk);
- else {
- int tmo;
-
- if (tp->linger2 < 0 ||
- (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
- after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
- tcp_done(sk);
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
- return 1;
- }
+ }
- tmo = tcp_fin_time(sk);
- if (tmo > TCP_TIMEWAIT_LEN) {
- inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
- } else if (th->fin || sock_owned_by_user(sk)) {
- /* Bad case. We could lose such FIN otherwise.
- * It is not a big problem, but it looks confusing
- * and not so rare event. We still can lose it now,
- * if it spins in bh_lock_sock(), but it is really
- * marginal case.
- */
- inet_csk_reset_keepalive_timer(sk, tmo);
- } else {
- tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
- goto discard;
- }
+ tmo = tcp_fin_time(sk);
+ if (tmo > TCP_TIMEWAIT_LEN) {
+ inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
+ } else if (th->fin || sock_owned_by_user(sk)) {
+ /* Bad case. We could lose such FIN otherwise.
+ * It is not a big problem, but it looks confusing
+ * and not so rare event. We still can lose it now,
+ * if it spins in bh_lock_sock(), but it is really
+ * marginal case.
+ */
+ inet_csk_reset_keepalive_timer(sk, tmo);
+ } else {
+ tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
+ goto discard;
}
}
- break;
+ }
+ break;
- case TCP_CLOSING:
- if (tp->snd_una == tp->write_seq) {
- tcp_time_wait(sk, TCP_TIME_WAIT, 0);
- goto discard;
- }
- break;
+ case TCP_CLOSING:
+ if (tp->snd_una == tp->write_seq) {
+ tcp_time_wait(sk, TCP_TIME_WAIT, 0);
+ goto discard;
+ }
+ break;
- case TCP_LAST_ACK:
- if (tp->snd_una == tp->write_seq) {
- tcp_update_metrics(sk);
- tcp_done(sk);
- goto discard;
- }
- break;
+ case TCP_LAST_ACK:
+ if (tp->snd_una == tp->write_seq) {
+ tcp_update_metrics(sk);
+ tcp_done(sk);
+ goto discard;
}
+ break;
}
/* ts_recent update must be made after we are sure that the packet
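
A quick orientation note on the control flow above, as it reads after the
cleanup: a positive return from tcp_rcv_state_process() asks the caller to
answer with a RST, while "goto discard" consumes the skb quietly. A minimal
sketch of how a caller acts on that contract (hypothetical shape, condensed
from what tcp_v4_do_rcv() does, not a verbatim copy):

	/* hypothetical caller sketch: a positive return means "send RST" */
	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len) > 0) {
		tcp_v4_send_reset(sk, skb);	/* step 5 rejected the segment */
		goto discard;
	}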