Message-ID: <CAK6E8=deSubEg3QnLw7ZGAe8q=yOnuHJk7L1bO7KPFv2HB7Low@mail.gmail.com>
Date: Tue, 26 Jun 2018 10:16:19 -0700
From: Yuchung Cheng <ycheng@...gle.com>
To: Eric Dumazet <edumazet@...gle.com>
Cc: "David S . Miller" <davem@...emloft.net>,
netdev <netdev@...r.kernel.org>,
Eric Dumazet <eric.dumazet@...il.com>
Subject: Re: [PATCH net-next] tcp: remove one indentation level in tcp_create_openreq_child
On Tue, Jun 26, 2018 at 8:45 AM, Eric Dumazet <edumazet@...gle.com> wrote:
> Signed-off-by: Eric Dumazet <edumazet@...gle.com>
> ---
Nice refactor!
Acked-by: Yuchung Cheng <ycheng@...gle.com>
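
For anyone skimming the diff below: the change is purely structural. It
applies the common early-return idiom, bailing out on the failure case
first so the long initialization body no longer needs an extra
indentation level. A minimal sketch of the before/after shape in plain C
(hypothetical widget_create helpers for illustration, not kernel code):

	#include <stdlib.h>

	struct widget { int ready; };

	/* Before: the whole body sits inside "if (w) { ... }". */
	struct widget *widget_create_nested(void)
	{
		struct widget *w = malloc(sizeof(*w));

		if (w) {
			w->ready = 1;
			/* ...many more initializations, all indented... */
		}
		return w;
	}

	/* After: return early on allocation failure; the body stays flat. */
	struct widget *widget_create_flat(void)
	{
		struct widget *w = malloc(sizeof(*w));

		if (!w)
			return NULL;

		w->ready = 1;
		/* ...many more initializations, one level shallower... */
		return w;
	}

Same behavior in both versions; the flat form just reads better when the
success path is long, which is exactly the case in
tcp_create_openreq_child() below.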
> net/ipv4/tcp_minisocks.c | 223 ++++++++++++++++++++-------------------
> 1 file changed, 113 insertions(+), 110 deletions(-)
>
> diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
> index 1dda1341a223937580b4efdbedb21ae50b221ff7..dac5893a52b4520d86ed2fcadbfb561a559fcd3d 100644
> --- a/net/ipv4/tcp_minisocks.c
> +++ b/net/ipv4/tcp_minisocks.c
> @@ -449,119 +449,122 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
> struct sk_buff *skb)
> {
> struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
> -
> - if (newsk) {
> - const struct inet_request_sock *ireq = inet_rsk(req);
> - struct tcp_request_sock *treq = tcp_rsk(req);
> - struct inet_connection_sock *newicsk = inet_csk(newsk);
> - struct tcp_sock *newtp = tcp_sk(newsk);
> - struct tcp_sock *oldtp = tcp_sk(sk);
> -
> - smc_check_reset_syn_req(oldtp, req, newtp);
> -
> - /* Now setup tcp_sock */
> - newtp->pred_flags = 0;
> -
> - newtp->rcv_wup = newtp->copied_seq =
> - newtp->rcv_nxt = treq->rcv_isn + 1;
> - newtp->segs_in = 1;
> -
> - newtp->snd_sml = newtp->snd_una =
> - newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;
> -
> - INIT_LIST_HEAD(&newtp->tsq_node);
> - INIT_LIST_HEAD(&newtp->tsorted_sent_queue);
> -
> - tcp_init_wl(newtp, treq->rcv_isn);
> -
> - newtp->srtt_us = 0;
> - newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
> - minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
> - newicsk->icsk_rto = TCP_TIMEOUT_INIT;
> - newicsk->icsk_ack.lrcvtime = tcp_jiffies32;
> -
> - newtp->packets_out = 0;
> - newtp->retrans_out = 0;
> - newtp->sacked_out = 0;
> - newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
> - newtp->tlp_high_seq = 0;
> - newtp->lsndtime = tcp_jiffies32;
> - newsk->sk_txhash = treq->txhash;
> - newtp->last_oow_ack_time = 0;
> - newtp->total_retrans = req->num_retrans;
> -
> - /* So many TCP implementations out there (incorrectly) count the
> - * initial SYN frame in their delayed-ACK and congestion control
> - * algorithms that we must have the following bandaid to talk
> - * efficiently to them. -DaveM
> - */
> - newtp->snd_cwnd = TCP_INIT_CWND;
> - newtp->snd_cwnd_cnt = 0;
> -
> - /* There's a bubble in the pipe until at least the first ACK. */
> - newtp->app_limited = ~0U;
> -
> - tcp_init_xmit_timers(newsk);
> - newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;
> -
> - newtp->rx_opt.saw_tstamp = 0;
> -
> - newtp->rx_opt.dsack = 0;
> - newtp->rx_opt.num_sacks = 0;
> -
> - newtp->urg_data = 0;
> -
> - if (sock_flag(newsk, SOCK_KEEPOPEN))
> - inet_csk_reset_keepalive_timer(newsk,
> - keepalive_time_when(newtp));
> -
> - newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
> - newtp->rx_opt.sack_ok = ireq->sack_ok;
> - newtp->window_clamp = req->rsk_window_clamp;
> - newtp->rcv_ssthresh = req->rsk_rcv_wnd;
> - newtp->rcv_wnd = req->rsk_rcv_wnd;
> - newtp->rx_opt.wscale_ok = ireq->wscale_ok;
> - if (newtp->rx_opt.wscale_ok) {
> - newtp->rx_opt.snd_wscale = ireq->snd_wscale;
> - newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
> - } else {
> - newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
> - newtp->window_clamp = min(newtp->window_clamp, 65535U);
> - }
> - newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
> - newtp->rx_opt.snd_wscale);
> - newtp->max_window = newtp->snd_wnd;
> -
> - if (newtp->rx_opt.tstamp_ok) {
> - newtp->rx_opt.ts_recent = req->ts_recent;
> - newtp->rx_opt.ts_recent_stamp = get_seconds();
> - newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
> - } else {
> - newtp->rx_opt.ts_recent_stamp = 0;
> - newtp->tcp_header_len = sizeof(struct tcphdr);
> - }
> - newtp->tsoffset = treq->ts_off;
> + const struct inet_request_sock *ireq = inet_rsk(req);
> + struct tcp_request_sock *treq = tcp_rsk(req);
> + struct inet_connection_sock *newicsk;
> + struct tcp_sock *oldtp, *newtp;
> +
> + if (!newsk)
> + return NULL;
> +
> + newicsk = inet_csk(newsk);
> + newtp = tcp_sk(newsk);
> + oldtp = tcp_sk(sk);
> +
> + smc_check_reset_syn_req(oldtp, req, newtp);
> +
> + /* Now setup tcp_sock */
> + newtp->pred_flags = 0;
> +
> + newtp->rcv_wup = newtp->copied_seq =
> + newtp->rcv_nxt = treq->rcv_isn + 1;
> + newtp->segs_in = 1;
> +
> + newtp->snd_sml = newtp->snd_una =
> + newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;
> +
> + INIT_LIST_HEAD(&newtp->tsq_node);
> + INIT_LIST_HEAD(&newtp->tsorted_sent_queue);
> +
> + tcp_init_wl(newtp, treq->rcv_isn);
> +
> + newtp->srtt_us = 0;
> + newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
> + minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
> + newicsk->icsk_rto = TCP_TIMEOUT_INIT;
> + newicsk->icsk_ack.lrcvtime = tcp_jiffies32;
> +
> + newtp->packets_out = 0;
> + newtp->retrans_out = 0;
> + newtp->sacked_out = 0;
> + newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
> + newtp->tlp_high_seq = 0;
> + newtp->lsndtime = tcp_jiffies32;
> + newsk->sk_txhash = treq->txhash;
> + newtp->last_oow_ack_time = 0;
> + newtp->total_retrans = req->num_retrans;
> +
> + /* So many TCP implementations out there (incorrectly) count the
> + * initial SYN frame in their delayed-ACK and congestion control
> + * algorithms that we must have the following bandaid to talk
> + * efficiently to them. -DaveM
> + */
> + newtp->snd_cwnd = TCP_INIT_CWND;
> + newtp->snd_cwnd_cnt = 0;
> +
> + /* There's a bubble in the pipe until at least the first ACK. */
> + newtp->app_limited = ~0U;
> +
> + tcp_init_xmit_timers(newsk);
> + newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;
> +
> + newtp->rx_opt.saw_tstamp = 0;
> +
> + newtp->rx_opt.dsack = 0;
> + newtp->rx_opt.num_sacks = 0;
> +
> + newtp->urg_data = 0;
> +
> + if (sock_flag(newsk, SOCK_KEEPOPEN))
> + inet_csk_reset_keepalive_timer(newsk,
> + keepalive_time_when(newtp));
> +
> + newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
> + newtp->rx_opt.sack_ok = ireq->sack_ok;
> + newtp->window_clamp = req->rsk_window_clamp;
> + newtp->rcv_ssthresh = req->rsk_rcv_wnd;
> + newtp->rcv_wnd = req->rsk_rcv_wnd;
> + newtp->rx_opt.wscale_ok = ireq->wscale_ok;
> + if (newtp->rx_opt.wscale_ok) {
> + newtp->rx_opt.snd_wscale = ireq->snd_wscale;
> + newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
> + } else {
> + newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
> + newtp->window_clamp = min(newtp->window_clamp, 65535U);
> + }
> + newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
> + newtp->max_window = newtp->snd_wnd;
> +
> + if (newtp->rx_opt.tstamp_ok) {
> + newtp->rx_opt.ts_recent = req->ts_recent;
> + newtp->rx_opt.ts_recent_stamp = get_seconds();
> + newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
> + } else {
> + newtp->rx_opt.ts_recent_stamp = 0;
> + newtp->tcp_header_len = sizeof(struct tcphdr);
> + }
> + newtp->tsoffset = treq->ts_off;
> #ifdef CONFIG_TCP_MD5SIG
> - newtp->md5sig_info = NULL; /*XXX*/
> - if (newtp->af_specific->md5_lookup(sk, newsk))
> - newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
> + newtp->md5sig_info = NULL; /*XXX*/
> + if (newtp->af_specific->md5_lookup(sk, newsk))
> + newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
> #endif
> - if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
> - newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
> - newtp->rx_opt.mss_clamp = req->mss;
> - tcp_ecn_openreq_child(newtp, req);
> - newtp->fastopen_req = NULL;
> - newtp->fastopen_rsk = NULL;
> - newtp->syn_data_acked = 0;
> - newtp->rack.mstamp = 0;
> - newtp->rack.advanced = 0;
> - newtp->rack.reo_wnd_steps = 1;
> - newtp->rack.last_delivered = 0;
> - newtp->rack.reo_wnd_persist = 0;
> - newtp->rack.dsack_seen = 0;
> + if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
> + newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
> + newtp->rx_opt.mss_clamp = req->mss;
> + tcp_ecn_openreq_child(newtp, req);
> + newtp->fastopen_req = NULL;
> + newtp->fastopen_rsk = NULL;
> + newtp->syn_data_acked = 0;
> + newtp->rack.mstamp = 0;
> + newtp->rack.advanced = 0;
> + newtp->rack.reo_wnd_steps = 1;
> + newtp->rack.last_delivered = 0;
> + newtp->rack.reo_wnd_persist = 0;
> + newtp->rack.dsack_seen = 0;
> +
> + __TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
>
> - __TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
> - }
> return newsk;
> }
> EXPORT_SYMBOL(tcp_create_openreq_child);
> --
> 2.18.0.rc2.346.g013aa6912e-goog
>