Message-ID: <CAK6E8=d54S6v8_QGysHhDdxm3QK6N-ZZ7KHcZdbKMfEvmdRLsA@mail.gmail.com>
Date:	Thu, 30 Apr 2015 10:26:02 -0700
From:	Yuchung Cheng <ycheng@...gle.com>
To:	Kenneth Klette Jonassen <kennetkl@....uio.no>
Cc:	netdev <netdev@...r.kernel.org>, Eric Dumazet <edumazet@...gle.com>
Subject: Re: [PATCH net-next 1/3] tcp: move struct tcp_sacktag_state to tcp_ack()

On Thu, Apr 30, 2015 at 9:23 AM, Kenneth Klette Jonassen
<kennetkl@....uio.no> wrote:
> A later patch passes two values set in tcp_sacktag_one() to
> tcp_clean_rtx_queue(). Prepare for this by passing them via
> struct tcp_sacktag_state.
>
> Cc: Yuchung Cheng <ycheng@...gle.com>
> Cc: Eric Dumazet <edumazet@...gle.com>
> Signed-off-by: Kenneth Klette Jonassen <kennetkl@....uio.no>
Acked-by: Yuchung Cheng <ycheng@...gle.com>
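
For anyone reading along, here is a minimal user-space sketch of the
refactoring pattern the patch applies: the callee previously owned a local
tcp_sacktag_state and exported rtt_us through a separate out-parameter;
moving the struct into the caller and passing it by pointer lets the caller
read whatever fields the callee fills in. All names and values below are
simplified stand-ins, not the actual tcp_input.c definitions.

#include <stdio.h>

/* Illustrative stand-in for struct tcp_sacktag_state (simplified). */
struct sacktag_state {
	int flag;
	int reord;
	long rtt_us;	/* filled in by the callee, read by the caller */
};

/* Callee: writes into the caller-owned state instead of a local copy,
 * so no separate out-parameter for rtt_us is needed.
 */
static int sacktag_write_queue(struct sacktag_state *state)
{
	state->flag = 0x01;
	state->reord = 3;
	state->rtt_us = 250;	/* e.g. a SACK-derived RTT sample, in usec */
	return state->flag;
}

int main(void)
{
	struct sacktag_state sack_state;
	int flag;

	sack_state.rtt_us = -1L;	/* caller sets the "no sample" default */
	flag = sacktag_write_queue(&sack_state);

	/* The caller can now consume rtt_us directly; later patches can
	 * add more fields without changing the function signature again.
	 */
	printf("flag=%#x rtt_us=%ld\n", flag, sack_state.rtt_us);
	return 0;
}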

> ---
>  net/ipv4/tcp_input.c | 45 ++++++++++++++++++++++-----------------------
>  1 file changed, 22 insertions(+), 23 deletions(-)
>
> diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
> index bc790ea..9902cf1 100644
> --- a/net/ipv4/tcp_input.c
> +++ b/net/ipv4/tcp_input.c
> @@ -1634,7 +1634,7 @@ static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_bl
>
>  static int
>  tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
> -                       u32 prior_snd_una, long *sack_rtt_us)
> +                       u32 prior_snd_una, struct tcp_sacktag_state *state)
>  {
>         struct tcp_sock *tp = tcp_sk(sk);
>         const unsigned char *ptr = (skb_transport_header(ack_skb) +
> @@ -1642,7 +1642,6 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
>         struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
>         struct tcp_sack_block sp[TCP_NUM_SACKS];
>         struct tcp_sack_block *cache;
> -       struct tcp_sacktag_state state;
>         struct sk_buff *skb;
>         int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3);
>         int used_sacks;
> @@ -1650,9 +1649,8 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
>         int i, j;
>         int first_sack_index;
>
> -       state.flag = 0;
> -       state.reord = tp->packets_out;
> -       state.rtt_us = -1L;
> +       state->flag = 0;
> +       state->reord = tp->packets_out;
>
>         if (!tp->sacked_out) {
>                 if (WARN_ON(tp->fackets_out))
> @@ -1663,7 +1661,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
>         found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire,
>                                          num_sacks, prior_snd_una);
>         if (found_dup_sack)
> -               state.flag |= FLAG_DSACKING_ACK;
> +               state->flag |= FLAG_DSACKING_ACK;
>
>         /* Eliminate too old ACKs, but take into
>          * account more or less fresh ones, they can
> @@ -1728,7 +1726,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
>         }
>
>         skb = tcp_write_queue_head(sk);
> -       state.fack_count = 0;
> +       state->fack_count = 0;
>         i = 0;
>
>         if (!tp->sacked_out) {
> @@ -1762,10 +1760,10 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
>
>                         /* Head todo? */
>                         if (before(start_seq, cache->start_seq)) {
> -                               skb = tcp_sacktag_skip(skb, sk, &state,
> +                               skb = tcp_sacktag_skip(skb, sk, state,
>                                                        start_seq);
>                                 skb = tcp_sacktag_walk(skb, sk, next_dup,
> -                                                      &state,
> +                                                      state,
>                                                        start_seq,
>                                                        cache->start_seq,
>                                                        dup_sack);
> @@ -1776,7 +1774,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
>                                 goto advance_sp;
>
>                         skb = tcp_maybe_skipping_dsack(skb, sk, next_dup,
> -                                                      &state,
> +                                                      state,
>                                                        cache->end_seq);
>
>                         /* ...tail remains todo... */
> @@ -1785,12 +1783,12 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
>                                 skb = tcp_highest_sack(sk);
>                                 if (!skb)
>                                         break;
> -                               state.fack_count = tp->fackets_out;
> +                               state->fack_count = tp->fackets_out;
>                                 cache++;
>                                 goto walk;
>                         }
>
> -                       skb = tcp_sacktag_skip(skb, sk, &state, cache->end_seq);
> +                       skb = tcp_sacktag_skip(skb, sk, state, cache->end_seq);
>                         /* Check overlap against next cached too (past this one already) */
>                         cache++;
>                         continue;
> @@ -1800,12 +1798,12 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
>                         skb = tcp_highest_sack(sk);
>                         if (!skb)
>                                 break;
> -                       state.fack_count = tp->fackets_out;
> +                       state->fack_count = tp->fackets_out;
>                 }
> -               skb = tcp_sacktag_skip(skb, sk, &state, start_seq);
> +               skb = tcp_sacktag_skip(skb, sk, state, start_seq);
>
>  walk:
> -               skb = tcp_sacktag_walk(skb, sk, next_dup, &state,
> +               skb = tcp_sacktag_walk(skb, sk, next_dup, state,
>                                        start_seq, end_seq, dup_sack);
>
>  advance_sp:
> @@ -1820,9 +1818,9 @@ advance_sp:
>         for (j = 0; j < used_sacks; j++)
>                 tp->recv_sack_cache[i++] = sp[j];
>
> -       if ((state.reord < tp->fackets_out) &&
> +       if ((state->reord < tp->fackets_out) &&
>             ((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker))
> -               tcp_update_reordering(sk, tp->fackets_out - state.reord, 0);
> +               tcp_update_reordering(sk, tp->fackets_out - state->reord, 0);
>
>         tcp_mark_lost_retrans(sk);
>         tcp_verify_left_out(tp);
> @@ -1834,8 +1832,7 @@ out:
>         WARN_ON((int)tp->retrans_out < 0);
>         WARN_ON((int)tcp_packets_in_flight(tp) < 0);
>  #endif
> -       *sack_rtt_us = state.rtt_us;
> -       return state.flag;
> +       return state->flag;
>  }
>
>  /* Limits sacked_out so that sum with lost_out isn't ever larger than
> @@ -3459,6 +3456,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
>  {
>         struct inet_connection_sock *icsk = inet_csk(sk);
>         struct tcp_sock *tp = tcp_sk(sk);
> +       struct tcp_sacktag_state sack_state;
>         u32 prior_snd_una = tp->snd_una;
>         u32 ack_seq = TCP_SKB_CB(skb)->seq;
>         u32 ack = TCP_SKB_CB(skb)->ack_seq;
> @@ -3467,7 +3465,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
>         int prior_packets = tp->packets_out;
>         const int prior_unsacked = tp->packets_out - tp->sacked_out;
>         int acked = 0; /* Number of packets newly acked */
> -       long sack_rtt_us = -1L;
> +
> +       sack_state.rtt_us = -1L;
>
>         /* We very likely will need to access write queue head. */
>         prefetchw(sk->sk_write_queue.next);
> @@ -3531,7 +3530,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
>
>                 if (TCP_SKB_CB(skb)->sacked)
>                         flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
> -                                                       &sack_rtt_us);
> +                                                       &sack_state);
>
>                 if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) {
>                         flag |= FLAG_ECE;
> @@ -3556,7 +3555,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
>         /* See if we can take anything off of the retransmit queue. */
>         acked = tp->packets_out;
>         flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una,
> -                                   sack_rtt_us);
> +                                   sack_state.rtt_us);
>         acked -= tp->packets_out;
>
>         /* Advance cwnd if state allows */
> @@ -3608,7 +3607,7 @@ old_ack:
>          */
>         if (TCP_SKB_CB(skb)->sacked) {
>                 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
> -                                               &sack_rtt_us);
> +                                               &sack_state);
>                 tcp_fastretrans_alert(sk, acked, prior_unsacked,
>                                       is_dupack, flag);
>         }
> --
> 2.1.0
>
--
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
