diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 8e0f6ae..165040e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5395,6 +5395,12 @@ discard:
 	return 0;
 }
 
+/*
+ * Returns:
+ *	+1 on reset,
+ *	0 success and/or SYNACK data,
+ *	-1 on discard.
+ */
 static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 					 struct tcphdr *th)
 {
@@ -5403,6 +5409,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_cookie_values *cvp = tp->cookie_values;
 	int saved_clamp = tp->rx_opt.mss_clamp;
+	int queued = 0;
 
 	tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0);
 
@@ -5509,6 +5516,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 					- TCPOLEN_COOKIE_BASE;
 			int cookie_pair_size = cookie_size
 					     + cvp->cookie_desired;
+			int tcp_header_len = tcp_header_len_th(th);
 
 			/* A cookie extension option was sent and returned.
 			 * Note that each incoming SYNACK replaces the
@@ -5524,6 +5532,19 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 				       hash_location, cookie_size);
 				cvp->cookie_pair_size = cookie_pair_size;
 			}
+
+			queued = skb->len - tcp_header_len;
+			if (queued > 0) {
+				/* Queue incoming transaction data. */
+				__skb_pull(skb, tcp_header_len);
+				__skb_queue_tail(&sk->sk_receive_queue, skb);
+				skb_set_owner_r(skb, sk);
+				sk->sk_data_ready(sk, 0);
+				cvp->s_data_in = 1; /* true */
+				tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+				tp->rcv_wup = TCP_SKB_CB(skb)->end_seq;
+				tp->copied_seq = TCP_SKB_CB(skb)->seq + 1;
+			}
 		}
 
 		smp_mb();
@@ -5577,11 +5598,14 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 						  TCP_DELACK_MAX, TCP_RTO_MAX);
 
 discard:
-			__kfree_skb(skb);
+			if (queued <= 0)
+				__kfree_skb(skb);
 			return 0;
 		} else {
 			tcp_send_ack(sk);
 		}
+		if (queued > 0)
+			return 0;
 		return -1;
 	}
 
-- 
1.6.3.3
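
For context, the three-way return convention documented in the new comment is acted on by the TCP state machine. Below is a minimal sketch of how a caller along the lines of tcp_rcv_state_process() might dispatch on it; that caller is not touched by this patch, so the surrounding case label and cleanup steps are illustrative assumptions, not the patched code.

	case TCP_SYN_SENT:
		queued = tcp_rcv_synsent_state_process(sk, skb, th);
		if (queued >= 0)
			/* +1 (reset) is propagated up; 0 means the skb was
			 * consumed (possibly queued above as SYNACK
			 * transaction data) and must not be freed again.
			 */
			return queued;

		/* -1: this level still owns the skb and discards it. */
		__kfree_skb(skb);
		return 0;

The point of the new "queued" bookkeeping in the patch is exactly this ownership question: once the SYNACK payload has been placed on sk_receive_queue, the discard path must not free the skb a second time, and the function reports 0 instead of -1 so the caller leaves it alone as well.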