Message-ID: <CANn89iLEC4Gwr1P8x3tpFVFObvB4nM5xt0F=nRBNe1hqYOLU9A@mail.gmail.com>
Date: Thu, 7 Nov 2024 10:31:21 +0100
From: Eric Dumazet <edumazet@...gle.com>
To: chia-yu.chang@...ia-bell-labs.com
Cc: netdev@...r.kernel.org, dsahern@...il.com, davem@...emloft.net,
dsahern@...nel.org, pabeni@...hat.com, joel.granados@...nel.org,
kuba@...nel.org, andrew+netdev@...n.ch, horms@...nel.org, pablo@...filter.org,
kadlec@...filter.org, netfilter-devel@...r.kernel.org, coreteam@...filter.org,
ij@...nel.org, ncardwell@...gle.com, koen.de_schepper@...ia-bell-labs.com,
g.white@...lelabs.com, ingemar.s.johansson@...csson.com,
mirja.kuehlewind@...csson.com, cheshire@...le.com, rs.ietf@....at,
Jason_Livingood@...cast.com, vidhi_goel@...le.com
Subject: Re: [PATCH v5 net-next 01/13] tcp: reorganize tcp_in_ack_event() and tcp_count_delivered()
On Tue, Nov 5, 2024 at 11:07 AM <chia-yu.chang@...ia-bell-labs.com> wrote:
>
> From: Ilpo Järvinen <ij@...nel.org>
>
> - Move tcp_count_delivered() earlier and split tcp_count_delivered_ce()
> out of it
> - Move tcp_in_ack_event() later
> - While at it, remove the inline from tcp_in_ack_event() and let
>   the compiler decide
>
> Accurate ECN's heuristics do not know whether the ACE field will
> report a CE counter increase until after the rtx queue has been
> processed, because only then is the number of ACKed bytes/pkts
> available. As the CE decision affects the presence of FLAG_ECE, that
> information is not yet available at the old location of the call to
> tcp_in_ack_event().
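To make the ordering concrete, here is a compact standalone model of
the dependency described above (plain C, not the kernel code;
clean_rtx_queue() and ace_newly_marked() are made-up stand-ins, only
the FLAG_ECE value is copied from net/ipv4/tcp_input.c):

	#include <stdbool.h>
	#include <stdio.h>

	#define FLAG_ECE	0x40	/* as in net/ipv4/tcp_input.c */

	/* made up: newly ACKed pkts found while walking the rtx queue */
	static unsigned int clean_rtx_queue(void) { return 2; }

	/* made up: did the ACE field report a CE increase for them? */
	static bool ace_newly_marked(unsigned int acked_pkts)
	{
		return acked_pkts > 1;
	}

	int main(void)
	{
		int flag = 0;

		/* 1) process the rtx queue: only now are the ACKed
		 *    bytes/pkts known
		 */
		unsigned int acked = clean_rtx_queue();

		/* 2) only now can AccECN decide whether the CE counter
		 *    moved, hence whether FLAG_ECE must be set ...
		 */
		if (ace_newly_marked(acked))
			flag |= FLAG_ECE;

		/* 3) ... so tcp_in_ack_event() has to run after both */
		printf("in_ack_event(flag=%#x)\n", flag);
		return 0;
	}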
>
> Signed-off-by: Ilpo Järvinen <ij@...nel.org>
> Signed-off-by: Chia-Yu Chang <chia-yu.chang@...ia-bell-labs.com>
> ---
> net/ipv4/tcp_input.c | 56 +++++++++++++++++++++++++-------------------
> 1 file changed, 32 insertions(+), 24 deletions(-)
>
> diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
> index 5bdf13ac26ef..fc52eab4fcc9 100644
> --- a/net/ipv4/tcp_input.c
> +++ b/net/ipv4/tcp_input.c
> @@ -413,6 +413,20 @@ static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr
> return false;
> }
>
> +static void tcp_count_delivered_ce(struct tcp_sock *tp, u32 ecn_count)
> +{
> + tp->delivered_ce += ecn_count;
> +}
> +
> +/* Updates the delivered and delivered_ce counts */
> +static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered,
> + bool ece_ack)
> +{
> + tp->delivered += delivered;
> + if (ece_ack)
> + tcp_count_delivered_ce(tp, delivered);
> +}
> +
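Side note: splitting tcp_count_delivered_ce() out presumably lets the
later AccECN patches bump delivered_ce from an ACE counter delta
without a matching delivered increment, i.e. a call pattern roughly
like the sketch below (hypothetical, not part of this patch):

	/* ACE field reported ace_delta newly CE-marked pkts */
	tcp_count_delivered_ce(tp, ace_delta);

while ordinary ACK processing keeps the combined form:

	tcp_count_delivered(tp, delivered, flag & FLAG_ECE);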
> /* Buffer size and advertised window tuning.
> *
> * 1. Tuning sk->sk_sndbuf, when connection enters established state.
> @@ -1148,15 +1162,6 @@ void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
> }
> }
>
> -/* Updates the delivered and delivered_ce counts */
> -static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered,
> - bool ece_ack)
> -{
> - tp->delivered += delivered;
> - if (ece_ack)
> - tp->delivered_ce += delivered;
> -}
> -
> /* This procedure tags the retransmission queue when SACKs arrive.
> *
> * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
> @@ -3856,12 +3861,23 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
> }
> }
>
> -static inline void tcp_in_ack_event(struct sock *sk, u32 flags)
> +static void tcp_in_ack_event(struct sock *sk, int flag)
> {
> const struct inet_connection_sock *icsk = inet_csk(sk);
>
> - if (icsk->icsk_ca_ops->in_ack_event)
> - icsk->icsk_ca_ops->in_ack_event(sk, flags);
> + if (icsk->icsk_ca_ops->in_ack_event) {
> + u32 ack_ev_flags = 0;
> +
> + if (flag & FLAG_WIN_UPDATE)
> + ack_ev_flags |= CA_ACK_WIN_UPDATE;
> + if (flag & FLAG_SLOWPATH) {
> + ack_ev_flags = CA_ACK_SLOWPATH;
This removes the CA_ACK_WIN_UPDATE potentially set just above; I would suggest:

	ack_ev_flags |= CA_ACK_SLOWPATH;
> + if (flag & FLAG_ECE)
> + ack_ev_flags |= CA_ACK_ECE;
> + }
> +
> + icsk->icsk_ca_ops->in_ack_event(sk, ack_ev_flags);
> + }
> }
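FWIW, with that fix folded in, the whole block would read (untested
sketch of the corrected hunk):

	if (icsk->icsk_ca_ops->in_ack_event) {
		u32 ack_ev_flags = 0;

		if (flag & FLAG_WIN_UPDATE)
			ack_ev_flags |= CA_ACK_WIN_UPDATE;
		if (flag & FLAG_SLOWPATH) {
			/* |= so a CA_ACK_WIN_UPDATE set above survives */
			ack_ev_flags |= CA_ACK_SLOWPATH;
			if (flag & FLAG_ECE)
				ack_ev_flags |= CA_ACK_ECE;
		}

		icsk->icsk_ca_ops->in_ack_event(sk, ack_ev_flags);
	}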
>
>