Message-ID: <CALOAHbC4vZvpoGaNfOsox5L72QTz0OiaXerH6UVBnNNHjWONbQ@mail.gmail.com>
Date: Sun, 9 Sep 2018 01:42:40 +0800
From: Yafang Shao <laoar.shao@...il.com>
To: Eric Dumazet <edumazet@...gle.com>,
David Miller <davem@...emloft.net>
Cc: netdev <netdev@...r.kernel.org>,
LKML <linux-kernel@...r.kernel.org>,
Yafang Shao <laoar.shao@...il.com>
Subject: Re: [PATCH net-next] tcp: show number of network segments in some
SNMP counters
On Sun, Sep 9, 2018 at 12:58 AM, Yafang Shao <laoar.shao@...il.com> wrote:
> It is better to show the number of network segments in the SNMP
> counters below, because that is more useful for the user.
> For example, the user could easily figure out how many segments are
> dropped and how many segments are queued in the out-of-order queue.
>
> - LINUX_MIB_TCPRCVQDROP
> - LINUX_MIB_TCPZEROWINDOWDROP
> - LINUX_MIB_TCPBACKLOGDROP
> - LINUX_MIB_TCPMINTTLDROP
> - LINUX_MIB_TCPOFODROP
> - LINUX_MIB_TCPOFOQUEUE
>
> Signed-off-by: Yafang Shao <laoar.shao@...il.com>
> ---
> net/ipv4/tcp_input.c | 18 ++++++++++++------
> net/ipv4/tcp_ipv4.c | 9 ++++++---
> net/ipv6/tcp_ipv6.c | 6 ++++--
> 3 files changed, 22 insertions(+), 11 deletions(-)
>
> diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
> index 62508a2..90f449b 100644
> --- a/net/ipv4/tcp_input.c
> +++ b/net/ipv4/tcp_input.c
> @@ -4496,7 +4496,8 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
> tcp_ecn_check_ce(sk, skb);
>
> if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
> - NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP);
> + NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP,
> + tcp_skb_pcount(skb));
> tcp_drop(sk, skb);
> return;
> }
> @@ -4505,7 +4506,8 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
> tp->pred_flags = 0;
> inet_csk_schedule_ack(sk);
>
> - NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
> + NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE,
> + tcp_skb_pcount(skb));
> seq = TCP_SKB_CB(skb)->seq;
> end_seq = TCP_SKB_CB(skb)->end_seq;
> SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
> @@ -4666,7 +4668,8 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
> skb->len = size;
>
> if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
> - NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
> + NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP,
> + tcp_skb_pcount(skb));
> goto err_free;
> }
>
> @@ -4725,7 +4728,8 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
> */
> if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
> if (tcp_receive_window(tp) == 0) {
> - NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP);
> + NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP,
> + tcp_skb_pcount(skb));
> goto out_of_window;
> }
>
> @@ -4734,7 +4738,8 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
> if (skb_queue_len(&sk->sk_receive_queue) == 0)
> sk_forced_mem_schedule(sk, skb->truesize);
> else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
> - NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
> + NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP,
> + tcp_skb_pcount(skb));
> goto drop;
> }
>
> @@ -4796,7 +4801,8 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
> * remembering D-SACK for its head made in previous line.
> */
> if (!tcp_receive_window(tp)) {
> - NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP);
> + NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP,
> + tcp_skb_pcount(skb));
> goto out_of_window;
> }
> goto queue_and_out;
> diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
> index 09547ef..f2fe14b 100644
> --- a/net/ipv4/tcp_ipv4.c
> +++ b/net/ipv4/tcp_ipv4.c
> @@ -475,7 +475,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
> goto out;
>
> if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
> - __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
> + __NET_ADD_STATS(net, LINUX_MIB_TCPMINTTLDROP,
> + tcp_skb_pcount(skb));
> goto out;
> }
>
> @@ -1633,7 +1634,8 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
>
> if (unlikely(sk_add_backlog(sk, skb, limit))) {
> bh_unlock_sock(sk);
> - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
> + __NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP,
> + tcp_skb_pcount(skb));
> return true;
> }
> return false;
> @@ -1790,7 +1792,8 @@ int tcp_v4_rcv(struct sk_buff *skb)
> }
> }
> if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
> - __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
> + __NET_ADD_STATS(net, LINUX_MIB_TCPMINTTLDROP,
> + tcp_skb_pcount(skb));
> goto discard_and_relse;
> }
>
> diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
> index 03e6b7a..97dfc16 100644
> --- a/net/ipv6/tcp_ipv6.c
> +++ b/net/ipv6/tcp_ipv6.c
> @@ -391,7 +391,8 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
> goto out;
>
> if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
> - __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
> + __NET_ADD_STATS(net, LINUX_MIB_TCPMINTTLDROP,
> + tcp_skb_pcount(skb));
> goto out;
> }
>
> @@ -1523,7 +1524,8 @@ static int tcp_v6_rcv(struct sk_buff *skb)
> }
> }
> if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
> - __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
> + __NET_ADD_STATS(net, LINUX_MIB_TCPMINTTLDROP,
> + tcp_skb_pcount(skb));
> goto discard_and_relse;
> }
>
> --
> 1.8.3.1
>
On second thought, it seems improper to use tcp_skb_pcount(skb) here:
it reads tcp_gso_segs, which is only maintained on the transmit path,
so for a plain received skb it can be 0 and these counters would not
be bumped at all. Sorry about the noise; will send a V2 that derives
the segment count on the receive side instead, roughly as sketched
below.
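A minimal sketch, assuming gso_segs is filled in by GRO and is 0 for
non-aggregated skbs; the helper name is illustrative only, not an
existing kernel function:

	static u16 tcp_rcv_skb_segs(const struct sk_buff *skb)
	{
		/* A non-GRO skb carries exactly one segment;
		 * a GRO-aggregated skb carries gso_segs of them.
		 */
		return max_t(u16, 1, skb_shinfo(skb)->gso_segs);
	}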
Thanks
Yafang