Message-ID: <ZBAQU2qJg6kcud50@localhost.localdomain>
Date: Tue, 14 Mar 2023 07:12:35 +0100
From: Michal Swiatkowski <michal.swiatkowski@...ux.intel.com>
To: Shradha Gupta <shradhagupta@...ux.microsoft.com>
Cc: linux-kernel@...r.kernel.org, linux-hyperv@...r.kernel.org,
linux-rdma@...r.kernel.org, netdev@...r.kernel.org,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Ajay Sharma <sharmaajay@...rosoft.com>,
Leon Romanovsky <leon@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
"K. Y. Srinivasan" <kys@...rosoft.com>,
Haiyang Zhang <haiyangz@...rosoft.com>,
Stephen Hemminger <sthemmin@...rosoft.com>,
Wei Liu <wei.liu@...nel.org>, Dexuan Cui <decui@...rosoft.com>,
Long Li <longli@...rosoft.com>,
Michael Kelley <mikelley@...rosoft.com>
Subject: Re: [PATCH] net: mana: Add new MANA VF performance counters for
easier troubleshooting
On Mon, Mar 13, 2023 at 10:30:10PM -0700, Shradha Gupta wrote:
> Extended performance counter stats in 'ethtool -S <interface>' output
> for MANA VF to facilitate troubleshooting.
>
> Tested-on: Ubuntu22
> Signed-off-by: Shradha Gupta <shradhagupta@...ux.microsoft.com>
> ---
> drivers/net/ethernet/microsoft/mana/mana_en.c | 67 ++++++++++++++++++-
> .../ethernet/microsoft/mana/mana_ethtool.c | 52 +++++++++++++-
> include/net/mana/mana.h | 18 +++++
> 3 files changed, 133 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
> index 6120f2b6684f..9762bdda6df1 100644
> --- a/drivers/net/ethernet/microsoft/mana/mana_en.c
> +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
> @@ -156,6 +156,8 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
> struct mana_txq *txq;
> struct mana_cq *cq;
> int err, len;
> + u16 ihs;
> + int hopbyhop = 0;
RCT (reverse Christmas tree), please order the local variable declarations
from longest to shortest line.
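For the declarations visible in this hunk that would be something like
(only reordering what is shown here, ignoring the hopbyhop comments
further down):

	struct mana_txq *txq;
	struct mana_cq *cq;
	int hopbyhop = 0;
	int err, len;
	u16 ihs;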
>
> if (unlikely(!apc->port_is_up))
> goto tx_drop;
> @@ -166,6 +168,7 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
> txq = &apc->tx_qp[txq_idx].txq;
> gdma_sq = txq->gdma_sq;
> cq = &apc->tx_qp[txq_idx].tx_cq;
> + tx_stats = &txq->stats;
>
> pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
> pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
> @@ -179,10 +182,17 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
>
> pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;
>
> - if (pkt_fmt == MANA_SHORT_PKT_FMT)
> + if (pkt_fmt == MANA_SHORT_PKT_FMT) {
> pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
> - else
> + u64_stats_update_begin(&tx_stats->syncp);
> + tx_stats->short_pkt_fmt++;
> + u64_stats_update_end(&tx_stats->syncp);
> + } else {
> pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
> + u64_stats_update_begin(&tx_stats->syncp);
> + tx_stats->long_pkt_fmt++;
> + u64_stats_update_end(&tx_stats->syncp);
> + }
>
> pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
> pkg.wqe_req.flags = 0;
> @@ -232,9 +242,37 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
> &ipv6_hdr(skb)->daddr, 0,
> IPPROTO_TCP, 0);
> }
> +
> + if (skb->encapsulation) {
> + ihs = skb_inner_tcp_all_headers(skb);
> + u64_stats_update_begin(&tx_stats->syncp);
> + tx_stats->tso_inner_packets++;
> + tx_stats->tso_inner_bytes += skb->len - ihs;
> + u64_stats_update_end(&tx_stats->syncp);
> + } else {
hopbyhop can be defined here, in the branch where it is actually used,
instead of at the top of the function.
> + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
> + ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
> + } else {
> + ihs = skb_tcp_all_headers(skb);
> + if (ipv6_has_hopopt_jumbo(skb)) {
> + hopbyhop = sizeof(struct hop_jumbo_hdr);
> + ihs -= sizeof(struct hop_jumbo_hdr);
> + }
Maybe I missed something, but it looks like this part of the code can be
removed. hopbyhop is only used to calculate tso_bytes. Instead of
subtracting hopbyhop from ihs and then calculating tso_bytes as
len - ihs - hopbyhop, you can remove hopbyhop and calculate tso_bytes
as len - ihs.
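Something like this (untested, just to illustrate the suggestion):

		} else {
			if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
				ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
			else
				ihs = skb_tcp_all_headers(skb);

			u64_stats_update_begin(&tx_stats->syncp);
			tx_stats->tso_packets++;
			/* ihs still includes any hop-by-hop header here,
			 * so len - ihs already gives the TSO payload bytes
			 */
			tx_stats->tso_bytes += skb->len - ihs;
			u64_stats_update_end(&tx_stats->syncp);
		}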
> + }
> +
> + u64_stats_update_begin(&tx_stats->syncp);
> + tx_stats->tso_packets++;
> + tx_stats->tso_bytes += skb->len - ihs - hopbyhop;
> + u64_stats_update_end(&tx_stats->syncp);
> + }
> +
>
[...]
> @@ -1341,11 +1394,17 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
> {
> struct gdma_comp *comp = cq->gdma_comp_buf;
> struct mana_rxq *rxq = cq->rxq;
> + struct net_device *ndev;
> + struct mana_port_context *apc;
RCT here as well, please keep the new declarations in reverse Christmas
tree order (see the sketch below).
> int comp_read, i;
>
> + ndev = rxq->ndev;
> + apc = netdev_priv(ndev);
maybe:
apc = netdev_priv(rxq->ndev);
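and then, with the ndev local dropped (assuming it is not needed
elsewhere in this function) and RCT ordering, the top of
mana_poll_rx_cq() could look roughly like (untested):

	struct gdma_comp *comp = cq->gdma_comp_buf;
	struct mana_rxq *rxq = cq->rxq;
	struct mana_port_context *apc;
	int comp_read, i;

	apc = netdev_priv(rxq->ndev);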
> +
> comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
> WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);
>
> + apc->eth_stats.rx_cqes = comp_read;
> rxq->xdp_flush = false;
>
> for (i = 0; i < comp_read; i++) {
> @@ -1357,6 +1416,8 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
> return;
>
> mana_process_rx_cqe(rxq, cq, &comp[i]);
> +
> + apc->eth_stats.rx_cqes--;
> }
>
> if (rxq->xdp_flush)
>
[...]
> --
> 2.37.2
>