Message-ID: <Y2m+OHPaS6aV6GYx@lunn.ch>
Date:   Tue, 8 Nov 2022 03:26:00 +0100
From:   Andrew Lunn <andrew@...n.ch>
To:     Shenwei Wang <shenwei.wang@....com>
Cc:     "David S. Miller" <davem@...emloft.net>,
        Eric Dumazet <edumazet@...gle.com>,
        Jakub Kicinski <kuba@...nel.org>,
        Paolo Abeni <pabeni@...hat.com>,
        Alexei Starovoitov <ast@...nel.org>,
        Daniel Borkmann <daniel@...earbox.net>,
        Jesper Dangaard Brouer <hawk@...nel.org>,
        John Fastabend <john.fastabend@...il.com>,
        netdev@...r.kernel.org, linux-kernel@...r.kernel.org,
        imx@...ts.linux.dev
Subject: Re: [PATCH 2/2] net: fec: add xdp and page pool statistics

> +enum {
> +	RX_XDP_REDIRECT = 0,
> +	RX_XDP_PASS,
> +	RX_XDP_DROP,
> +	RX_XDP_TX,
> +	RX_XDP_TX_ERRORS,
> +	TX_XDP_XMIT,
> +	TX_XDP_XMIT_ERRORS,
> +	XDP_STATS_TOTAL,
> +};
> +
>  struct fec_enet_priv_tx_q {
>  	struct bufdesc_prop bd;
>  	unsigned char *tx_bounce[TX_RING_SIZE];
> @@ -546,6 +557,7 @@ struct fec_enet_priv_rx_q {
>  	/* page_pool */
>  	struct page_pool *page_pool;
>  	struct xdp_rxq_info xdp_rxq;
> +	u32 stats[XDP_STATS_TOTAL];
>  
>  	/* rx queue number, in the range 0-7 */
>  	u8 id;
> diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
> index 3fb870340c22..89fef370bc10 100644
> --- a/drivers/net/ethernet/freescale/fec_main.c
> +++ b/drivers/net/ethernet/freescale/fec_main.c
> @@ -1523,10 +1523,12 @@ fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
>  
>  	switch (act) {
>  	case XDP_PASS:
> +		rxq->stats[RX_XDP_PASS]++;
>  		ret = FEC_ENET_XDP_PASS;
>  		break;
>  
>  	case XDP_REDIRECT:
> +		rxq->stats[RX_XDP_REDIRECT]++;
>  		err = xdp_do_redirect(fep->netdev, xdp, prog);
>  		if (!err) {
>  			ret = FEC_ENET_XDP_REDIR;
> @@ -1549,6 +1551,7 @@ fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
>  		fallthrough;    /* handle aborts by dropping packet */
>  
>  	case XDP_DROP:
> +		rxq->stats[RX_XDP_DROP]++;
>  		ret = FEC_ENET_XDP_CONSUMED;
>  		page = virt_to_head_page(xdp->data);
>  		page_pool_put_page(rxq->page_pool, page, sync, true);
> @@ -2657,37 +2660,91 @@ static const struct fec_stat {
>  	{ "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
>  };
>  
> -#define FEC_STATS_SIZE		(ARRAY_SIZE(fec_stats) * sizeof(u64))
> +static struct fec_xdp_stat {
> +	char name[ETH_GSTRING_LEN];
> +	u64 count;
> +} fec_xdp_stats[XDP_STATS_TOTAL] = {
> +	{ "rx_xdp_redirect", 0 },           /* RX_XDP_REDIRECT = 0, */
> +	{ "rx_xdp_pass", 0 },               /* RX_XDP_PASS, */
> +	{ "rx_xdp_drop", 0 },               /* RX_XDP_DROP, */
> +	{ "rx_xdp_tx", 0 },                 /* RX_XDP_TX, */
> +	{ "rx_xdp_tx_errors", 0 },          /* RX_XDP_TX_ERRORS, */
> +	{ "tx_xdp_xmit", 0 },               /* TX_XDP_XMIT, */
> +	{ "tx_xdp_xmit_errors", 0 },        /* TX_XDP_XMIT_ERRORS, */
> +};

Why do you mix the string and the count?
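
A plain table of names would be enough, something like this (just a
sketch, the array name is made up):

	static const char fec_xdp_stat_strs[XDP_STATS_TOTAL][ETH_GSTRING_LEN] = {
		"rx_xdp_redirect",
		"rx_xdp_pass",
		"rx_xdp_drop",
		"rx_xdp_tx",
		"rx_xdp_tx_errors",
		"tx_xdp_xmit",
		"tx_xdp_xmit_errors",
	};

The per-queue u32 counters in fec_enet_priv_rx_q already carry the
values, and the count member of the global fec_xdp_stats[] is never
read anywhere, so it buys you nothing.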

> +
> +#define FEC_STATS_SIZE	((ARRAY_SIZE(fec_stats) + \
> +			ARRAY_SIZE(fec_xdp_stats)) * sizeof(u64))
>  
>  static void fec_enet_update_ethtool_stats(struct net_device *dev)
>  {
>  	struct fec_enet_private *fep = netdev_priv(dev);
> -	int i;
> +	struct fec_xdp_stat xdp_stats[7];

You are allocating 7 x name[ETH_GSTRING_LEN] here, which you are not
going to use. All you really need is u64 xdp_stats[XDP_STATS_TOTAL].
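
For example (an untested sketch; note the explicit zero initialisation,
since the code above sums into an uninitialised stack array):

	u64 xdp_stats[XDP_STATS_TOTAL] = { 0 };

	for (i = 0; i < fep->num_rx_queues; i++) {
		rxq = fep->rx_queue[i];
		for (j = 0; j < XDP_STATS_TOTAL; j++)
			xdp_stats[j] += rxq->stats[j];
	}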

> +	int off = ARRAY_SIZE(fec_stats);
> +	struct fec_enet_priv_rx_q *rxq;
> +	int i, j;
>  
>  	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
>  		fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
> +
> +	for (i = fep->num_rx_queues - 1; i >= 0; i--) {
> +		rxq = fep->rx_queue[i];
> +		for (j = 0; j < XDP_STATS_TOTAL; j++)
> +			xdp_stats[j].count += rxq->stats[j];
> +	}
> +
> +	for (i = 0; i < XDP_STATS_TOTAL; i++)
> +		fep->ethtool_stats[i + off] = xdp_stats[i].count;

It would be more logical to use j here.

It is also pretty messy. In fec_enet_get_strings() and
fec_enet_get_sset_count() you deal with the three sets of stats
individually, but here you combine the normal stats and the xdp stats
into one. It will probably come out cleaner if you keep them all
separate, as sketched below.
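
Something along these lines (untested), with each group in its own
loop:

	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
		fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);

	for (j = 0; j < XDP_STATS_TOTAL; j++) {
		u64 total = 0;

		/* sum the per-queue counters for this xdp statistic */
		for (i = 0; i < fep->num_rx_queues; i++)
			total += fep->rx_queue[i]->stats[j];

		fep->ethtool_stats[ARRAY_SIZE(fec_stats) + j] = total;
	}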

>  static void fec_enet_get_ethtool_stats(struct net_device *dev,
>  				       struct ethtool_stats *stats, u64 *data)
>  {
>  	struct fec_enet_private *fep = netdev_priv(dev);
> +	u64 *dst = data + FEC_STATS_SIZE / 8;

Why 8? sizeof(u64) would be a bit clearer. Or just use
ARRAY_SIZE(fec_stats), which is what you actually want.
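
i.e. something like:

	u64 *dst = data + FEC_STATS_SIZE / sizeof(u64);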

>  
>  	if (netif_running(dev))
>  		fec_enet_update_ethtool_stats(dev);
>  
>  	memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
> +
> +	fec_enet_page_pool_stats(fep, dst);
>  }
>  
>  static void fec_enet_get_strings(struct net_device *netdev,
>  	u32 stringset, u8 *data)
>  {
> +	int off = ARRAY_SIZE(fec_stats);
>  	int i;
>  	switch (stringset) {
>  	case ETH_SS_STATS:
>  		for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
>  			memcpy(data + i * ETH_GSTRING_LEN,
>  				fec_stats[i].name, ETH_GSTRING_LEN);
> +		for (i = 0; i < ARRAY_SIZE(fec_xdp_stats); i++)
> +			memcpy(data + (i + off) * ETH_GSTRING_LEN,
> +			       fec_xdp_stats[i].name, ETH_GSTRING_LEN);
> +		off = (i + off) * ETH_GSTRING_LEN;
> +		page_pool_ethtool_stats_get_strings(data + off);

Probably simpler is:

		for (i = 0; i < ARRAY_SIZE(fec_stats); i++) {
			memcpy(data, fec_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		for (i = 0; i < ARRAY_SIZE(fec_xdp_stats); i++) {
			memcpy(data, fec_xdp_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		page_pool_ethtool_stats_get_strings(data);
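
That way all of the off bookkeeping disappears, and the page pool
helper just carries on from wherever data ends up.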
		
	Andrew
