Message-ID: <Zp_kQX3dj3J1_u6o@LQ3V64L9R2>
Date: Tue, 23 Jul 2024 10:11:29 -0700
From: Joe Damato <jdamato@...tly.com>
To: Roger Quadros <rogerq@...nel.org>
Cc: "David S. Miller" <davem@...emloft.net>,
	Eric Dumazet <edumazet@...gle.com>,
	Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>,
	Siddharth Vadapalli <s-vadapalli@...com>,
	Julien Panis <jpanis@...libre.com>, Simon Horman <horms@...nel.org>,
	Andrew Lunn <andrew@...n.ch>, srk@...com, vigneshr@...com,
	danishanwar@...com, Pekka Varis <p-varis@...com>,
	netdev@...r.kernel.org, linux-kernel@...r.kernel.org,
	linux-omap@...r.kernel.org
Subject: Re: [PATCH net-next v3 1/6] net: ethernet: ti: am65-cpsw: Introduce
 multi queue Rx

On Wed, Jul 03, 2024 at 04:51:32PM +0300, Roger Quadros wrote:

[...]

> @@ -699,6 +727,14 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
>  		goto fail_rx;
>  	}
>  
> +	for (i = 0; i < common->rx_ch_num_flows; i++) {
> +		napi_enable(&common->rx_chns.flows[i].napi_rx);
> +		if (common->rx_chns.flows[i].irq_disabled) {
> +			common->rx_chns.flows[i].irq_disabled = false;

Just a minor nit (not a reason to hold this back): I've been
encouraging folks to use the new netdev-genl APIs in their drivers
to map NAPIs to queues and IRQs if possible because it allows for
more expressive and interesting userland applications.

In the future, you may consider using something vaguely like the
following (this is untested pseudo-code I just typed out):

   netif_napi_set_irq(&common->rx_chns.flows[i].napi_rx,
                      common->rx_chns.flows[i].irq);

and 

   netif_queue_set_napi(common->dev, i, NETDEV_QUEUE_TYPE_RX,
                        &common->rx_chns.flows[i].napi_rx);

to link everything together (note that RTNL must be held while doing
this -- I haven't checked your code path to see if that is true here).

For an example, see commit 64b62146ba9e ("net/mlx4: link NAPI
instances to queues and IRQs").
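
Putting the two calls together, the open path could end up looking
vaguely like this (same caveat: an untested sketch, and I'm reusing
common->dev as a stand-in for whichever struct net_device actually
owns queue i in your driver):

   /* Hypothetical sketch: link each flow's NAPI to its IRQ and its
    * RX queue so the mapping is visible over netdev-genl. Assumes
    * RTNL is already held, e.g. if we get here via ndo_open.
    */
   for (i = 0; i < common->rx_ch_num_flows; i++) {
           netif_napi_set_irq(&common->rx_chns.flows[i].napi_rx,
                              common->rx_chns.flows[i].irq);
           netif_queue_set_napi(common->dev, i, NETDEV_QUEUE_TYPE_RX,
                                &common->rx_chns.flows[i].napi_rx);
   }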

Doing this would allow userland to get the NAPI, queue, and IRQ
mappings via netlink, which you can examine yourself by using cli.py
like this:

python3 tools/net/ynl/cli.py \
  --spec Documentation/netlink/specs/netdev.yaml \
  --dump queue-get

python3 tools/net/ynl/cli.py \
  --spec Documentation/netlink/specs/netdev.yaml \
  --dump napi-get

> +			enable_irq(common->rx_chns.flows[i].irq);
> +		}
> +	}
> +
>  	for (tx = 0; tx < common->tx_ch_num; tx++) {
>  		ret = k3_udma_glue_enable_tx_chn(tx_chn[tx].tx_chn);
>  		if (ret) {
> @@ -710,12 +746,6 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
>  		napi_enable(&tx_chn[tx].napi_tx);
>  	}
>  
> -	napi_enable(&common->napi_rx);
> -	if (common->rx_irq_disabled) {
> -		common->rx_irq_disabled = false;
> -		enable_irq(rx_chn->irq);
> -	}
> -
>  	dev_dbg(common->dev, "cpsw_nuss started\n");
>  	return 0;
>  
> @@ -726,11 +756,24 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
>  		tx--;
>  	}
>  
> +	for (flow_idx = 0; flow_idx < common->rx_ch_num_flows; flow_idx++) {
> +		flow = &rx_chn->flows[flow_idx];
> +		if (!flow->irq_disabled) {
> +			disable_irq(flow->irq);
> +			flow->irq_disabled = true;
> +		}
> +		napi_disable(&flow->napi_rx);
> +	}
> +
>  	k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
>  
>  fail_rx:
> -	k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, 0, rx_chn,
> -				  am65_cpsw_nuss_rx_cleanup, 0);
> +	for (i = 0; i < common->rx_ch_num_flows; i++)
> +		k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, &rx_chn->flows[i],
> +					  am65_cpsw_nuss_rx_cleanup, !!i);
> +
> +	am65_cpsw_destroy_xdp_rxqs(common);
> +
>  	return ret;
>  }
>  
> @@ -779,12 +822,12 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
>  			dev_err(common->dev, "rx teardown timeout\n");
>  	}
>  
> -	napi_disable(&common->napi_rx);
> -	hrtimer_cancel(&common->rx_hrtimer);
> -
> -	for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
> -		k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
> +	for (i = 0; i < common->rx_ch_num_flows; i++) {
> +		napi_disable(&common->rx_chns.flows[i].napi_rx);

The inverse of the above probably belongs somewhere around here;
again, a small piece of pseudo-code for illustrative purposes:

   netif_queue_set_napi(common->dev, i, NETDEV_QUEUE_TYPE_RX, NULL);
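
In loop form, that might look vaguely like this (untested, with the
same caveats as above about RTNL and the struct net_device pointer):

   /* Hypothetical sketch: clear each RX queue's NAPI link before
    * disabling the NAPI, so userland doesn't see a stale mapping.
    */
   for (i = 0; i < common->rx_ch_num_flows; i++) {
           netif_queue_set_napi(common->dev, i, NETDEV_QUEUE_TYPE_RX,
                                NULL);
           napi_disable(&common->rx_chns.flows[i].napi_rx);
   }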

> +		hrtimer_cancel(&common->rx_chns.flows[i].rx_hrtimer);
> +		k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, &rx_chn->flows[i],
>  					  am65_cpsw_nuss_rx_cleanup, !!i);
> +	}
>  
>  	k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
>  
