Date:   Tue, 1 Mar 2022 09:16:19 +0000
From:   Martin Habets <habetsm.xilinx@...il.com>
To:     Íñigo Huguet <ihuguet@...hat.com>
Cc:     ecree.xilinx@...il.com, davem@...emloft.net, kuba@...nel.org,
        netdev@...r.kernel.org
Subject: Re: [PATCH v2 net-next 1/2] sfc: default config to 1 channel/core in local NUMA node only

On Mon, Feb 28, 2022 at 02:22:53PM +0100, Íñigo Huguet wrote:
> Handling channels from CPUs in a different NUMA node can penalize
> performance, so it is better to configure only one channel per core in
> the NIC's local NUMA node, rather than one per core in the whole system.
> 
> Fall back to all online cores if there are no online CPUs in the local
> NUMA node.
> 
> Signed-off-by: Íñigo Huguet <ihuguet@...hat.com>

Acked-by: Martin Habets <habetsm.xilinx@...il.com>

> ---
>  drivers/net/ethernet/sfc/efx_channels.c | 50 ++++++++++++++++---------
>  1 file changed, 33 insertions(+), 17 deletions(-)
> 
> diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
> index ead550ae2709..ec6c2f231e73 100644
> --- a/drivers/net/ethernet/sfc/efx_channels.c
> +++ b/drivers/net/ethernet/sfc/efx_channels.c
> @@ -78,31 +78,48 @@ static const struct efx_channel_type efx_default_channel_type = {
>   * INTERRUPTS
>   *************/
>  
> -static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
> +static unsigned int count_online_cores(struct efx_nic *efx, bool local_node)
>  {
> -	cpumask_var_t thread_mask;
> +	cpumask_var_t filter_mask;
>  	unsigned int count;
>  	int cpu;
> +
> +	if (unlikely(!zalloc_cpumask_var(&filter_mask, GFP_KERNEL))) {
> +		netif_warn(efx, probe, efx->net_dev,
> +			   "RSS disabled due to allocation failure\n");
> +		return 1;
> +	}
> +
> +	cpumask_copy(filter_mask, cpu_online_mask);
> +	if (local_node) {
> +		int numa_node = pcibus_to_node(efx->pci_dev->bus);
> +
> +		cpumask_and(filter_mask, filter_mask, cpumask_of_node(numa_node));
> +	}
> +
> +	count = 0;
> +	for_each_cpu(cpu, filter_mask) {
> +		++count;
> +		cpumask_andnot(filter_mask, filter_mask, topology_sibling_cpumask(cpu));
> +	}
> +
> +	free_cpumask_var(filter_mask);
> +
> +	return count;
> +}
> +
> +static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
> +{
> +	unsigned int count;
>  
>  	if (rss_cpus) {
>  		count = rss_cpus;
>  	} else {
> -		if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
> -			netif_warn(efx, probe, efx->net_dev,
> -				   "RSS disabled due to allocation failure\n");
> -			return 1;
> -		}
> -
> -		count = 0;
> -		for_each_online_cpu(cpu) {
> -			if (!cpumask_test_cpu(cpu, thread_mask)) {
> -				++count;
> -				cpumask_or(thread_mask, thread_mask,
> -					   topology_sibling_cpumask(cpu));
> -			}
> -		}
> +		count = count_online_cores(efx, true);
>  
> -		free_cpumask_var(thread_mask);
> +		/* If no online CPUs in local node, fall back to any online CPUs */
> +		if (count == 0)
> +			count = count_online_cores(efx, false);
>  	}
>  
>  	if (count > EFX_MAX_RX_QUEUES) {
> -- 
> 2.34.1
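
For anyone less familiar with the cpumask helpers, here is a minimal
userspace sketch of the sibling-deduplication idea in
count_online_cores() above. The topology is a made-up example (CPU i
and CPU i+4 are SMT siblings); this is illustrative only, not kernel
code:

/* Count physical cores by removing each counted CPU's SMT siblings
 * from the working set, mirroring the cpumask_andnot() +
 * topology_sibling_cpumask() loop in the patch.
 */
#include <stdio.h>

#define NCPUS 8

int main(void)
{
	/* Hypothetical topology: CPU i and CPU i+4 are SMT siblings. */
	unsigned int sibling_of[NCPUS] = { 4, 5, 6, 7, 0, 1, 2, 3 };
	unsigned int filter = 0xff;	/* all 8 CPUs online */
	unsigned int count = 0;
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++) {
		if (!(filter & (1u << cpu)))
			continue;	/* already removed as a sibling */
		count++;
		/* Drop this CPU and its sibling from the set. */
		filter &= ~(1u << cpu);
		filter &= ~(1u << sibling_of[cpu]);
	}

	printf("physical cores: %u\n", count);	/* prints 4 */
	return 0;
}

With the patch applied, the same loop runs first on the mask restricted
to the NIC's NUMA node and, if that yields zero, again on all online
CPUs.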

-- 
Martin Habets <habetsm.xilinx@...il.com>
