lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [day] [month] [year] [list]
Message-ID: <20230809145333.03f31309@canb.auug.org.au>
Date: Wed, 9 Aug 2023 14:53:33 +1000
From: Stephen Rothwell <sfr@...b.auug.org.au>
To: Yury Norov <yury.norov@...il.com>, David Miller <davem@...emloft.net>,
 Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>
Cc: Networking <netdev@...r.kernel.org>, Linux Kernel Mailing List
 <linux-kernel@...r.kernel.org>, Linux Next Mailing List
 <linux-next@...r.kernel.org>, Maher Sanalla <msanalla@...dia.com>, Saeed
 Mahameed <saeedm@...dia.com>
Subject: linux-next: manual merge of the bitmap tree with the net-next tree

Hi all,

Today's linux-next merge of the bitmap tree got a conflict in:

  drivers/net/ethernet/mellanox/mlx5/core/eq.c

between commits:

  54b2cf41b853 ("net/mlx5: Refactor completion IRQ request/release handlers in EQ layer")
  ddd2c79da020 ("net/mlx5: Introduce mlx5_cpumask_default_spread")

from the net-next tree and commit:

  a4be5fa84bb2 ("net: mlx5: switch comp_irqs_request() to using for_each_numa_cpu")

from the bitmap tree.

I fixed it up (I think - see below) and can carry the fix as
necessary. This is now fixed as far as linux-next is concerned, but any
non-trivial conflicts should be mentioned to your upstream maintainer
when your tree is submitted for merging.  You may also want to consider
cooperating with the maintainer of the conflicting tree to minimise any
particularly complex conflicts.

-- 
Cheers,
Stephen Rothwell

diff --cc drivers/net/ethernet/mellanox/mlx5/core/eq.c
index ea0405e0a43f,7c8dc0443d6a..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@@ -815,112 -803,82 +815,105 @@@ void mlx5_eq_update_ci(struct mlx5_eq *
  }
  EXPORT_SYMBOL(mlx5_eq_update_ci);
  
 -static void comp_irqs_release_pci(struct mlx5_core_dev *dev)
 +static void comp_irq_release_pci(struct mlx5_core_dev *dev, u16 vecidx)
  {
  	struct mlx5_eq_table *table = dev->priv.eq_table;
 +	struct mlx5_irq *irq;
  
 -	mlx5_irqs_release_vectors(table->comp_irqs, table->num_comp_eqs);
 +	irq = xa_load(&table->comp_irqs, vecidx);
 +	if (!irq)
 +		return;
 +
 +	xa_erase(&table->comp_irqs, vecidx);
 +	mlx5_irq_release_vector(irq);
  }
  
 -static int comp_irqs_request_pci(struct mlx5_core_dev *dev)
 +static int mlx5_cpumask_default_spread(int numa_node, int index)
  {
- 	const struct cpumask *prev = cpu_none_mask;
- 	const struct cpumask *mask;
 -	struct mlx5_eq_table *table = dev->priv.eq_table;
 -	int ncomp_eqs;
 -	u16 *cpus;
 -	int ret;
 +	int found_cpu = 0;
 +	int i = 0;
- 	int cpu;
+ 	int cpu, hop;
 -	int i;
  
 -	ncomp_eqs = table->num_comp_eqs;
 -	cpus = kcalloc(ncomp_eqs, sizeof(*cpus), GFP_KERNEL);
 -	if (!cpus)
 -		return -ENOMEM;
 -
 -	i = 0;
  	rcu_read_lock();
- 	for_each_numa_hop_mask(mask, numa_node) {
- 		for_each_cpu_andnot(cpu, mask, prev) {
- 			if (i++ == index) {
- 				found_cpu = cpu;
- 				goto spread_done;
- 			}
 -	for_each_numa_online_cpu(cpu, hop, dev->priv.numa_node) {
 -		cpus[i] = cpu;
 -		if (++i == ncomp_eqs)
++	for_each_numa_online_cpu(cpu, hop, numa_node) {
++		if (i++ == index) {
++			found_cpu = cpu;
+ 			break;
 +		}
- 		prev = mask;
  	}
- 
- spread_done:
  	rcu_read_unlock();
 -	ret = mlx5_irqs_request_vectors(dev, cpus, ncomp_eqs, table->comp_irqs, &table->rmap);
 -	kfree(cpus);
 -	return ret;
 +	return found_cpu;
  }
  
 -static void comp_irqs_release_sf(struct mlx5_core_dev *dev)
 +static struct cpu_rmap *mlx5_eq_table_get_pci_rmap(struct mlx5_core_dev *dev)
  {
 -	struct mlx5_eq_table *table = dev->priv.eq_table;
 -
 -	mlx5_irq_affinity_irqs_release(dev, table->comp_irqs, table->num_comp_eqs);
 +#ifdef CONFIG_RFS_ACCEL
 +#ifdef CONFIG_MLX5_SF
 +	if (mlx5_core_is_sf(dev))
 +		return dev->priv.parent_mdev->priv.eq_table->rmap;
 +#endif
 +	return dev->priv.eq_table->rmap;
 +#else
 +	return NULL;
 +#endif
  }
  
 -static int comp_irqs_request_sf(struct mlx5_core_dev *dev)
 +static int comp_irq_request_pci(struct mlx5_core_dev *dev, u16 vecidx)
  {
  	struct mlx5_eq_table *table = dev->priv.eq_table;
 -	int ncomp_eqs = table->num_comp_eqs;
 +	struct cpu_rmap *rmap;
 +	struct mlx5_irq *irq;
 +	int cpu;
  
 -	return mlx5_irq_affinity_irqs_request_auto(dev, ncomp_eqs, table->comp_irqs);
 +	rmap = mlx5_eq_table_get_pci_rmap(dev);
 +	cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vecidx);
 +	irq = mlx5_irq_request_vector(dev, cpu, vecidx, &rmap);
 +	if (IS_ERR(irq))
 +		return PTR_ERR(irq);
 +
 +	return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
  }
  
 -static void comp_irqs_release(struct mlx5_core_dev *dev)
 +static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx)
  {
  	struct mlx5_eq_table *table = dev->priv.eq_table;
 +	struct mlx5_irq *irq;
  
 -	mlx5_core_is_sf(dev) ? comp_irqs_release_sf(dev) :
 -			       comp_irqs_release_pci(dev);
 +	irq = xa_load(&table->comp_irqs, vecidx);
 +	if (!irq)
 +		return;
  
 -	kfree(table->comp_irqs);
 +	xa_erase(&table->comp_irqs, vecidx);
 +	mlx5_irq_affinity_irq_release(dev, irq);
  }
  
 -static int comp_irqs_request(struct mlx5_core_dev *dev)
 +static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
  {
  	struct mlx5_eq_table *table = dev->priv.eq_table;
 -	int ncomp_eqs;
 -	int ret;
 +	struct mlx5_irq *irq;
  
 -	ncomp_eqs = table->num_comp_eqs;
 -	table->comp_irqs = kcalloc(ncomp_eqs, sizeof(*table->comp_irqs), GFP_KERNEL);
 -	if (!table->comp_irqs)
 -		return -ENOMEM;
 +	irq = mlx5_irq_affinity_irq_request_auto(dev, &table->used_cpus, vecidx);
 +	if (IS_ERR(irq)) {
 +		/* In case SF irq pool does not exist, fallback to the PF irqs*/
 +		if (PTR_ERR(irq) == -ENOENT)
 +			return comp_irq_request_pci(dev, vecidx);
  
 -	ret = mlx5_core_is_sf(dev) ? comp_irqs_request_sf(dev) :
 -				     comp_irqs_request_pci(dev);
 -	if (ret < 0)
 -		kfree(table->comp_irqs);
 +		return PTR_ERR(irq);
 +	}
  
 -	return ret;
 +	return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
 +}
 +
 +static void comp_irq_release(struct mlx5_core_dev *dev, u16 vecidx)
 +{
 +	mlx5_core_is_sf(dev) ? comp_irq_release_sf(dev, vecidx) :
 +			       comp_irq_release_pci(dev, vecidx);
 +}
 +
 +static int comp_irq_request(struct mlx5_core_dev *dev, u16 vecidx)
 +{
 +	return mlx5_core_is_sf(dev) ? comp_irq_request_sf(dev, vecidx) :
 +				      comp_irq_request_pci(dev, vecidx);
  }
  
  #ifdef CONFIG_RFS_ACCEL

Content of type "application/pgp-signature" skipped

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ