Message-Id: <20230925020528.777578-2-yury.norov@gmail.com>
Date: Sun, 24 Sep 2023 19:05:25 -0700
From: Yury Norov <yury.norov@...il.com>
To: linux-kernel@...r.kernel.org,
netdev@...r.kernel.org,
linux-rdma@...r.kernel.org
Cc: Yury Norov <yury.norov@...il.com>,
Tariq Toukan <ttoukan.linux@...il.com>,
Valentin Schneider <vschneid@...hat.com>,
Maher Sanalla <msanalla@...dia.com>,
Ingo Molnar <mingo@...nel.org>,
Mel Gorman <mgorman@...e.de>,
Saeed Mahameed <saeedm@...dia.com>,
Leon Romanovsky <leon@...nel.org>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Juri Lelli <juri.lelli@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>,
Daniel Bristot de Oliveira <bristot@...hat.com>,
Pawel Chmielewski <pawel.chmielewski@...el.com>,
Jacob Keller <jacob.e.keller@...el.com>,
Yury Norov <ynorov@...dia.com>
Subject: [PATCH 1/4] net: mellanox: drop mlx5_cpumask_default_spread()

mlx5_cpumask_default_spread() duplicates the existing
cpumask_local_spread(). Moreover, it's O(N), while
cpumask_local_spread() is based on bsearch() and thus runs in
O(log N). Drop mlx5_cpumask_default_spread() and use the generic
cpumask_local_spread() instead.

Signed-off-by: Yury Norov <yury.norov@...il.com>
Signed-off-by: Yury Norov <ynorov@...dia.com>
---
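A note for reviewers, not part of the commit message: the two helpers
take their arguments in opposite order, so the conversion swaps them at
every call site. A minimal before/after sketch of the mapping (the
variables i and node below are illustrative, not taken verbatim from
the driver):

	int i = 0;			/* vector index */
	int node = dev->priv.numa_node;	/* device's home NUMA node */
	int cpu;

	/* old driver-local helper: takes (node, index) */
	cpu = mlx5_cpumask_default_spread(node, i);

	/* generic replacement: takes (index, node) and is intended to
	 * return the same i-th CPU, spread outward by NUMA distance
	 */
	cpu = cpumask_local_spread(i, node);
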
 drivers/net/ethernet/mellanox/mlx5/core/eq.c | 28 ++------------------
 1 file changed, 2 insertions(+), 26 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index ea0405e0a43f..bd9f857cc52d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -828,30 +828,6 @@ static void comp_irq_release_pci(struct mlx5_core_dev *dev, u16 vecidx)
 	mlx5_irq_release_vector(irq);
 }
 
-static int mlx5_cpumask_default_spread(int numa_node, int index)
-{
-	const struct cpumask *prev = cpu_none_mask;
-	const struct cpumask *mask;
-	int found_cpu = 0;
-	int i = 0;
-	int cpu;
-
-	rcu_read_lock();
-	for_each_numa_hop_mask(mask, numa_node) {
-		for_each_cpu_andnot(cpu, mask, prev) {
-			if (i++ == index) {
-				found_cpu = cpu;
-				goto spread_done;
-			}
-		}
-		prev = mask;
-	}
-
-spread_done:
-	rcu_read_unlock();
-	return found_cpu;
-}
-
 static struct cpu_rmap *mlx5_eq_table_get_pci_rmap(struct mlx5_core_dev *dev)
 {
 #ifdef CONFIG_RFS_ACCEL
@@ -873,7 +849,7 @@ static int comp_irq_request_pci(struct mlx5_core_dev *dev, u16 vecidx)
 	int cpu;
 
 	rmap = mlx5_eq_table_get_pci_rmap(dev);
-	cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vecidx);
+	cpu = cpumask_local_spread(vecidx, dev->priv.numa_node);
 	irq = mlx5_irq_request_vector(dev, cpu, vecidx, &rmap);
 	if (IS_ERR(irq))
 		return PTR_ERR(irq);
@@ -1125,7 +1101,7 @@ int mlx5_comp_vector_get_cpu(struct mlx5_core_dev *dev, int vector)
 	if (mask)
 		cpu = cpumask_first(mask);
 	else
-		cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vector);
+		cpu = cpumask_local_spread(vector, dev->priv.numa_node);
 
 	return cpu;
 }
--
2.39.2