Message-ID: <8aa75912-d1e3-4865-a723-ae1517e9322c@gmail.com>
Date: Mon, 21 Jul 2025 09:37:09 +0300
From: Tariq Toukan <ttoukan.linux@...il.com>
To: Zhu Yanjun <yanjun.zhu@...ux.dev>, saeedm@...dia.com, leon@...nel.org,
tariqt@...dia.com, andrew+netdev@...n.ch, davem@...emloft.net,
edumazet@...gle.com, kuba@...nel.org, netdev@...r.kernel.org,
linux-rdma@...r.kernel.org
Subject: Re: [PATCHv3 next-next 1/1] net/mlx5: Fix build -Wframe-larger-than
warnings
On 21/07/2025 1:41, Zhu Yanjun wrote:
> On 2025/7/20 9:08, Zhu Yanjun wrote:
>> When building, the following warnings appear:
>> "
>> pci_irq.c: In function ‘mlx5_ctrl_irq_request’:
>> pci_irq.c:494:1: warning: the frame size of 1040 bytes is larger than
>> 1024 bytes [-Wframe-larger-than=]
>>
>> pci_irq.c: In function ‘mlx5_irq_request_vector’:
>> pci_irq.c:561:1: warning: the frame size of 1040 bytes is larger than
>> 1024 bytes [-Wframe-larger-than=]
>>
>> eq.c: In function ‘comp_irq_request_sf’:
>> eq.c:897:1: warning: the frame size of 1080 bytes is larger than 1024
>> bytes [-Wframe-larger-than=]
>>
>> irq_affinity.c: In function ‘irq_pool_request_irq’:
>> irq_affinity.c:74:1: warning: the frame size of 1048 bytes is larger
>> than 1024 bytes [-Wframe-larger-than=]
>> "
>>
>> These warnings indicate that the stack frame size exceeds 1024 bytes in
>> these functions.
>>
>> To resolve this, instead of allocating large memory buffers on the stack,
>> it is better to use kvzalloc to allocate memory dynamically on the heap.
>> This approach reduces stack usage and eliminates these frame size
>> warnings.
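
For anyone skimming: the pattern applied throughout is a plain
stack-to-heap conversion (struct irq_affinity_desc embeds a struct
cpumask, which grows with NR_CPUS, hence the large frames). A minimal
illustrative sketch; example_request() and do_request() below are
made-up names, not from this patch:

	static int example_request(struct device *dev)
	{
		/* was: struct irq_affinity_desc af_desc = {}; on the stack */
		struct irq_affinity_desc *af_desc;
		int err;

		af_desc = kvzalloc(sizeof(*af_desc), GFP_KERNEL);
		if (!af_desc)
			return -ENOMEM;

		err = do_request(dev, af_desc);	/* hypothetical consumer */

		kvfree(af_desc);	/* kvfree() pairs with kvzalloc() */
		return err;
	}
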
>>
>> Signed-off-by: Zhu Yanjun <yanjun.zhu@...ux.dev>
>
> Sorry, I missed the following Acked-by:
>
> Acked-by: Junxian Huang <huangjunxian6@...ilicon.com>
>
> Zhu Yanjun
>
>> ---
>> v2 -> v3: No changes, just resent targeting net-next;
You wrote next-next in the subject by mistake.
>> v1 -> v2: Add kvfree to error handler;
>>
>> 1. This commit is only build-tested;
>> 2. All the changes are on the configuration path and will not affect
>> performance;
>> 3. This commit only fixes build warnings, not errors or bugs, so no
>> Fixes tag.
>> ---
>> drivers/net/ethernet/mellanox/mlx5/core/eq.c | 24 +++++++----
>> .../mellanox/mlx5/core/irq_affinity.c | 19 +++++++--
>> .../net/ethernet/mellanox/mlx5/core/pci_irq.c | 40 +++++++++++++------
>> 3 files changed, 60 insertions(+), 23 deletions(-)
>>
>> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
>> index dfb079e59d85..4938dd7c3a09 100644
>> --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
>> +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
>> @@ -873,19 +873,29 @@ static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
>> {
>> struct mlx5_irq_pool *pool = mlx5_irq_table_get_comp_irq_pool(dev);
>> struct mlx5_eq_table *table = dev->priv.eq_table;
>> - struct irq_affinity_desc af_desc = {};
>> + struct irq_affinity_desc *af_desc;
>> struct mlx5_irq *irq;
Keep an empty line after the variable declarations.
>> + af_desc = kvzalloc(sizeof(*af_desc), GFP_KERNEL);
>> + if (!af_desc)
>> + return -ENOMEM;
>> +
Better to move the allocation to after the early-return block (the
mlx5_irq_pool_is_sf_pool() check), so the PF fallback path doesn't need
to kvfree() at all.
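Something like this (untested sketch, reusing the lines already in the
patch):

	/* In case SF irq pool does not exist, fallback to the PF irqs*/
	if (!mlx5_irq_pool_is_sf_pool(pool))
		return comp_irq_request_pci(dev, vecidx);

	af_desc = kvzalloc(sizeof(*af_desc), GFP_KERNEL);
	if (!af_desc)
		return -ENOMEM;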
>> /* In case SF irq pool does not exist, fallback to the PF irqs*/
>> - if (!mlx5_irq_pool_is_sf_pool(pool))
>> + if (!mlx5_irq_pool_is_sf_pool(pool)) {
>> + kvfree(af_desc);
>> return comp_irq_request_pci(dev, vecidx);
>> + }
>> - af_desc.is_managed = false;
>> - cpumask_copy(&af_desc.mask, cpu_online_mask);
>> - cpumask_andnot(&af_desc.mask, &af_desc.mask, &table->used_cpus);
>> - irq = mlx5_irq_affinity_request(dev, pool, &af_desc);
>> - if (IS_ERR(irq))
>> + af_desc->is_managed = false;
>> + cpumask_copy(&af_desc->mask, cpu_online_mask);
>> + cpumask_andnot(&af_desc->mask, &af_desc->mask, &table->used_cpus);
>> + irq = mlx5_irq_affinity_request(dev, pool, af_desc);
>> + if (IS_ERR(irq)) {
>> + kvfree(af_desc);
>> return PTR_ERR(irq);
>> + }
>> +
>> + kvfree(af_desc);
I would free it only once, right before the return.
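i.e., something like (untested):

	irq = mlx5_irq_affinity_request(dev, pool, af_desc);
	kvfree(af_desc);
	if (IS_ERR(irq))
		return PTR_ERR(irq);

That leaves a single kvfree() covering both the error and success
paths; mlx5_irq_affinity_request() does not hold on to af_desc, as your
own success path (which frees right after the call) already shows.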
>> cpumask_or(&table->used_cpus, &table->used_cpus,
>> mlx5_irq_get_affinity_mask(irq));
>> mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
>> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
>> index 2691d88cdee1..82d3c2568244 100644
>> --- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
>> +++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
>> @@ -47,29 +47,40 @@ static int cpu_get_least_loaded(struct mlx5_irq_pool *pool,
>> static struct mlx5_irq *
>> irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
>> {
>> - struct irq_affinity_desc auto_desc = {};
>> + struct irq_affinity_desc *auto_desc;
>> struct mlx5_irq *irq;
>> u32 irq_index;
>> int err;
>> + auto_desc = kvzalloc(sizeof(*auto_desc), GFP_KERNEL);
>> + if (!auto_desc)
>> + return ERR_PTR(-ENOMEM);
>> +
>> err = xa_alloc(&pool->irqs, &irq_index, NULL, pool->xa_num_irqs,
>> GFP_KERNEL);
>> - if (err)
>> + if (err) {
>> + kvfree(auto_desc);
>> return ERR_PTR(err);
>> + }
>> +
>> if (pool->irqs_per_cpu) {
>> if (cpumask_weight(&af_desc->mask) > 1)
>> /* if req_mask contain more then one CPU, set the least loadad CPU
>> * of req_mask
>> */
>> cpumask_set_cpu(cpu_get_least_loaded(pool, &af_desc->mask),
>> - &auto_desc.mask);
>> + &auto_desc->mask);
>> else
>> cpu_get(pool, cpumask_first(&af_desc->mask));
>> }
>> +
>> irq = mlx5_irq_alloc(pool, irq_index,
>> - cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc,
>> + cpumask_empty(&auto_desc->mask) ? af_desc : auto_desc,
>> NULL);
>> if (IS_ERR(irq))
>> xa_erase(&pool->irqs, irq_index);
>> +
>> + kvfree(auto_desc);
>> +
>> return irq;
>> }
>> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
>> index 40024cfa3099..48aad94b0a5d 100644
>> --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
>> +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
>> @@ -470,26 +470,32 @@ void mlx5_ctrl_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *ctrl_irq)
>> struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev)
>> {
>> struct mlx5_irq_pool *pool = ctrl_irq_pool_get(dev);
>> - struct irq_affinity_desc af_desc;
>> + struct irq_affinity_desc *af_desc;
>> struct mlx5_irq *irq;
>> - cpumask_copy(&af_desc.mask, cpu_online_mask);
>> - af_desc.is_managed = false;
>> + af_desc = kvzalloc(sizeof(*af_desc), GFP_KERNEL);
>> + if (!af_desc)
>> + return ERR_PTR(-ENOMEM);
>> +
>> + cpumask_copy(&af_desc->mask, cpu_online_mask);
>> + af_desc->is_managed = false;
>> if (!mlx5_irq_pool_is_sf_pool(pool)) {
>> /* In case we are allocating a control IRQ from a pci device's pool.
>> * This can happen also for a SF if the SFs pool is empty.
>> */
>> if (!pool->xa_num_irqs.max) {
>> - cpumask_clear(&af_desc.mask);
>> + cpumask_clear(&af_desc->mask);
>> /* In case we only have a single IRQ for PF/VF */
>> - cpumask_set_cpu(cpumask_first(cpu_online_mask), &af_desc.mask);
>> + cpumask_set_cpu(cpumask_first(cpu_online_mask), &af_desc->mask);
>> }
>> /* Allocate the IRQ in index 0. The vector was already allocated */
>> - irq = irq_pool_request_vector(pool, 0, &af_desc, NULL);
>> + irq = irq_pool_request_vector(pool, 0, af_desc, NULL);
>> } else {
>> - irq = mlx5_irq_affinity_request(dev, pool, &af_desc);
>> + irq = mlx5_irq_affinity_request(dev, pool, af_desc);
>> }
>> + kvfree(af_desc);
>> +
>> return irq;
>> }
>> @@ -548,16 +554,26 @@ struct mlx5_irq *mlx5_irq_request_vector(struct mlx5_core_dev *dev, u16 cpu,
>> {
>> struct mlx5_irq_table *table = mlx5_irq_table_get(dev);
>> struct mlx5_irq_pool *pool = table->pcif_pool;
>> - struct irq_affinity_desc af_desc;
>> + struct irq_affinity_desc *af_desc;
While here, fix the broken RCT (reverse xmas tree) ordering of the
local variable declarations.
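i.e., longest line first:

	struct mlx5_irq_table *table = mlx5_irq_table_get(dev);
	struct mlx5_irq_pool *pool = table->pcif_pool;
	int offset = MLX5_IRQ_VEC_COMP_BASE;
	struct irq_affinity_desc *af_desc;
	struct mlx5_irq *irq;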
>> int offset = MLX5_IRQ_VEC_COMP_BASE;
>> + struct mlx5_irq *irq;
>> +
>> + af_desc = kvzalloc(sizeof(*af_desc), GFP_KERNEL);
>> + if (!af_desc)
>> + return ERR_PTR(-ENOMEM);
>> if (!pool->xa_num_irqs.max)
>> offset = 0;
>> - af_desc.is_managed = false;
>> - cpumask_clear(&af_desc.mask);
>> - cpumask_set_cpu(cpu, &af_desc.mask);
>> - return mlx5_irq_request(dev, vecidx + offset, &af_desc, rmap);
>> + af_desc->is_managed = false;
>> + cpumask_clear(&af_desc->mask);
>> + cpumask_set_cpu(cpu, &af_desc->mask);
>> +
>> + irq = mlx5_irq_request(dev, vecidx + offset, af_desc, rmap);
>> +
>> + kvfree(af_desc);
>> +
>> + return irq;
>> }
>> static struct mlx5_irq_pool *
>
>