Message-Id: <20201209211312.3850588-2-anthony.l.nguyen@intel.com>
Date: Wed, 9 Dec 2020 13:13:04 -0800
From: Tony Nguyen <anthony.l.nguyen@...el.com>
To: davem@...emloft.net, kuba@...nel.org
Cc: Bruce Allan <bruce.w.allan@...el.com>, netdev@...r.kernel.org,
sassmann@...hat.com, anthony.l.nguyen@...el.com,
Harikumar Bokkena <harikumarx.bokkena@...el.com>
Subject: [net-next v4 1/9] ice: cleanup stack hog
From: Bruce Allan <bruce.w.allan@...el.com>
In ice_flow_add_prof_sync(), struct ice_flow_prof_params has recently
grown in size, hogging stack space when it is allocated there. Hogging
stack space should be avoided, so change the allocation to be on the heap
when needed.
Signed-off-by: Bruce Allan <bruce.w.allan@...el.com>
Tested-by: Harikumar Bokkena <harikumarx.bokkena@...el.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@...el.com>
---
drivers/net/ethernet/intel/ice/ice_flow.c | 44 +++++++++++++----------
1 file changed, 26 insertions(+), 18 deletions(-)
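For context, the change follows the common stack-to-heap conversion pattern:
a structure that had been declared on the stack is instead zero-allocated on
the heap, used, and freed on every exit path. Below is a minimal userspace
sketch of that pattern; the names are hypothetical, and calloc()/free() stand
in for the kernel's kzalloc(..., GFP_KERNEL)/kfree().

/*
 * Hypothetical sketch only: a large struct moved from the stack to the
 * heap, mirroring what this patch does with struct ice_flow_prof_params.
 * The struct, function, and error values are made up for illustration.
 */
#include <stdlib.h>

struct big_params {
	unsigned char es[4096];	/* large member that caused the stack hog */
	int blk;
};

static int do_work(int blk)
{
	struct big_params *params;
	int err = 0;

	/* was: "struct big_params params;" -- roughly 4 KiB on the stack */
	params = calloc(1, sizeof(*params));	/* kzalloc() in kernel code */
	if (!params)
		return -1;			/* ICE_ERR_NO_MEMORY in the driver */

	params->blk = blk;
	/* ... fill in and process params ... */

	free(params);				/* every exit path must free the heap copy */
	return err;
}

int main(void)
{
	return do_work(0) ? 1 : 0;
}

In the patch itself, params->prof stays a devm_kzalloc() allocation because it
outlives this function via *prof on success (and is devm_kfree()d on failure),
while the temporary params structure uses plain kzalloc()/kfree().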
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index eadc85aee389..2a92071bd7d1 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -708,37 +708,42 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
struct ice_flow_seg_info *segs, u8 segs_cnt,
struct ice_flow_prof **prof)
{
- struct ice_flow_prof_params params;
+ struct ice_flow_prof_params *params;
enum ice_status status;
u8 i;
if (!prof)
return ICE_ERR_BAD_PTR;
- memset(&params, 0, sizeof(params));
- params.prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params.prof),
- GFP_KERNEL);
- if (!params.prof)
+ params = kzalloc(sizeof(*params), GFP_KERNEL);
+ if (!params)
return ICE_ERR_NO_MEMORY;
+ params->prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params->prof),
+ GFP_KERNEL);
+ if (!params->prof) {
+ status = ICE_ERR_NO_MEMORY;
+ goto free_params;
+ }
+
/* initialize extraction sequence to all invalid (0xff) */
for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
- params.es[i].prot_id = ICE_PROT_INVALID;
- params.es[i].off = ICE_FV_OFFSET_INVAL;
+ params->es[i].prot_id = ICE_PROT_INVALID;
+ params->es[i].off = ICE_FV_OFFSET_INVAL;
}
- params.blk = blk;
- params.prof->id = prof_id;
- params.prof->dir = dir;
- params.prof->segs_cnt = segs_cnt;
+ params->blk = blk;
+ params->prof->id = prof_id;
+ params->prof->dir = dir;
+ params->prof->segs_cnt = segs_cnt;
/* Make a copy of the segments that need to be persistent in the flow
* profile instance
*/
for (i = 0; i < segs_cnt; i++)
- memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs));
+ memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs));
- status = ice_flow_proc_segs(hw, &params);
+ status = ice_flow_proc_segs(hw, params);
if (status) {
ice_debug(hw, ICE_DBG_FLOW,
"Error processing a flow's packet segments\n");
@@ -746,19 +751,22 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
}
/* Add a HW profile for this flow profile */
- status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes, params.es);
+ status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
+ params->es);
if (status) {
ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
goto out;
}
- INIT_LIST_HEAD(&params.prof->entries);
- mutex_init(&params.prof->entries_lock);
- *prof = params.prof;
+ INIT_LIST_HEAD(&params->prof->entries);
+ mutex_init(&params->prof->entries_lock);
+ *prof = params->prof;
out:
if (status)
- devm_kfree(ice_hw_to_dev(hw), params.prof);
+ devm_kfree(ice_hw_to_dev(hw), params->prof);
+free_params:
+ kfree(params);
return status;
}
--
2.26.2