Message-Id: <20190722150133.1157096-1-arnd@arndb.de>
Date: Mon, 22 Jul 2019 17:01:23 +0200
From: Arnd Bergmann <arnd@...db.de>
To: Ariel Elior <aelior@...vell.com>, GR-everest-linux-l2@...vell.com,
"David S. Miller" <davem@...emloft.net>
Cc: Arnd Bergmann <arnd@...db.de>,
Yuval Mintz <Yuval.Mintz@...gic.com>,
Manish Chopra <manishc@...vell.com>,
Michal Kalderon <michal.kalderon@...vell.com>,
Sudarsana Reddy Kalluru <skalluru@...vell.com>,
Denis Bolotin <denis.bolotin@...ium.com>,
Rahul Verma <rverma@...vell.com>, netdev@...r.kernel.org,
linux-kernel@...r.kernel.org, clang-built-linux@...glegroups.com
Subject: [PATCH net-next] qed: reduce maximum stack frame size

clang warns about an overly large stack frame in one function
when it decides to inline all the __qed_get_vport_*() helpers into
__qed_get_vport_stats(), which is in turn inlined into
_qed_get_vport_stats():

drivers/net/ethernet/qlogic/qed/qed_l2.c:1889:13: error: stack frame size of 1128 bytes in function '_qed_get_vport_stats' [-Werror,-Wframe-larger-than=]

Use a noinline_for_stack annotation to prevent clang from inlining
these, which keeps the maximum stack usage at around half of that
in the worst case, similar to what we get with gcc.

Fixes: 86622ee75312 ("qed: Move statistics to L2 code")
Signed-off-by: Arnd Bergmann <arnd@...db.de>
---
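[Note for reviewers, not part of the patch: the effect is easy to
reproduce in a standalone C file. The sketch below is purely
illustrative; the function names and 512-byte buffers are made up,
and noinline_for_stack is spelled out as the plain noinline
attribute it expands to in the kernel's compiler headers.]

/* frame-sketch.c - illustration only; names and sizes are hypothetical. */
#include <string.h>

/* In the kernel, noinline_for_stack is #defined to noinline and
 * merely documents *why* inlining is suppressed. */
#define noinline_for_stack __attribute__((noinline))

static noinline_for_stack void get_pstats(char *out)
{
	char pstats[512];	/* large temporary, like the per-storm stat structs */

	memset(pstats, 1, sizeof(pstats));
	out[0] = pstats[0];
}

static noinline_for_stack void get_tstats(char *out)
{
	char tstats[512];	/* another large temporary */

	memset(tstats, 2, sizeof(tstats));
	out[1] = tstats[0];
}

/* Without the annotations, the compiler may inline both helpers here
 * and keep both 512-byte arrays live in this frame at once (~1 KiB,
 * which is what -Wframe-larger-than= flags); with them, each helper
 * gets its own short-lived frame, so the worst case is roughly one
 * array plus call overhead. */
static void get_stats(char *out)
{
	get_pstats(out);
	get_tstats(out);
}

int main(void)
{
	char out[2];

	get_stats(out);
	return out[0] + out[1];
}

[Building this with e.g. clang -Wframe-larger-than=600 -c frame-sketch.c
should warn about get_stats() only once the #define is removed,
mirroring the qed case.]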
 drivers/net/ethernet/qlogic/qed/qed_l2.c | 34 +++++++++++-------------
 1 file changed, 15 insertions(+), 19 deletions(-)

diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 9f36e7948222..1a5fc2ae351c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -1631,10 +1631,9 @@ static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
 	}
 }
 
-static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
-                                   struct qed_ptt *p_ptt,
-                                   struct qed_eth_stats *p_stats,
-                                   u16 statistics_bin)
+static noinline_for_stack void
+__qed_get_vport_pstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                       struct qed_eth_stats *p_stats, u16 statistics_bin)
 {
 	struct eth_pstorm_per_queue_stat pstats;
 	u32 pstats_addr = 0, pstats_len = 0;
@@ -1661,10 +1660,9 @@ static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
 		HILO_64_REGPAIR(pstats.error_drop_pkts);
 }
 
-static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
-                                   struct qed_ptt *p_ptt,
-                                   struct qed_eth_stats *p_stats,
-                                   u16 statistics_bin)
+static noinline_for_stack void
+__qed_get_vport_tstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                       struct qed_eth_stats *p_stats, u16 statistics_bin)
 {
 	struct tstorm_per_port_stat tstats;
 	u32 tstats_addr, tstats_len;
@@ -1709,10 +1707,9 @@ static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
 	}
 }
 
-static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
-                                   struct qed_ptt *p_ptt,
-                                   struct qed_eth_stats *p_stats,
-                                   u16 statistics_bin)
+static noinline_for_stack void
+__qed_get_vport_ustats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                       struct qed_eth_stats *p_stats, u16 statistics_bin)
 {
 	struct eth_ustorm_per_queue_stat ustats;
 	u32 ustats_addr = 0, ustats_len = 0;
@@ -1751,10 +1748,9 @@ static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
 	}
 }
 
-static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
-                                   struct qed_ptt *p_ptt,
-                                   struct qed_eth_stats *p_stats,
-                                   u16 statistics_bin)
+static noinline_for_stack void
+__qed_get_vport_mstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                       struct qed_eth_stats *p_stats, u16 statistics_bin)
 {
 	struct eth_mstorm_per_queue_stat mstats;
 	u32 mstats_addr = 0, mstats_len = 0;
@@ -1780,9 +1776,9 @@ static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
 		HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
 }
 
-static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
-                                       struct qed_ptt *p_ptt,
-                                       struct qed_eth_stats *p_stats)
+static noinline_for_stack void
+__qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                           struct qed_eth_stats *p_stats)
 {
 	struct qed_eth_stats_common *p_common = &p_stats->common;
 	struct port_stats port_stats;
--
2.20.0