Message-ID: <20240905171210.267626-1-andriy.shevchenko@linux.intel.com>
Date: Thu, 5 Sep 2024 20:12:10 +0300
From: Andy Shevchenko <andriy.shevchenko@...ux.intel.com>
To: Ingo Molnar <mingo@...nel.org>,
linux-kernel@...r.kernel.org,
llvm@...ts.linux.dev
Cc: Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Juri Lelli <juri.lelli@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>,
Mel Gorman <mgorman@...e.de>,
Valentin Schneider <vschneid@...hat.com>,
Nathan Chancellor <nathan@...nel.org>,
Nick Desaulniers <ndesaulniers@...gle.com>,
Bill Wendling <morbo@...gle.com>,
Justin Stitt <justinstitt@...gle.com>,
Andy Shevchenko <andriy.shevchenko@...ux.intel.com>
Subject: [PATCH v1 1/1] sched/fair: Mark cfs_bandwidth_used() and m*_vruntime() with __maybe_unused

When max_vruntime() and cfs_bandwidth_used() are unused, they break
kernel builds with clang, `make W=1` and CONFIG_WERROR=y:

kernel/sched/fair.c:526:19: error: unused function 'max_vruntime' [-Werror,-Wunused-function]
  526 | static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
      |                   ^~~~~~~~~~~
kernel/sched/fair.c:6580:20: error: unused function 'cfs_bandwidth_used' [-Werror,-Wunused-function]
 6580 | static inline bool cfs_bandwidth_used(void)
      |                    ^~~~~~~~~~~~~~~~~

Fix this by marking them with __maybe_unused; annotate min_vruntime()
and the other cfs_bandwidth_used() definitions as well, for the sake of
symmetry.

See also commit 6863f5643dd7 ("kbuild: allow Clang to find unused static
inline functions for W=1 build").
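
Not part of the change itself, just an illustration: a minimal
standalone sketch of the clang behaviour behind the errors above and of
how __maybe_unused silences it. The file and function names below are
made up; the macro is written out the way <linux/compiler_attributes.h>
defines it.

/*
 * demo.c - hypothetical standalone example, not kernel code.
 * Build with: clang -Wunused-function -Werror -c demo.c
 *
 * Clang (unlike GCC) reports -Wunused-function for an unused
 * 'static inline' function defined in a .c file; -Werror (what
 * CONFIG_WERROR=y adds) turns the warning into a build failure.
 */
#define __maybe_unused __attribute__((__unused__))

/* Dropping __maybe_unused here reproduces "unused function 'demo_helper'". */
static inline __maybe_unused int demo_helper(int x)
{
	return x * 2;
}
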
Signed-off-by: Andy Shevchenko <andriy.shevchenko@...ux.intel.com>
---
kernel/sched/fair.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9057584ec06d..b9d35675db50 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -523,7 +523,7 @@ void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
  * Scheduling class tree data structure manipulation methods:
  */
 
-static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
+static inline __maybe_unused u64 max_vruntime(u64 max_vruntime, u64 vruntime)
 {
 	s64 delta = (s64)(vruntime - max_vruntime);
 	if (delta > 0)
@@ -532,7 +532,7 @@ static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
 	return max_vruntime;
 }
 
-static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
+static inline __maybe_unused u64 min_vruntime(u64 min_vruntime, u64 vruntime)
 {
 	s64 delta = (s64)(vruntime - min_vruntime);
 	if (delta < 0)
@@ -5547,7 +5547,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
 #ifdef CONFIG_JUMP_LABEL
 static struct static_key __cfs_bandwidth_used;
 
-static inline bool cfs_bandwidth_used(void)
+static inline __maybe_unused bool cfs_bandwidth_used(void)
 {
 	return static_key_false(&__cfs_bandwidth_used);
 }
@@ -5562,7 +5562,7 @@ void cfs_bandwidth_usage_dec(void)
 	static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used);
 }
 #else /* CONFIG_JUMP_LABEL */
-static bool cfs_bandwidth_used(void)
+static inline __maybe_unused bool cfs_bandwidth_used(void)
 {
 	return true;
 }
@@ -6577,7 +6577,7 @@ static void sched_fair_update_stop_tick(struct rq *rq, struct task_struct *p)
 
 #else /* CONFIG_CFS_BANDWIDTH */
 
-static inline bool cfs_bandwidth_used(void)
+static inline __maybe_unused bool cfs_bandwidth_used(void)
 {
 	return false;
 }
--
2.43.0.rc1.1336.g36b5255a03ac