Message-Id: <1541656452-3326-1-git-send-email-wang.yi59@zte.com.cn>
Date: Thu, 8 Nov 2018 13:54:12 +0800
From: Yi Wang <wang.yi59@zte.com.cn>
To: mingo@redhat.com
Cc: peterz@infradead.org, linux-kernel@vger.kernel.org,
	zhong.weidong@zte.com.cn, Yi Wang <wang.yi59@zte.com.cn>
Subject: [PATCH] sched/fair: make some functions static

Make some functions static, as they are not used outside of fair.c.
This fixes the following warnings when building with 'W=1' (a minimal
sketch of the warning class follows the list):
kernel/sched/fair.c:2439:6: warning: no previous prototype for ‘task_numa_work’ [-Wmissing-prototypes]
kernel/sched/fair.c:2584:6: warning: no previous prototype for ‘task_tick_numa’ [-Wmissing-prototypes]
kernel/sched/fair.c:3548:6: warning: no previous prototype for ‘sync_entity_load_avg’ [-Wmissing-prototypes]
kernel/sched/fair.c:3561:6: warning: no previous prototype for ‘remove_entity_load_avg’ [-Wmissing-prototypes]
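
For reference, a minimal sketch of this warning class; foo.c and the
helper names below are hypothetical, not taken from this patch:

  /* foo.c -- compile with: gcc -Wmissing-prototypes -c foo.c */
  static void static_helper(void) { }  /* internal linkage: no prototype
                                          expected, so no warning here   */

  void extern_helper(void)             /* external linkage with no prior
                                          declaration: warning fires     */
  {
          static_helper();             /* call it so it is not unused    */
  }

The warnings above can be reproduced for this file alone with:

  make W=1 kernel/sched/fair.o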
Signed-off-by: Yi Wang <wang.yi59@zte.com.cn>
---
kernel/sched/fair.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ee271bb..615e168 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2436,7 +2436,7 @@ static void reset_ptenuma_scan(struct task_struct *p)
* The expensive part of numa migration is done from task_work context.
* Triggered from task_tick_numa().
*/
-void task_numa_work(struct callback_head *work)
+static void task_numa_work(struct callback_head *work)
{
unsigned long migrate, next_scan, now = jiffies;
struct task_struct *p = current;
@@ -2581,7 +2581,7 @@ void task_numa_work(struct callback_head *work)
/*
* Drive the periodic memory faults..
*/
-void task_tick_numa(struct rq *rq, struct task_struct *curr)
+static void task_tick_numa(struct rq *rq, struct task_struct *curr)
{
struct callback_head *work = &curr->numa_work;
u64 period, now;
@@ -3545,7 +3545,7 @@ static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
* Synchronize entity load avg of dequeued entity without locking
* the previous rq.
*/
-void sync_entity_load_avg(struct sched_entity *se)
+static void sync_entity_load_avg(struct sched_entity *se)
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);
u64 last_update_time;
@@ -3558,7 +3558,7 @@ void sync_entity_load_avg(struct sched_entity *se)
* Task first catches up with cfs_rq, and then subtract
* itself from the cfs_rq (task must be off the queue now).
*/
-void remove_entity_load_avg(struct sched_entity *se)
+static void remove_entity_load_avg(struct sched_entity *se)
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);
unsigned long flags;
--
1.8.3.1