Message-Id: <1542163569-20047-26-git-send-email-frederic@kernel.org>
Date: Wed, 14 Nov 2018 03:46:09 +0100
From: Frederic Weisbecker <frederic@...nel.org>
To: LKML <linux-kernel@...r.kernel.org>
Cc: Frederic Weisbecker <frederic@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
Wanpeng Li <wanpengli@...cent.com>,
Thomas Gleixner <tglx@...utronix.de>,
Yauheni Kaliuta <yauheni.kaliuta@...hat.com>,
Ingo Molnar <mingo@...nel.org>, Rik van Riel <riel@...hat.com>
Subject: [PATCH 25/25] sched/vtime: Clarify vtime_task_switch() argument layout
This function deals with both the previous and the next task during a
context switch, but only the previous task is passed as an argument;
the next task is deduced from current. Make the code clearer by passing
both the previous and the next task as explicit arguments.
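To summarize the interface change, the generic prototype in
include/linux/vtime.h becomes:

    extern void vtime_task_switch(struct task_struct *prev,
                                  struct task_struct *next);

and the caller in finish_task_switch() (kernel/sched/core.c) now passes
current explicitly as the next task:

    vtime_task_switch(prev, current);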
Signed-off-by: Frederic Weisbecker <frederic@...nel.org>
Cc: Yauheni Kaliuta <yauheni.kaliuta@...hat.com>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Rik van Riel <riel@...hat.com>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Wanpeng Li <wanpengli@...cent.com>
Cc: Ingo Molnar <mingo@...nel.org>
---
arch/ia64/include/asm/cputime.h    |  3 ++-
arch/ia64/kernel/time.c            |  5 +++--
arch/powerpc/include/asm/cputime.h |  8 +++++---
arch/s390/kernel/vtime.c           | 13 +++++++------
include/linux/vtime.h              | 17 +++++++++++------
kernel/sched/core.c                |  2 +-
kernel/sched/cputime.c             | 18 ++++++++++--------
7 files changed, 39 insertions(+), 27 deletions(-)
diff --git a/arch/ia64/include/asm/cputime.h b/arch/ia64/include/asm/cputime.h
index 3d665c0..0bc90a1 100644
--- a/arch/ia64/include/asm/cputime.h
+++ b/arch/ia64/include/asm/cputime.h
@@ -19,7 +19,8 @@
#define __IA64_CPUTIME_H
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-extern void arch_vtime_task_switch(struct task_struct *tsk);
+extern void arch_vtime_task_switch(struct task_struct *prev,
+ struct task_struct *next);
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#endif /* __IA64_CPUTIME_H */
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 46a9798..908bd4f 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -105,10 +105,11 @@ void vtime_flush(struct task_struct *tsk)
* accumulated times to the current process, and to prepare accounting on
* the next process.
*/
-void arch_vtime_task_switch(struct task_struct *prev)
+void arch_vtime_task_switch(struct task_struct *prev,
+ struct task_struct *next)
{
struct thread_info *pi = task_thread_info(prev);
- struct thread_info *ni = task_thread_info(current);
+ struct thread_info *ni = task_thread_info(next);
ni->ac_stamp = pi->ac_stamp;
ni->ac_stime = ni->ac_utime = 0;
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index ae73dc8..9d68040 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -47,7 +47,8 @@ static inline unsigned long cputime_to_usecs(const cputime_t ct)
*/
#ifdef CONFIG_PPC64
#define get_accounting(tsk) (&get_paca()->accounting)
-static inline void arch_vtime_task_switch(struct task_struct *tsk) { }
+static inline void arch_vtime_task_switch(struct task_struct *prev,
+ struct task_struct *next) { }
#else
#define get_accounting(tsk) (&task_thread_info(tsk)->accounting)
/*
@@ -55,9 +56,10 @@ static inline void arch_vtime_task_switch(struct task_struct *tsk) { }
* accumulated times to the current process, and to prepare accounting on
* the next process.
*/
-static inline void arch_vtime_task_switch(struct task_struct *prev)
+static inline void arch_vtime_task_switch(struct task_struct *prev,
+ struct task_struct *next)
{
- struct cpu_accounting_data *acct = get_accounting(current);
+ struct cpu_accounting_data *acct = get_accounting(next);
struct cpu_accounting_data *acct0 = get_accounting(prev);
acct->starttime = acct0->starttime;
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index b6b888d..fcfeb63 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -191,7 +191,8 @@ static int do_account_vtime(struct task_struct *tsk)
return virt_timer_forward(user + guest + system + hardirq + softirq);
}
-void vtime_task_switch(struct task_struct *prev)
+void vtime_task_switch(struct task_struct *prev,
+ struct task_struct *next)
{
do_account_vtime(prev);
prev->thread.user_timer = S390_lowcore.user_timer;
@@ -199,11 +200,11 @@ void vtime_task_switch(struct task_struct *prev)
prev->thread.system_timer = S390_lowcore.system_timer;
prev->thread.hardirq_timer = S390_lowcore.hardirq_timer;
prev->thread.softirq_timer = S390_lowcore.softirq_timer;
- S390_lowcore.user_timer = current->thread.user_timer;
- S390_lowcore.guest_timer = current->thread.guest_timer;
- S390_lowcore.system_timer = current->thread.system_timer;
- S390_lowcore.hardirq_timer = current->thread.hardirq_timer;
- S390_lowcore.softirq_timer = current->thread.softirq_timer;
+ S390_lowcore.user_timer = next->thread.user_timer;
+ S390_lowcore.guest_timer = next->thread.guest_timer;
+ S390_lowcore.system_timer = next->thread.system_timer;
+ S390_lowcore.hardirq_timer = next->thread.hardirq_timer;
+ S390_lowcore.softirq_timer = next->thread.softirq_timer;
}
/*
diff --git a/include/linux/vtime.h b/include/linux/vtime.h
index b4566d5..188eace 100644
--- a/include/linux/vtime.h
+++ b/include/linux/vtime.h
@@ -16,7 +16,8 @@ struct task_struct;
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)
static inline bool vtime_accounting_enabled_this_cpu(void) { return true; }
-extern void vtime_task_switch(struct task_struct *prev);
+extern void vtime_task_switch(struct task_struct *prev,
+ struct task_struct *next);
#elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN)
@@ -51,19 +52,22 @@ static inline bool vtime_accounting_enabled_this_cpu(void)
return false;
}
-extern void vtime_task_switch_generic(struct task_struct *prev);
+extern void vtime_task_switch_generic(struct task_struct *prev,
+ struct task_struct *next);
-static inline void vtime_task_switch(struct task_struct *prev)
+static inline void vtime_task_switch(struct task_struct *prev,
+ struct task_struct *next)
{
if (vtime_accounting_enabled_this_cpu())
- vtime_task_switch_generic(prev);
+ vtime_task_switch_generic(prev, next);
}
#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
static inline bool vtime_accounting_enabled_cpu(int cpu) {return false; }
static inline bool vtime_accounting_enabled_this_cpu(void) { return false; }
-static inline void vtime_task_switch(struct task_struct *prev) { }
+static inline void vtime_task_switch(struct task_struct *prev,
+ struct task_struct *next) { }
#endif
@@ -78,7 +82,8 @@ static inline void vtime_account_kernel(struct task_struct *tsk) { }
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-extern void arch_vtime_task_switch(struct task_struct *tsk);
+extern void arch_vtime_task_switch(struct task_struct *prev,
+ struct task_struct *next);
extern void vtime_user_enter(struct task_struct *tsk);
extern void vtime_user_exit(struct task_struct *tsk);
extern void vtime_guest_enter(struct task_struct *tsk);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e8f0437..6e315b6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2675,7 +2675,7 @@ static struct rq *finish_task_switch(struct task_struct *prev)
* transition, resulting in a double drop.
*/
prev_state = prev->state;
- vtime_task_switch(prev);
+ vtime_task_switch(prev, current);
perf_event_task_sched_in(prev, current);
finish_task(prev);
finish_lock_switch(rq);
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 3afde9f..c6a953d 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -421,7 +421,8 @@ static inline void irqtime_account_process_tick(struct task_struct *p, int user_
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
# ifndef __ARCH_HAS_VTIME_TASK_SWITCH
-void vtime_task_switch(struct task_struct *prev)
+void vtime_task_switch(struct task_struct *prev,
+ struct task_struct *next)
{
if (is_idle_task(prev))
vtime_account_idle(prev);
@@ -429,7 +430,7 @@ void vtime_task_switch(struct task_struct *prev)
vtime_account_kernel(prev);
vtime_flush(prev);
- arch_vtime_task_switch(prev);
+ arch_vtime_task_switch(prev, next);
}
# endif
@@ -848,7 +849,8 @@ void vtime_account_idle(struct task_struct *tsk)
account_idle_time(get_vtime_delta(&tsk->vtime));
}
-void vtime_task_switch_generic(struct task_struct *prev)
+void vtime_task_switch_generic(struct task_struct *prev,
+ struct task_struct *next)
{
struct vtime *vtime = &prev->vtime;
struct kernel_cpustat *kcpustat = kcpustat_this_cpu;
@@ -869,7 +871,7 @@ void vtime_task_switch_generic(struct task_struct *prev)
write_seqcount_end(&vtime->seqcount);
}
- vtime = &current->vtime;
+ vtime = &next->vtime;
/*
* Ignore the next task if it has been preempted after
@@ -881,18 +883,18 @@ void vtime_task_switch_generic(struct task_struct *prev)
}
write_seqcount_begin(&vtime->seqcount);
- if (is_idle_task(current))
+ if (is_idle_task(next))
vtime->state = VTIME_IDLE;
- else if (current->flags & PF_VCPU)
+ else if (next->flags & PF_VCPU)
vtime->state = VTIME_GUEST;
else
vtime->state = VTIME_SYS;
vtime->starttime = sched_clock();
vtime->cpu = smp_processor_id();
- vtime->nice = (task_nice(current) > 0) ? 1 : 0;
+ vtime->nice = (task_nice(next) > 0) ? 1 : 0;
write_seqcount_end(&vtime->seqcount);
- rcu_assign_pointer(kcpustat->curr, current);
+ rcu_assign_pointer(kcpustat->curr, next);
}
void vtime_init_idle(struct task_struct *t, int cpu)
--
2.7.4