lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Wed, 14 Nov 2018 03:46:03 +0100
From:   Frederic Weisbecker <frederic@...nel.org>
To:     LKML <linux-kernel@...r.kernel.org>
Cc:     Frederic Weisbecker <frederic@...nel.org>,
        Peter Zijlstra <peterz@...radead.org>,
        Wanpeng Li <wanpengli@...cent.com>,
        Thomas Gleixner <tglx@...utronix.de>,
        Yauheni Kaliuta <yauheni.kaliuta@...hat.com>,
        Ingo Molnar <mingo@...nel.org>, Rik van Riel <riel@...hat.com>
Subject: [PATCH 19/25] sched/vtime: Handle nice updates under vtime

On the vtime level, nice updates are currently handled on context
switches. When a task's nice value gets updated while it is sleeping,
the context switch takes into account the new nice value in order to
later record the vtime delta to the appropriate kcpustat index.

We have yet to handle live updates: when set_user_nice() is called
while the target is running. We'll handle that on two sides:

* If the caller of set_user_nice() is the current task, we update the
  vtime state in place.

* If the target runs on a different CPU, we interrupt it with an IPI to
  update the vtime state in place.

The vtime update in question consists of flushing the pending vtime
delta to the task/kcpustat and resuming the accounting on top of the new
nice value.

Signed-off-by: Frederic Weisbecker <frederic@...nel.org>
Cc: Yauheni Kaliuta <yauheni.kaliuta@...hat.com>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Rik van Riel <riel@...hat.com>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Wanpeng Li <wanpengli@...cent.com>
Cc: Ingo Molnar <mingo@...nel.org>
---
 include/linux/vtime.h  |  2 ++
 kernel/sched/core.c    |  4 ++++
 kernel/sched/cputime.c | 41 ++++++++++++++++++++++++++++++++++-------
 kernel/sched/sched.h   | 39 +++++++++++++++++++++++++++++++++++++++
 4 files changed, 79 insertions(+), 7 deletions(-)

diff --git a/include/linux/vtime.h b/include/linux/vtime.h
index a53f6ea..b4566d5 100644
--- a/include/linux/vtime.h
+++ b/include/linux/vtime.h
@@ -85,6 +85,8 @@ extern void vtime_guest_enter(struct task_struct *tsk);
 extern void vtime_guest_exit(struct task_struct *tsk);
 extern void vtime_init_idle(struct task_struct *tsk, int cpu);
 extern void vtime_exit_task(struct task_struct *tsk);
+extern void vtime_set_nice_local(struct task_struct *tsk);
+extern void vtime_set_nice_remote(int cpu);
 #else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN  */
 static inline void vtime_user_enter(struct task_struct *tsk) { }
 static inline void vtime_user_exit(struct task_struct *tsk) { }
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f12225f..e8f0437 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3868,6 +3868,7 @@ void set_user_nice(struct task_struct *p, long nice)
 	int old_prio, delta;
 	struct rq_flags rf;
 	struct rq *rq;
+	long old_nice;
 
 	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
 		return;
@@ -3878,6 +3879,8 @@ void set_user_nice(struct task_struct *p, long nice)
 	rq = task_rq_lock(p, &rf);
 	update_rq_clock(rq);
 
+	old_nice = task_nice(p);
+
 	/*
 	 * The RT priorities are set via sched_setscheduler(), but we still
 	 * allow the 'normal' nice value to be set - but as expected
@@ -3913,6 +3916,7 @@ void set_user_nice(struct task_struct *p, long nice)
 	if (running)
 		set_curr_task(rq, p);
 out_unlock:
+	vtime_set_nice(rq, p, old_nice);
 	task_rq_unlock(rq, p, &rf);
 }
 EXPORT_SYMBOL(set_user_nice);
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 07c2e7f..2b35132 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -733,13 +733,13 @@ static void vtime_account_system(struct task_struct *tsk,
 }
 
 static void vtime_account_guest(struct task_struct *tsk,
-				struct vtime *vtime)
+				struct vtime *vtime, bool force)
 {
 	enum cpu_usage_stat index;
 
 	vtime->gtime += get_vtime_delta(vtime);
 
-	if (vtime->gtime < TICK_NSEC)
+	if (vtime->gtime < TICK_NSEC && !force)
 		return;
 
 	if (vtime->nice)
@@ -752,13 +752,13 @@ static void vtime_account_guest(struct task_struct *tsk,
 }
 
 static void vtime_account_user(struct task_struct *tsk,
-			       struct vtime *vtime)
+			       struct vtime *vtime, bool force)
 {
 	enum cpu_usage_stat index;
 
 	vtime->utime += get_vtime_delta(vtime);
 
-	if (vtime->utime < TICK_NSEC)
+	if (vtime->utime < TICK_NSEC && !force)
 		return;
 
 	if (vtime->nice)
@@ -776,7 +776,7 @@ static void __vtime_account_kernel(struct task_struct *tsk,
 {
 	/* We might have scheduled out from guest path */
 	if (vtime->state == VTIME_GUEST)
-		vtime_account_guest(tsk, vtime);
+		vtime_account_guest(tsk, vtime, false);
 	else
 		vtime_account_system(tsk, vtime);
 }
@@ -808,7 +808,7 @@ void vtime_user_exit(struct task_struct *tsk)
 	struct vtime *vtime = &tsk->vtime;
 
 	write_seqcount_begin(&vtime->seqcount);
-	vtime_account_user(tsk, vtime);
+	vtime_account_user(tsk, vtime, false);
 	vtime->state = VTIME_SYS;
 	write_seqcount_end(&vtime->seqcount);
 }
@@ -836,7 +836,7 @@ void vtime_guest_exit(struct task_struct *tsk)
 	struct vtime *vtime = &tsk->vtime;
 
 	write_seqcount_begin(&vtime->seqcount);
-	vtime_account_guest(tsk, vtime);
+	vtime_account_guest(tsk, vtime, false);
 	tsk->flags &= ~PF_VCPU;
 	vtime->state = VTIME_SYS;
 	write_seqcount_end(&vtime->seqcount);
@@ -937,6 +937,33 @@ void vtime_exit_task(struct task_struct *t)
 	local_irq_restore(flags);
 }
 
+void vtime_set_nice_local(struct task_struct *t)
+{
+	struct vtime *vtime = &t->vtime;
+
+	write_seqcount_begin(&vtime->seqcount);
+	if (vtime->state == VTIME_USER)
+		vtime_account_user(t, vtime, true);
+	else if (vtime->state == VTIME_GUEST)
+		vtime_account_guest(t, vtime, true);
+	vtime->nice = (task_nice(t) > 0) ? 1 : 0;
+	write_seqcount_end(&vtime->seqcount);
+}
+
+static void vtime_set_nice_func(struct irq_work *work)
+{
+	vtime_set_nice_local(current);
+}
+
+static DEFINE_PER_CPU(struct irq_work, vtime_set_nice_work) = {
+	.func = vtime_set_nice_func,
+};
+
+void vtime_set_nice_remote(int cpu)
+{
+	irq_work_queue_on(&per_cpu(vtime_set_nice_work, cpu), cpu);
+}
+
 u64 task_gtime(struct task_struct *t)
 {
 	struct vtime *vtime = &t->vtime;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 618577f..c7846ca 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1790,6 +1790,45 @@ static inline int sched_tick_offload_init(void) { return 0; }
 static inline void sched_update_tick_dependency(struct rq *rq) { }
 #endif
 
+static inline void vtime_set_nice(struct rq *rq,
+				  struct task_struct *p, long old_nice)
+{
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+	long nice;
+	int cpu;
+
+	if (!vtime_accounting_enabled())
+		return;
+
+	cpu = cpu_of(rq);
+
+	if (!vtime_accounting_enabled_cpu(cpu))
+		return;
+
+	/*
+	 * Task not running, nice update will be seen by vtime on its
+	 * next context switch.
+	 */
+	if (!task_current(rq, p))
+		return;
+
+	nice = task_nice(p);
+
+	/* Task stays nice, still accounted as nice in kcpustat */
+	if (old_nice > 0 && nice > 0)
+		return;
+
+	/* Task stays rude, still accounted as non-nice in kcpustat */
+	if (old_nice <= 0 && nice <= 0)
+		return;
+
+	if (p == current)
+		vtime_set_nice_local(p);
+	else
+		vtime_set_nice_remote(cpu);
+#endif
+}
+
 static inline void add_nr_running(struct rq *rq, unsigned count)
 {
 	unsigned prev_nr = rq->nr_running;
-- 
2.7.4

Powered by blists - more mailing lists