Message-Id: <20221215184300.1592872-3-srinivas.pandruvada@linux.intel.com>
Date:   Thu, 15 Dec 2022 10:43:00 -0800
From:   Srinivas Pandruvada <srinivas.pandruvada@...ux.intel.com>
To:     rafael@...nel.org, peterz@...radead.org, frederic@...nel.org,
        daniel.lezcano@...aro.org
Cc:     linux-pm@...r.kernel.org, linux-kernel@...r.kernel.org,
        len.brown@...el.com,
        Srinivas Pandruvada <srinivas.pandruvada@...ux.intel.com>
Subject: [RFC/RFT PATCH 2/2] sched/core: Add max duration to play_idle_precise()

When there is a storm of soft irqs, it is possible that the caller
spends a lot more time in play_idle_precise() than the actual idle
time requested.

Add a parameter to play_idle_precise() that specifies a maximum time
after which the loop in play_idle_precise() exits, even if the total
forced idle time is less than the requested duration. If the loop
exits before the requested forced idle duration has elapsed, return
an error to the caller; otherwise return 0.

For powercap/idle_inject, the maximum wait is capped at two times
the idle duration.
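
As an illustration only (not part of the change itself), a caller of the
updated interface could bound the forced idle window and handle the new
error return roughly as below; the 1 ms / 2 ms values and the pr_debug()
message are made-up example values, and U64_MAX means "no latency
constraint", as in play_idle():

	/*
	 * Example sketch: request 1 ms of forced idle, but give up if the
	 * loop has not finished within 2 ms (e.g. due to a softirq storm).
	 */
	u64 idle_ns = NSEC_PER_MSEC;
	int ret;

	ret = play_idle_precise(idle_ns, 2 * NSEC_PER_MSEC, U64_MAX);
	if (ret == -EAGAIN)
		pr_debug("forced idle ended early: max duration reached\n");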

Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada@...ux.intel.com>
---
 drivers/powercap/idle_inject.c |  4 +++-
 include/linux/cpu.h            |  4 ++--
 kernel/sched/idle.c            | 13 +++++++++++--
 3 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/drivers/powercap/idle_inject.c b/drivers/powercap/idle_inject.c
index fe86a09e3b67..2764155d184e 100644
--- a/drivers/powercap/idle_inject.c
+++ b/drivers/powercap/idle_inject.c
@@ -132,6 +132,7 @@ static void idle_inject_fn(unsigned int cpu)
 {
 	struct idle_inject_device *ii_dev;
 	struct idle_inject_thread *iit;
+	u64 idle_duration_ns;
 
 	ii_dev = per_cpu(idle_inject_device, cpu);
 	iit = per_cpu_ptr(&idle_inject_thread, cpu);
@@ -140,8 +141,9 @@ static void idle_inject_fn(unsigned int cpu)
 	 * Let the smpboot main loop know that the task should not run again.
 	 */
 	iit->should_run = 0;
+	idle_duration_ns = READ_ONCE(ii_dev->idle_duration_us) * NSEC_PER_USEC;
 
-	play_idle_precise(READ_ONCE(ii_dev->idle_duration_us) * NSEC_PER_USEC,
+	play_idle_precise(idle_duration_ns, idle_duration_ns << 1,
 			  READ_ONCE(ii_dev->latency_us) * NSEC_PER_USEC);
 }
 
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 314802f98b9d..9969940553e5 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -190,11 +190,11 @@ void arch_cpu_idle_dead(void);
 int cpu_report_state(int cpu);
 int cpu_check_up_prepare(int cpu);
 void cpu_set_state_online(int cpu);
-void play_idle_precise(u64 duration_ns, u64 latency_ns);
+int play_idle_precise(u64 duration_ns, u64 max_duration_ns, u64 latency_ns);
 
 static inline void play_idle(unsigned long duration_us)
 {
-	play_idle_precise(duration_us * NSEC_PER_USEC, U64_MAX);
+	play_idle_precise(duration_us * NSEC_PER_USEC, 0, U64_MAX);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 77d6168288cf..c98b77ff84f5 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -360,10 +360,10 @@ static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
-void play_idle_precise(u64 duration_ns, u64 latency_ns)
+int play_idle_precise(u64 duration_ns, u64 duration_ns_max, u64 latency_ns)
 {
 	struct idle_timer it;
-	ktime_t remaining;
+	ktime_t remaining, end_time;
 
 	/*
 	 * Only FIFO tasks can disable the tick since they don't need the forced
@@ -378,6 +378,8 @@ void play_idle_precise(u64 duration_ns, u64 latency_ns)
 
 	remaining = ns_to_ktime(duration_ns);
 
+	end_time = ktime_add_ns(ktime_get(), duration_ns_max);
+
 	do {
 		rcu_sleep_check();
 		preempt_disable();
@@ -402,12 +404,19 @@ void play_idle_precise(u64 duration_ns, u64 latency_ns)
 		preempt_fold_need_resched();
 		preempt_enable();
 
+		if (!READ_ONCE(it.done) && duration_ns_max) {
+			if (ktime_after(ktime_get(), end_time))
+				return -EAGAIN;
+		}
+
 		/* Give ksoftirqd 1 jiffy to get a chance to start its job */
 		if (!READ_ONCE(it.done) && task_is_running(__this_cpu_read(ksoftirqd))) {
 			__set_current_state(TASK_UNINTERRUPTIBLE);
 			schedule_timeout(1);
 		}
 	} while (!READ_ONCE(it.done));
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(play_idle_precise);
 
-- 
2.38.1
