Date:   Fri, 2 Feb 2018 13:46:47 +0100
From:   Peter Zijlstra <peterz@...radead.org>
To:     "Rafael J. Wysocki" <rjw@...ysocki.net>
Cc:     Mel Gorman <mgorman@...hsingularity.net>,
        Mike Galbraith <efault@....de>,
        Matt Fleming <matt@...eblueprint.co.uk>,
        LKML <linux-kernel@...r.kernel.org>,
        srinivas.pandruvada@...ux.intel.com
Subject: Re: [PATCH 4/4] sched/fair: Use a recently used CPU as an idle
 candidate and the basis for SIS

On Fri, Feb 02, 2018 at 12:42:29PM +0100, Rafael J. Wysocki wrote:
> > If you really care you can do async IPIs and do a custom serialization
> > that only waits when you do back-to-back things, which should be fairly
> > uncommon I'd think.
> 
> In this particular case we don't want to return to user space before the
> MSR is actually written with the new value.

Why not?

I was thinking of something like the below, which would in fact do exactly
that: fire the MSR update through an async IPI and only wait when two
updates from the same CPU are issued back-to-back.
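
For reference, the HWP_REQUEST field layout as I read it from the SDM (the
mask names below are mine, not anything the kernel defines; the patch only
touches the Desired_Performance field at bits 23:16):

/* IA32_HWP_REQUEST (MSR 0x774) -- hypothetical helper masks */
#define HWP_REQ_MIN_PERF	GENMASK_ULL(7, 0)	/* Minimum_Performance */
#define HWP_REQ_MAX_PERF	GENMASK_ULL(15, 8)	/* Maximum_Performance */
#define HWP_REQ_DESIRED_PERF	GENMASK_ULL(23, 16)	/* Desired_Performance */
#define HWP_REQ_EPP		GENMASK_ULL(31, 24)	/* Energy_Performance_Preference */
#define HWP_REQ_ACT_WINDOW	GENMASK_ULL(41, 32)	/* Activity_Window */
#define HWP_REQ_PKG_CTRL	BIT_ULL(42)		/* Package_Control */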

---
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 7edf7a0e5a96..f0caa5cc7adb 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -29,6 +29,7 @@
 #include <linux/debugfs.h>
 #include <linux/acpi.h>
 #include <linux/vmalloc.h>
+#include <linux/smp.h>
 #include <trace/events/power.h>
 
 #include <asm/div64.h>
@@ -767,6 +768,41 @@ static void intel_pstate_hwp_set(unsigned int cpu)
 	wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
 }
 
+/* RMW only the Desired_Performance field (bits 23:16) of HWP_REQUEST */
+static void __intel_pstate_hwp_set_desired(int val)
+{
+	u64 value;
+
+	rdmsrl(MSR_HWP_REQUEST, value);
+	value &= ~GENMASK_ULL(23, 16);
+	value |= (u64)(val & 0xff) << 16;
+	wrmsrl(MSR_HWP_REQUEST, value);
+}
+
+static void __intel_pstate_hwp_func(void *data)
+{
+	__intel_pstate_hwp_set_desired((int)(unsigned long)data);
+}
+
+/* One csd per calling CPU: only back-to-back calls from the same CPU wait */
+static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
+
+static void intel_pstate_hwp_set_desired(int cpu, int val)
+{
+	call_single_data_t *csd;
+
+	preempt_disable();
+	csd = this_cpu_ptr(&csd_data);
+	/* wait for previous invocation to complete */
+	csd_lock_wait(csd);
+
+	csd->func = __intel_pstate_hwp_func;
+	csd->info = (void *)(unsigned long)val;
+
+	smp_call_function_single_async(cpu, csd);
+	preempt_enable();
+}
+
 static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
 {
 	struct cpudata *cpu_data = all_cpu_data[policy->cpu];
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 9fb239e12b82..2bc125ec6146 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -14,6 +14,11 @@
 #include <linux/init.h>
 #include <linux/llist.h>
 
+enum {
+	CSD_FLAG_LOCK		= 0x01,
+	CSD_FLAG_SYNCHRONOUS	= 0x02,
+};
+
 typedef void (*smp_call_func_t)(void *info);
 struct __call_single_data {
 	struct llist_node llist;
@@ -26,6 +31,11 @@ struct __call_single_data {
 typedef struct __call_single_data call_single_data_t
 	__aligned(sizeof(struct __call_single_data));
 
+static __always_inline void csd_lock_wait(call_single_data_t *csd)
+{
+	smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
+}
+
 /* total number of cpus in this system (may exceed NR_CPUS) */
 extern unsigned int total_cpus;
 
diff --git a/kernel/smp.c b/kernel/smp.c
index 084c8b3a2681..af0ef9eb7679 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -22,11 +22,6 @@
 
 #include "smpboot.h"
 
-enum {
-	CSD_FLAG_LOCK		= 0x01,
-	CSD_FLAG_SYNCHRONOUS	= 0x02,
-};
-
 struct call_function_data {
 	call_single_data_t	__percpu *csd;
 	cpumask_var_t		cpumask;
@@ -103,11 +98,6 @@ void __init call_function_init(void)
  * previous function call. For multi-cpu calls its even more interesting
  * as we'll have to ensure no other cpu is observing our csd.
  */
-static __always_inline void csd_lock_wait(call_single_data_t *csd)
-{
-	smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
-}
-
 static __always_inline void csd_lock(call_single_data_t *csd)
 {
 	csd_lock_wait(csd);

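To illustrate the intended use (hypothetical caller, not part of the patch;
set_desired_for_policy() is a made-up name): because the csd lives on the
*calling* CPU, the loop below only ever waits at the top of an iteration if
its own previous IPI hasn't completed yet -- the back-to-back case -- and
never blocks on the MSR write itself:

/* Sketch: push a new desired value to every CPU of a policy */
static void set_desired_for_policy(struct cpufreq_policy *policy, int val)
{
	int cpu;

	for_each_cpu(cpu, policy->cpus)
		intel_pstate_hwp_set_desired(cpu, val);
}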