lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20240125082254.424859-6-ashok.raj@intel.com>
Date: Thu, 25 Jan 2024 00:22:54 -0800
From: Ashok Raj <ashok.raj@...el.com>
To: Hans de Goede <hdegoede@...hat.com>,
	Ilpo Jarvinen <ilpo.jarvinen@...ux.intel.com>,
	markgross@...nel.org
Cc: Jithu Joseph <jithu.joseph@...el.com>,
	rostedt@...dmis.org,
	ashok.raj@...el.com,
	tony.luck@...el.com,
	LKML <linux-kernel@...r.kernel.org>,
	platform-driver-x86@...r.kernel.org,
	patches@...ts.linux.dev,
	pengfei.xu@...el.com
Subject: [PATCH 5/5] platform/x86/intel/ifs: Add an entry rendezvous for SAF

The activation for SAF includes a parameter to make microcode wait for both
threads to join. It's preferable to perform an entry rendezvous before
the activation to ensure that they start the `wrmsr` close enough to each
other. In some cases it has been observed that one of the threads might be
just a bit late to arrive. An entry rendezvous reduces the likelihood of
these cases occurring.

Add an entry rendezvous to ensure that the activations on both threads
happen close enough to each other.

Signed-off-by: Ashok Raj <ashok.raj@...el.com>
Reviewed-by: Tony Luck <tony.luck@...el.com>
---
 drivers/platform/x86/intel/ifs/runtest.c | 48 +++++++++++++-----------
 1 file changed, 26 insertions(+), 22 deletions(-)

diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c
index e3307dd8e3c4..95b4b71fab53 100644
--- a/drivers/platform/x86/intel/ifs/runtest.c
+++ b/drivers/platform/x86/intel/ifs/runtest.c
@@ -140,6 +140,29 @@ static bool can_restart(union ifs_status status)
 	return false;
 }
 
+#define SPINUNIT 100 /* 100 nsec */
+static atomic_t array_cpus_in;
+static atomic_t scan_cpus_in;
+
+/*
+ * Simplified cpu sibling rendezvous loop based on microcode loader __wait_for_cpus()
+ */
+static void wait_for_sibling_cpu(atomic_t *t, long long timeout)
+{
+	int cpu = smp_processor_id();
+	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
+	int all_cpus = cpumask_weight(smt_mask);
+
+	atomic_inc(t);
+	while (atomic_read(t) < all_cpus) {
+		if (timeout < SPINUNIT)
+			return;
+		ndelay(SPINUNIT);
+		timeout -= SPINUNIT;
+		touch_nmi_watchdog();
+	}
+}
+
 /*
  * Execute the scan. Called "simultaneously" on all threads of a core
  * at high priority using the stop_cpus mechanism.
@@ -165,6 +188,8 @@ static int doscan(void *data)
 	/* Only the first logical CPU on a core reports result */
 	first = cpumask_first(cpu_smt_mask(cpu));
 
+	wait_for_sibling_cpu(&scan_cpus_in, NSEC_PER_SEC);
+
 	/*
 	 * This WRMSR will wait for other HT threads to also write
 	 * to this MSR (at most for activate.delay cycles). Then it
@@ -230,6 +255,7 @@ static void ifs_test_core(int cpu, struct device *dev)
 		}
 
 		params.activate = &activate;
+		atomic_set(&scan_cpus_in, 0);
 		stop_core_cpuslocked(cpu, doscan, &params);
 
 		status = params.status;
@@ -270,28 +296,6 @@ static void ifs_test_core(int cpu, struct device *dev)
 	}
 }
 
-#define SPINUNIT 100 /* 100 nsec */
-static atomic_t array_cpus_in;
-
-/*
- * Simplified cpu sibling rendezvous loop based on microcode loader __wait_for_cpus()
- */
-static void wait_for_sibling_cpu(atomic_t *t, long long timeout)
-{
-	int cpu = smp_processor_id();
-	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
-	int all_cpus = cpumask_weight(smt_mask);
-
-	atomic_inc(t);
-	while (atomic_read(t) < all_cpus) {
-		if (timeout < SPINUNIT)
-			return;
-		ndelay(SPINUNIT);
-		timeout -= SPINUNIT;
-		touch_nmi_watchdog();
-	}
-}
-
 static int do_array_test(void *data)
 {
 	union ifs_array *command = data;
-- 
2.39.2


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ