Date:   Wed, 22 Sep 2021 13:05:12 +0200
From:   Peter Zijlstra <peterz@...radead.org>
To:     gor@...ux.ibm.com, jpoimboe@...hat.com, jikos@...nel.org,
        mbenes@...e.cz, pmladek@...e.com, mingo@...nel.org
Cc:     linux-kernel@...r.kernel.org, peterz@...radead.org,
        joe.lawrence@...hat.com, fweisbec@...il.com, tglx@...utronix.de,
        hca@...ux.ibm.com, svens@...ux.ibm.com, sumanthk@...ux.ibm.com,
        live-patching@...r.kernel.org, paulmck@...nel.org
Subject: [RFC][PATCH 6/7] context_tracking: Provide SMP ordering using RCU

Use rcu_user_{enter,exit}() calls to provide SMP ordering on context
tracking state stores:

__context_tracking_exit()
  __this_cpu_write(context_tracking.state, CONTEXT_KERNEL)
  rcu_user_exit()
    rcu_eqs_exit()
      rcu_dynticks_eqs_exit()
        rcu_dynticks_inc()
          atomic_add_return() /* smp_mb */

__context_tracking_enter()
  rcu_user_enter()
    rcu_eqs_enter()
      rcu_dynticks_eqs_enter()
        rcu_dynticks_inc()
          atomic_add_return() /* smp_mb */
  __this_cpu_write(context_tracking.state, state)

This brackets the USER/KERNEL state stores with an smp_mb() on either
side; therefore a user of context_tracking_state_cpu() knows the CPU
must pass through an smp_mb() before its state can change.
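
For example, a later user could rely on this along the lines of the
sketch below (illustrative only, not part of this patch; my_flag, cpu
and the surrounding code are made up):

	WRITE_ONCE(my_flag, 1);		/* make something visible remotely */
	smp_mb();			/* order the store above vs the read below */
	if (context_tracking_state_cpu(cpu) == CONTEXT_USER) {
		/*
		 * Per the ordering above, before the remote CPU can leave
		 * CONTEXT_USER and run kernel code again it must pass
		 * through the smp_mb() in rcu_user_exit(), which pairs
		 * with our smp_mb() above, so it will observe my_flag.
		 */
	}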

Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
 include/linux/context_tracking_state.h |   12 ++++++++++++
 kernel/context_tracking.c              |    7 ++++---
 2 files changed, 16 insertions(+), 3 deletions(-)

--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -45,11 +45,23 @@ static __always_inline bool context_trac
 {
 	return __this_cpu_read(context_tracking.state) == CONTEXT_USER;
 }
+
+static __always_inline int context_tracking_state_cpu(int cpu)
+{
+	struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
+
+	if (!context_tracking_enabled() || !ct->active)
+		return CONTEXT_DISABLED;
+
+	return ct->state;
+}
+
 #else
 static inline bool context_tracking_in_user(void) { return false; }
 static inline bool context_tracking_enabled(void) { return false; }
 static inline bool context_tracking_enabled_cpu(int cpu) { return false; }
 static inline bool context_tracking_enabled_this_cpu(void) { return false; }
+static inline int context_tracking_state_cpu(int cpu) { return CONTEXT_DISABLED; }
 #endif /* CONFIG_CONTEXT_TRACKING */
 
 #endif
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -82,7 +82,7 @@ void noinstr __context_tracking_enter(en
 				vtime_user_enter(current);
 				instrumentation_end();
 			}
-			rcu_user_enter();
+			rcu_user_enter(); /* smp_mb */
 		}
 		/*
 		 * Even if context tracking is disabled on this CPU, because it's outside
@@ -149,12 +149,14 @@ void noinstr __context_tracking_exit(enu
 		return;
 
 	if (__this_cpu_read(context_tracking.state) == state) {
+		__this_cpu_write(context_tracking.state, CONTEXT_KERNEL);
+
 		if (__this_cpu_read(context_tracking.active)) {
 			/*
 			 * We are going to run code that may use RCU. Inform
 			 * RCU core about that (ie: we may need the tick again).
 			 */
-			rcu_user_exit();
+			rcu_user_exit(); /* smp_mb */
 			if (state == CONTEXT_USER) {
 				instrumentation_begin();
 				vtime_user_exit(current);
@@ -162,7 +164,6 @@ void noinstr __context_tracking_exit(enu
 				instrumentation_end();
 			}
 		}
-		__this_cpu_write(context_tracking.state, CONTEXT_KERNEL);
 	}
 	context_tracking_recursion_exit();
 }

