Message-ID: <20180620123208.GN2476@hirez.programming.kicks-ass.net>
Date:   Wed, 20 Jun 2018 14:32:08 +0200
From:   Peter Zijlstra <peterz@...radead.org>
To:     Thomas Gleixner <tglx@...utronix.de>
Cc:     Pavel Tatashin <pasha.tatashin@...cle.com>,
        steven.sistare@...cle.com, daniel.m.jordan@...cle.com,
        linux@...linux.org.uk, schwidefsky@...ibm.com,
        heiko.carstens@...ibm.com, john.stultz@...aro.org,
        sboyd@...eaurora.org, x86@...nel.org, linux-kernel@...r.kernel.org,
        mingo@...hat.com, hpa@...or.com, douly.fnst@...fujitsu.com,
        prarit@...hat.com, feng.tang@...el.com, pmladek@...e.com,
        gnomes@...rguk.ukuu.org.uk
Subject: Re: [PATCH v10 7/7] x86/tsc: use tsc early

On Wed, Jun 20, 2018 at 12:42:40PM +0200, Thomas Gleixner wrote:
> On Wed, 20 Jun 2018, Peter Zijlstra wrote:

> > I'm still puzzled by the entire need for tsc_early_enabled and all that.
> > Esp. since both branches do the exact same thing:
> > 
> > 	return cycles_2_ns(rdtsc());
> 
> Right. But up to the point where the real sched_clock initialization can be
> done and the static keys can be flipped, there must be a way to
> conditionally use TSC depending on availability and early initialization.

Ah, so we want to flip keys early, can be done, see below.

> You might argue that we shouldn't care because the jiffies case is just
> the worst case fallback anyway. I wouldn't even disagree, as those old
> machines whose TSC varies with the CPU frequency really should not
> matter anymore. Pavel might disagree of course.

You forgot (rightfully) that we even use TSC on those !constant
machines; we adjust the cycles_2_ns thing from the cpufreq notifiers.
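For reference, that notifier basically rescales tsc_khz and regenerates
the cyc2ns data that cycles_2_ns() uses; roughly (sketch, not the exact
tsc.c code):

static int time_cpufreq_notifier(struct notifier_block *nb,
				 unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;

	if (val == CPUFREQ_POSTCHANGE) {
		/* rescale the TSC frequency for the new P-state ... */
		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
		/* ... and regenerate the per-cpu mult/shift pair */
		set_cyc2ns_scale(tsc_khz, freq->cpu, rdtsc());
	}

	return 0;
}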

The only case we should _ever_ use that jiffies callback is when TSC
really isn't there. Basically, if we kill notsc, we could make
native_sched_clock() := cycles_2_ns(rdtsc()) (for CONFIG_X86_TSC), the
end.

No static keys, nothing.
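IOW it would collapse to something like (sketch):

unsigned long long native_sched_clock(void)
{
	/* CONFIG_X86_TSC=y, notsc gone: no static key, no jiffies fallback */
	return cycles_2_ns(rdtsc());
}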

That said, flipping static keys early isn't hard. We should call
jump_label_init() early, because we want the entries sorted and the
key->entries link set. It will also replace the GENERIC_NOP5_ATOMIC
thing, which means we also need to do arch_init_ideal_nops() early, but
since that is pure CPUID based that should be doable.
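Roughly (sketch, exact call site to be determined):

	/* early in setup_arch(), before the first early key flip */
	arch_init_ideal_nops();	/* pure CPUID, safe this early */
	jump_label_init();	/* sorts __jump_table, sets key->entries */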

And then something like the below could be used.

---
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index e56c95be2808..2dd8c5bdd87b 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -140,4 +140,38 @@ __init_or_module void arch_jump_label_transform_static(struct jump_entry *entry,
 		__jump_label_transform(entry, type, text_poke_early, 1);
 }
 
+void jump_label_update_early(struct static_key *key, bool enable)
+{
+	struct jump_entry *entry, *stop = __stop___jump_table;
+
+	/*
+	 * We need the table sorted and key->entries set up.
+	 */
+	WARN_ON_ONCE(!static_key_initialized);
+
+	entry = static_key_entries(key);
+
+	/*
+	 * Sanity check for early users, there had better be a core kernel user.
+	 */
+	if (!entry || !entry->code || !core_kernel_text(entry->code)) {
+		WARN_ON(1);
+		return;
+	}
+
+	for ( ; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
+		enum jump_label_type type = enable ^ jump_entry_branch(entry);
+		__jump_label_transform(entry, type, text_poke_early, 0);
+	}
+
+	atomic_set_release(&key->enabled, !!enable);
+}
+
+#else
+
+void jump_label_update_early(struct static_key *key, bool enable)
+{
+	atomic_set(&key->enabled, !!enable);
+}
+
 #endif
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index b46b541c67c4..cac61beca25f 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -79,6 +79,7 @@
 
 #include <linux/types.h>
 #include <linux/compiler.h>
+#include <linux/bug.h>
 
 extern bool static_key_initialized;
 
@@ -110,6 +111,17 @@ struct static_key {
 	};
 };
 
+#define JUMP_TYPE_FALSE		0UL
+#define JUMP_TYPE_TRUE		1UL
+#define JUMP_TYPE_LINKED	2UL
+#define JUMP_TYPE_MASK		3UL
+
+static inline struct jump_entry *static_key_entries(struct static_key *key)
+{
+	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
+	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
+}
+
 #else
 struct static_key {
 	atomic_t enabled;
@@ -119,6 +131,17 @@ struct static_key {
 
 #ifdef HAVE_JUMP_LABEL
 #include <asm/jump_label.h>
+
+static inline struct static_key *jump_entry_key(struct jump_entry *entry)
+{
+	return (struct static_key *)((unsigned long)entry->key & ~1UL);
+}
+
+static inline bool jump_entry_branch(struct jump_entry *entry)
+{
+	return (unsigned long)entry->key & 1UL;
+}
+
 #endif
 
 #ifndef __ASSEMBLY__
@@ -132,11 +155,6 @@ struct module;
 
 #ifdef HAVE_JUMP_LABEL
 
-#define JUMP_TYPE_FALSE		0UL
-#define JUMP_TYPE_TRUE		1UL
-#define JUMP_TYPE_LINKED	2UL
-#define JUMP_TYPE_MASK		3UL
-
 static __always_inline bool static_key_false(struct static_key *key)
 {
 	return arch_static_branch(key, false);
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 01ebdf1f9f40..9710fa7582aa 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -295,12 +295,6 @@ void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry
 	arch_jump_label_transform(entry, type);
 }
 
-static inline struct jump_entry *static_key_entries(struct static_key *key)
-{
-	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
-	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
-}
-
 static inline bool static_key_type(struct static_key *key)
 {
 	return key->type & JUMP_TYPE_TRUE;
@@ -321,16 +315,6 @@ static inline void static_key_set_linked(struct static_key *key)
 	key->type |= JUMP_TYPE_LINKED;
 }
 
-static inline struct static_key *jump_entry_key(struct jump_entry *entry)
-{
-	return (struct static_key *)((unsigned long)entry->key & ~1UL);
-}
-
-static bool jump_entry_branch(struct jump_entry *entry)
-{
-	return (unsigned long)entry->key & 1UL;
-}
-
 /***
  * A 'struct static_key' uses a union such that it either points directly
  * to a table of 'struct jump_entry' or to a linked list of modules which in

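An early user would then look something like this (illustrative only,
not part of the patch above):

void __init tsc_early_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC))
		return;

	/* ... determine tsc_khz and seed the cyc2ns data ... */

	jump_label_update_early(&__use_tsc.key, true);
}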