Message-ID: <20150723104215.GH25159@twins.programming.kicks-ass.net>
Date: Thu, 23 Jul 2015 12:42:15 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: Jason Baron <jasonbaron0@...il.com>
Cc: Borislav Petkov <bp@...en8.de>,
Andy Lutomirski <luto@...capital.net>,
Thomas Gleixner <tglx@...utronix.de>,
Mikulas Patocka <mpatocka@...hat.com>,
Paul Mackerras <paulus@...ba.org>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Kees Cook <keescook@...omium.org>,
Andrea Arcangeli <aarcange@...hat.com>,
Vince Weaver <vince@...ter.net>,
"hillf.zj" <hillf.zj@...baba-inc.com>,
Valdis Kletnieks <Valdis.Kletnieks@...edu>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
Steven Rostedt <rostedt@...dmis.org>
Subject: Re: Kernel broken on processors without performance counters

On Wed, Jul 22, 2015 at 01:06:44PM -0400, Jason Baron wrote:
> Ok,
>
> So we could add all 4 possible initial states, where the
> branches would be:
>
> static_likely_init_true_branch(struct static_likely_init_true_key *key)
> static_likely_init_false_branch(struct static_likely_init_false_key *key)
> static_unlikely_init_false_branch(struct static_unlikely_init_false_key *key)
> static_unlikely_init_true_branch(struct static_unlikely_init_true_key *key)

I'm sorely tempted to go quote Cypress Hill here...
And I realize part of the problem is that we want to use jump labels
before we can patch them. But surely we can do better.
extern bool ____wrong_branch_error(void);
struct static_key_true;
struct static_key_false;

#define static_branch_likely(x)                                                \
({                                                                             \
        bool branch;                                                           \
        if (__builtin_types_compatible_p(typeof(*(x)), struct static_key_true)) \
                branch = !arch_static_branch(&(x)->key);                       \
        else if (__builtin_types_compatible_p(typeof(*(x)), struct static_key_false)) \
                branch = !arch_static_branch_jump(&(x)->key);                  \
        else                                                                   \
                branch = ____wrong_branch_error();                             \
        branch;                                                                \
})

#define static_branch_unlikely(x)                                              \
({                                                                             \
        bool branch;                                                           \
        if (__builtin_types_compatible_p(typeof(*(x)), struct static_key_true)) \
                branch = arch_static_branch(&(x)->key);                        \
        else if (__builtin_types_compatible_p(typeof(*(x)), struct static_key_false)) \
                branch = arch_static_branch_jump(&(x)->key);                   \
        else                                                                   \
                branch = ____wrong_branch_error();                             \
        branch;                                                                \
})

Can't we make something like that work?
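
To make the type trick concrete: the key type is a compile-time constant,
so the optimizer discards the arms that don't match, and the only way
____wrong_branch_error() stays referenced is when the macro is handed
something that is neither key type. Since that function is never defined
anywhere, such misuse fails at link time. Hypothetical misuse, names made
up, just to show the failure mode:

        struct static_key old_key;      /* plain static_key, not one of the two new types */

        void foo(void)
        {
                if (static_branch_likely(&old_key))     /* neither arm matches ... */
                        do_something();                 /* ... so the build keeps the call to the
                                                           undefined ____wrong_branch_error()
                                                           and the link fails */
        }
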
So the immediate problem appears to be the 4 different key inits, which don't
seem very supportive of this separation:

+#define STATIC_KEY_LIKELY_INIT_TRUE ((struct static_likely_init_true_key) \
+       { .key.enabled = ATOMIC_INIT(1), \
+         .key.entries = (void *)JUMP_LABEL_TYPE_TRUE_BRANCH })
+#define STATIC_KEY_LIKELY_INIT_FALSE ((struct static_likely_init_false_key) \
+       { .key.enabled = ATOMIC_INIT(0), \
+         .key.entries = (void *)JUMP_LABEL_TYPE_TRUE_BRANCH })
+#define STATIC_KEY_UNLIKELY_INIT_TRUE ((struct static_unlikely_init_true_key) \
+       { .key.enabled = ATOMIC_INIT(1), \
+         .key.entries = (void *)JUMP_LABEL_TYPE_FALSE_BRANCH })
+#define STATIC_KEY_UNLIKELY_INIT_FALSE ((struct static_unlikely_init_false_key) \
+       { .key.enabled = ATOMIC_INIT(0), \
+         .key.entries = (void *)JUMP_LABEL_TYPE_FALSE_BRANCH })

But I think we can fix that by using a second __jump_table section; then
we can augment the JUMP_LABEL_TYPE_{TRUE,FALSE} thing with the section
we find the jump_entry in.

Then we can do:

#define STATIC_KEY_TRUE_INIT  (struct static_key_true)  { .key = STATIC_KEY_INIT_TRUE,  }
#define STATIC_KEY_FALSE_INIT (struct static_key_false) { .key = STATIC_KEY_INIT_FALSE, }

And invert the jump_label_type if we're in the second section.
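
Roughly like this on the generic side (just a sketch; the section bounds
symbols, the helper and the extra jump_entry argument are made up, and I'm
assuming the existing JUMP_LABEL_ENABLE/DISABLE enum):

/* hypothetical bounds for the second section, provided by the linker script */
extern struct jump_entry __start___jump_table_inv[];
extern struct jump_entry __stop___jump_table_inv[];

static bool jump_entry_inverted(struct jump_entry *entry)
{
        return entry >= __start___jump_table_inv &&
               entry <  __stop___jump_table_inv;
}

static enum jump_label_type jump_label_type(struct static_key *key,
                                            struct jump_entry *entry)
{
        enum jump_label_type type = static_key_enabled(key) ?
                                    JUMP_LABEL_ENABLE : JUMP_LABEL_DISABLE;

        /* entries found in __jump_table_inv want the opposite patch */
        if (jump_entry_inverted(entry))
                type = (type == JUMP_LABEL_ENABLE) ? JUMP_LABEL_DISABLE
                                                   : JUMP_LABEL_ENABLE;
        return type;
}
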
I think we'll need a second argument to the arch_static_branch*()
functions to indicate which section it needs to go in.

static __always_inline bool arch_static_branch(struct static_key *key, bool inv)
{
        if (!inv) {
                asm_volatile_goto("1:"
                        ".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
                        ".pushsection __jump_table, \"aw\" \n\t"
                        _ASM_ALIGN "\n\t"
                        _ASM_PTR "1b, %l[l_yes], %c0 \n\t"
                        ".popsection \n\t"
                        : : "i" (key) : : l_yes);
        } else {
                asm_volatile_goto("1:"
                        ".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
                        ".pushsection __jump_table_inv, \"aw\" \n\t"
                        _ASM_ALIGN "\n\t"
                        _ASM_PTR "1b, %l[l_yes], %c0 \n\t"
                        ".popsection \n\t"
                        : : "i" (key) : : l_yes);
        }
        return false;
l_yes:
        return true;
}

static __always_inline bool arch_static_branch_jump(struct static_key *key, bool inv)
{
        if (!inv) {
                asm_volatile_goto("1:"
                        "jmp %l[l_yes]\n\t"
                        ".pushsection __jump_table, \"aw\" \n\t"
                        _ASM_ALIGN "\n\t"
                        _ASM_PTR "1b, %l[l_yes], %c0 \n\t"
                        ".popsection \n\t"
                        : : "i" (key) : : l_yes);
        } else {
                asm_volatile_goto("1:"
                        "jmp %l[l_yes]\n\t"
                        ".pushsection __jump_table_inv, \"aw\" \n\t"
                        _ASM_ALIGN "\n\t"
                        _ASM_PTR "1b, %l[l_yes], %c0 \n\t"
                        ".popsection \n\t"
                        : : "i" (key) : : l_yes);
        }
        return false;
l_yes:
        return true;
}

And change the branch macros thusly:

#define static_branch_likely(x)                                                \
({                                                                             \
        bool branch;                                                           \
        if (__builtin_types_compatible_p(typeof(*(x)), struct static_key_true)) \
                branch = !arch_static_branch(&(x)->key, false);                \
        else if (__builtin_types_compatible_p(typeof(*(x)), struct static_key_false)) \
                branch = !arch_static_branch_jump(&(x)->key, true);            \
        else                                                                   \
                branch = ____wrong_branch_error();                             \
        branch;                                                                \
})

#define static_branch_unlikely(x)                                              \
({                                                                             \
        bool branch;                                                           \
        if (__builtin_types_compatible_p(typeof(*(x)), struct static_key_true)) \
                branch = arch_static_branch(&(x)->key, true);                  \
        else if (__builtin_types_compatible_p(typeof(*(x)), struct static_key_false)) \
                branch = arch_static_branch_jump(&(x)->key, false);            \
        else                                                                   \
                branch = ____wrong_branch_error();                             \
        branch;                                                                \
})

And I think it'll all work. Hmm?
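
For completeness, usage would then look something like this (a sketch; the
key names and the called functions are made up, and the runtime flip just
keeps using the existing static_key_slow_inc/dec on the embedded key):

/* default-true key: expected enabled, tested with static_branch_likely() */
static struct static_key_true  sched_feat_key  = STATIC_KEY_TRUE_INIT;

/* default-false key: expected disabled, tested with static_branch_unlikely() */
static struct static_key_false trace_point_key = STATIC_KEY_FALSE_INIT;

void hot_path(void)
{
        if (static_branch_likely(&sched_feat_key))
                do_the_common_thing();          /* expected, fast path */

        if (static_branch_unlikely(&trace_point_key))
                do_the_rare_thing();            /* expected disabled most of the time */
}

void enable_tracing(void)
{
        /* flipping at runtime uses the existing API on the embedded key */
        static_key_slow_inc(&trace_point_key.key);
}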