Message-Id: <20180720121409.948580976@linuxfoundation.org>
Date: Fri, 20 Jul 2018 14:14:04 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org, stable@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
James Morse <james.morse@....com>,
Christoffer Dall <cdall@...aro.org>,
Catalin Marinas <catalin.marinas@....com>,
Marc Zyngier <marc.zyngier@....com>
Subject: [PATCH 4.9 47/66] arm64: alternatives: use tpidr_el2 on VHE hosts
4.9-stable review patch. If anyone has any objections, please let me know.
------------------
From: James Morse <james.morse@....com>
Commit 6d99b68933fbcf51f84fcbba49246ce1209ec193 upstream.
Now that KVM uses tpidr_el2 in the same way as Linux's cpu_offset in
tpidr_el1, merge the two. This saves KVM from saving/restoring tpidr_el1
on VHE hosts, and allows future code to blindly access per-cpu variables
without triggering world-switch.
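
As an illustration only (not part of the upstream change): once the per-cpu
offset lives in tpidr_el2 on VHE hosts, ordinary per-cpu accessors work from
host-EL2 context without any register juggling. A minimal sketch, using the
real DEFINE_PER_CPU()/this_cpu_ptr() helpers but a hypothetical counter name:

	#include <linux/percpu.h>

	/* Hypothetical per-cpu counter, shown only to illustrate the access path. */
	static DEFINE_PER_CPU(unsigned long, example_counter);

	static void bump_example_counter(void)
	{
		/*
		 * this_cpu_ptr() adds __my_cpu_offset() to the per-cpu base.
		 * After this patch that offset is read from tpidr_el2 on VHE
		 * hosts (tpidr_el1 elsewhere), so KVM no longer needs to
		 * save/restore tpidr_el1 across world-switch for this to work.
		 */
		unsigned long *p = this_cpu_ptr(&example_counter);

		*p += 1;
	}
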
Signed-off-by: James Morse <james.morse@....com>
Reviewed-by: Christoffer Dall <cdall@...aro.org>
Reviewed-by: Catalin Marinas <catalin.marinas@....com>
Signed-off-by: Catalin Marinas <catalin.marinas@....com>
Signed-off-by: Marc Zyngier <marc.zyngier@....com>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
arch/arm64/include/asm/alternative.h | 2 ++
arch/arm64/include/asm/assembler.h | 8 ++++++++
arch/arm64/include/asm/percpu.h | 12 ++++++++++--
arch/arm64/kernel/alternative.c | 9 +++++----
arch/arm64/kernel/cpufeature.c | 17 +++++++++++++++++
5 files changed, 42 insertions(+), 6 deletions(-)
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -11,6 +11,8 @@
#include <linux/stddef.h>
#include <linux/stringify.h>
+extern int alternatives_applied;
+
struct alt_instr {
s32 orig_offset; /* offset to original instruction */
s32 alt_offset; /* offset to replacement instruction */
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -245,7 +245,11 @@ lr .req x30 // link register
*/
.macro adr_this_cpu, dst, sym, tmp
adr_l \dst, \sym
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
mrs \tmp, tpidr_el1
+alternative_else
+ mrs \tmp, tpidr_el2
+alternative_endif
add \dst, \dst, \tmp
.endm
@@ -256,7 +260,11 @@ lr .req x30 // link register
*/
.macro ldr_this_cpu dst, sym, tmp
adr_l \dst, \sym
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
mrs \tmp, tpidr_el1
+alternative_else
+ mrs \tmp, tpidr_el2
+alternative_endif
ldr \dst, [\dst, \tmp]
.endm
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -16,9 +16,14 @@
#ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H
+#include <asm/alternative.h>
+
static inline void set_my_cpu_offset(unsigned long off)
{
- asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
+ asm volatile(ALTERNATIVE("msr tpidr_el1, %0",
+ "msr tpidr_el2, %0",
+ ARM64_HAS_VIRT_HOST_EXTN)
+ :: "r" (off) : "memory");
}
static inline unsigned long __my_cpu_offset(void)
@@ -29,7 +34,10 @@ static inline unsigned long __my_cpu_off
* We want to allow caching the value, so avoid using volatile and
* instead use a fake stack read to hazard against barrier().
*/
- asm("mrs %0, tpidr_el1" : "=r" (off) :
+ asm(ALTERNATIVE("mrs %0, tpidr_el1",
+ "mrs %0, tpidr_el2",
+ ARM64_HAS_VIRT_HOST_EXTN)
+ : "=r" (off) :
"Q" (*(const unsigned long *)current_stack_pointer));
return off;
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -32,6 +32,8 @@
#define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset)
+int alternatives_applied;
+
struct alt_region {
struct alt_instr *begin;
struct alt_instr *end;
@@ -142,7 +144,6 @@ static void __apply_alternatives(void *a
*/
static int __apply_alternatives_multi_stop(void *unused)
{
- static int patched = 0;
struct alt_region region = {
.begin = (struct alt_instr *)__alt_instructions,
.end = (struct alt_instr *)__alt_instructions_end,
@@ -150,14 +151,14 @@ static int __apply_alternatives_multi_st
/* We always have a CPU 0 at this point (__init) */
if (smp_processor_id()) {
- while (!READ_ONCE(patched))
+ while (!READ_ONCE(alternatives_applied))
cpu_relax();
isb();
} else {
- BUG_ON(patched);
+ BUG_ON(alternatives_applied);
__apply_alternatives(&region);
/* Barriers provided by the cache flushing */
- WRITE_ONCE(patched, 1);
+ WRITE_ONCE(alternatives_applied, 1);
}
return 0;
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -829,6 +829,22 @@ static int __init parse_kpti(char *str)
early_param("kpti", parse_kpti);
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+static int cpu_copy_el2regs(void *__unused)
+{
+ /*
+ * Copy register values that aren't redirected by hardware.
+ *
+ * Before code patching we only set tpidr_el1, so all CPUs need to
+ * copy this value to tpidr_el2 before we patch the code. Once we've done
+ * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
+ * do anything here.
+ */
+ if (!alternatives_applied)
+ write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
+
+ return 0;
+}
+
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
@@ -895,6 +911,7 @@ static const struct arm64_cpu_capabiliti
.capability = ARM64_HAS_VIRT_HOST_EXTN,
.def_scope = SCOPE_SYSTEM,
.matches = runs_at_el2,
+ .enable = cpu_copy_el2regs,
},
{
.desc = "32-bit EL0 Support",