Message-Id: <1548946743-38979-15-git-send-email-julien.thierry@arm.com>
Date: Thu, 31 Jan 2019 14:58:52 +0000
From: Julien Thierry <julien.thierry@....com>
To: linux-arm-kernel@...ts.infradead.org
Cc: linux-kernel@...r.kernel.org, daniel.thompson@...aro.org,
joel@...lfernandes.org, marc.zyngier@....com,
christoffer.dall@....com, james.morse@....com,
catalin.marinas@....com, will.deacon@....com, mark.rutland@....com,
Julien Thierry <julien.thierry@....com>,
Suzuki K Poulose <suzuki.poulose@....com>,
Marc Zyngier <Marc.Zyngier@....com>,
Christoffer Dall <Christoffer.Dall@....com>
Subject: [PATCH v10 14/25] arm64: alternative: Allow alternative status checking per cpufeature
In preparation for the application of alternatives at different points
during the boot process, provide the ability to check whether the
alternatives for a feature of interest have already been applied,
instead of relying on a single global boolean covering all alternatives.
Make the VHE enablement code check for the VHE feature instead of
considering all alternatives.
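For illustration, here is a minimal userspace sketch of the idea: track
per-feature application state in a bitmap rather than a single global
flag. Everything below (FAKE_NCAPS, feature_alt_is_applied, mark_applied,
fake_vhe_cap) is a made-up stand-in and not kernel code; the real patch
uses DECLARE_BITMAP(), test_bit() and bitmap_copy() over ARM64_NCAPS bits.

	/*
	 * Sketch only: per-feature "alternative applied" tracking with a
	 * bitmap instead of one global boolean. Names are illustrative.
	 */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define FAKE_NCAPS 64u                /* stand-in for ARM64_NCAPS */

	static uint64_t applied_alternatives; /* stand-in for the kernel bitmap */

	static bool feature_alt_is_applied(unsigned int cpufeature)
	{
		if (cpufeature >= FAKE_NCAPS) /* mirrors the WARN_ON() bounds check */
			return false;

		return applied_alternatives & (UINT64_C(1) << cpufeature);
	}

	static void mark_applied(unsigned int cpufeature)
	{
		if (cpufeature < FAKE_NCAPS)
			applied_alternatives |= UINT64_C(1) << cpufeature;
	}

	int main(void)
	{
		unsigned int fake_vhe_cap = 3; /* stand-in for ARM64_HAS_VIRT_HOST_EXTN */

		printf("before: %d\n", feature_alt_is_applied(fake_vhe_cap)); /* 0 */
		mark_applied(fake_vhe_cap);
		printf("after:  %d\n", feature_alt_is_applied(fake_vhe_cap)); /* 1 */
		return 0;
	}

A caller such as the VHE enablement path can then ask about exactly the
feature it cares about, rather than waiting on every alternative.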
Signed-off-by: Julien Thierry <julien.thierry@....com>
Acked-by: Marc Zyngier <marc.zyngier@....com>
Cc: Catalin Marinas <catalin.marinas@....com>
Cc: Will Deacon <will.deacon@....com>
Cc: Suzuki K Poulose <suzuki.poulose@....com>
Cc: Marc Zyngier <Marc.Zyngier@....com>
Cc: Christoffer Dall <Christoffer.Dall@....com>
---
arch/arm64/include/asm/alternative.h | 3 +--
arch/arm64/kernel/alternative.c | 21 +++++++++++++++++----
arch/arm64/kernel/cpufeature.c | 2 +-
3 files changed, 19 insertions(+), 7 deletions(-)
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 4b650ec..9806a23 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -14,8 +14,6 @@
#include <linux/stddef.h>
#include <linux/stringify.h>
-extern int alternatives_applied;
-
struct alt_instr {
s32 orig_offset; /* offset to original instruction */
s32 alt_offset; /* offset to replacement instruction */
@@ -28,6 +26,7 @@ typedef void (*alternative_cb_t)(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst);
void __init apply_alternatives_all(void);
+bool alternative_is_applied(u16 cpufeature);
#ifdef CONFIG_MODULES
void apply_alternatives_module(void *start, size_t length);
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index b5d6039..c947d22 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -32,13 +32,23 @@
#define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset)
-int alternatives_applied;
+static int all_alternatives_applied;
+
+static DECLARE_BITMAP(applied_alternatives, ARM64_NCAPS);
struct alt_region {
struct alt_instr *begin;
struct alt_instr *end;
};
+bool alternative_is_applied(u16 cpufeature)
+{
+ if (WARN_ON(cpufeature >= ARM64_NCAPS))
+ return false;
+
+ return test_bit(cpufeature, applied_alternatives);
+}
+
/*
* Check if the target PC is within an alternative block.
*/
@@ -192,6 +202,9 @@ static void __apply_alternatives(void *alt_region, bool is_module)
dsb(ish);
__flush_icache_all();
isb();
+
+ /* We applied all that was available */
+ bitmap_copy(applied_alternatives, cpu_hwcaps, ARM64_NCAPS);
}
}
@@ -208,14 +221,14 @@ static int __apply_alternatives_multi_stop(void *unused)
/* We always have a CPU 0 at this point (__init) */
if (smp_processor_id()) {
- while (!READ_ONCE(alternatives_applied))
+ while (!READ_ONCE(all_alternatives_applied))
cpu_relax();
isb();
} else {
- BUG_ON(alternatives_applied);
+ BUG_ON(all_alternatives_applied);
__apply_alternatives(&region, false);
/* Barriers provided by the cache flushing */
- WRITE_ONCE(alternatives_applied, 1);
+ WRITE_ONCE(all_alternatives_applied, 1);
}
return 0;
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 6f56e0a..d607ea3 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1118,7 +1118,7 @@ static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
* that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
* do anything here.
*/
- if (!alternatives_applied)
+ if (!alternative_is_applied(ARM64_HAS_VIRT_HOST_EXTN))
write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
}
#endif
--
1.9.1