Message-ID: <0fce2637-ca61-778f-8cf0-a28183758b52@foss.arm.com>
Date: Fri, 1 Mar 2019 00:58:48 -0600
From: Andre Przywara <andre.przywara@....com>
To: Jeremy Linton <jeremy.linton@....com>,
linux-arm-kernel@...ts.infradead.org
Cc: catalin.marinas@....com, will.deacon@....com, marc.zyngier@....com,
suzuki.poulose@....com, Dave.Martin@....com,
shankerd@...eaurora.org, julien.thierry@....com,
mlangsdo@...hat.com, stefan.wahren@....com,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH v5 06/10] arm64: Always enable spectrev2 vulnerability
detection
Hi,
On 2/26/19 7:05 PM, Jeremy Linton wrote:
> The sysfs patches need to display machine vulnerability
> status regardless of kernel config. Prepare for that
> by breaking out the vulnerability/mitigation detection
> code from the logic which implements the mitigation.
>
> Signed-off-by: Jeremy Linton <jeremy.linton@....com>
> ---
> arch/arm64/kernel/cpu_errata.c | 16 ++++++++--------
> 1 file changed, 8 insertions(+), 8 deletions(-)
>
> diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
> index 77f021e78a28..a27e1ee750e1 100644
> --- a/arch/arm64/kernel/cpu_errata.c
> +++ b/arch/arm64/kernel/cpu_errata.c
> @@ -109,12 +109,12 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
>
> atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
>
> -#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
> #include <asm/mmu_context.h>
> #include <asm/cacheflush.h>
>
> DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
>
> +
Nit: this adds an extra empty line here.
Apart from that picky and unimportant nit, the patch looks alright and
compiles both with and without CONFIG_HARDEN_BRANCH_PREDICTOR defined.
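As an aside, here is a standalone sketch of why the IS_ENABLED() form
builds either way: the call sits in a branch the compiler can prove dead
when the option is off, so it is type-checked and then discarded, as long
as a declaration (or stub) of the callee stays visible outside any #ifdef.
This is not the kernel code; the one-argument install_bp_hardening_cb(),
the 0/1 config macro and main() are made up for illustration only.

/*
 * Illustration only: the real IS_ENABLED() lives in
 * include/linux/kconfig.h and works on CONFIG_* symbols that are
 * either defined to 1 or not defined at all. Here we fake it with a
 * plain 0/1 macro. Flip it to 1 to emulate the option being set.
 */
#include <stdio.h>

#define CONFIG_HARDEN_BRANCH_PREDICTOR_ENABLED	0
#define IS_ENABLED(option)			(option)

typedef void (*bp_hardening_cb_t)(void);

/* Hypothetical stub; the real installer also takes the SMCCC
 * start/end pointers and writes into per-CPU data. */
static void install_bp_hardening_cb(bp_hardening_cb_t cb)
{
	cb();
	printf("mitigation callback installed\n");
}

static void qcom_link_stack_sanitization(void) { }

int main(void)
{
	/* Detection always runs ... */
	int need_wa = 1;

	if (!need_wa)
		return 0;

	/* ... but the mitigation is only wired up when configured in.
	 * With the macro at 0 the compiler drops the call as dead code,
	 * yet still type-checks it against the declaration above. */
	if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR_ENABLED))
		install_bp_hardening_cb(qcom_link_stack_sanitization);
	else
		printf("spectrev2 mitigation disabled by configuration\n");

	return 0;
}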
Reviewed-by: Andre Przywara <andre.przywara@....com>
Cheers,
Andre.
> #ifdef CONFIG_KVM_INDIRECT_VECTORS
> extern char __smccc_workaround_1_smc_start[];
> extern char __smccc_workaround_1_smc_end[];
> @@ -270,11 +270,11 @@ static int detect_harden_bp_fw(void)
> ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
> cb = qcom_link_stack_sanitization;
>
> - install_bp_hardening_cb(cb, smccc_start, smccc_end);
> + if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
> + install_bp_hardening_cb(cb, smccc_start, smccc_end);
>
> return 1;
> }
> -#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
>
> #ifdef CONFIG_ARM64_SSBD
> DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
> @@ -513,7 +513,6 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
> .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
> CAP_MIDR_RANGE_LIST(midr_list)
>
> -#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
> /*
> * List of CPUs that do not need any Spectre-v2 mitigation at all.
> */
> @@ -545,6 +544,11 @@ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
> if (!need_wa)
> return false;
>
> + if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
> + pr_warn_once("spectrev2 mitigation disabled by configuration\n");
> + return false;
> + }
> +
> /* forced off */
> if (__nospectre_v2) {
> pr_info_once("spectrev2 mitigation disabled by command line option\n");
> @@ -557,8 +561,6 @@ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
> return (need_wa > 0);
> }
>
> -#endif
> -
> #ifdef CONFIG_HARDEN_EL2_VECTORS
>
> static const struct midr_range arm64_harden_el2_vectors[] = {
> @@ -732,13 +734,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
> ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
> },
> #endif
> -#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
> {
> .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
> .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
> .matches = check_branch_predictor,
> },
> -#endif
> #ifdef CONFIG_HARDEN_EL2_VECTORS
> {
> .desc = "EL2 vector hardening",
>