lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite for Android: free password hash cracker in your pocket
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Fri, 1 Mar 2019 00:57:08 -0600
From:   Andre Przywara <andre.przywara@....com>
To:     Jeremy Linton <jeremy.linton@....com>,
        linux-arm-kernel@...ts.infradead.org
Cc:     catalin.marinas@....com, will.deacon@....com, marc.zyngier@....com,
        suzuki.poulose@....com, Dave.Martin@....com,
        shankerd@...eaurora.org, julien.thierry@....com,
        mlangsdo@...hat.com, stefan.wahren@....com,
        linux-kernel@...r.kernel.org
Subject: Re: [PATCH v5 04/10] arm64: Advertise mitigation of Spectre-v2, or
 lack thereof

Hi,

On 2/26/19 7:05 PM, Jeremy Linton wrote:
> From: Marc Zyngier <marc.zyngier@....com>
> 
> We currently have a list of CPUs affected by Spectre-v2, for which
> we check that the firmware implements ARCH_WORKAROUND_1. It turns
> out that not all firmware implementations provide the required
> mitigation, and that we fail to let the user know about it.
> 
> Instead, let's slightly revamp our checks, and rely on a whitelist
> of cores that are known to be non-vulnerable, and let the user know
> the status of the mitigation in the kernel log.
> 
> Signed-off-by: Marc Zyngier <marc.zyngier@....com>
> [This makes more sense in front of the sysfs patch]
> [Pick pieces of that patch into this and move it earlier]
> Signed-off-by: Jeremy Linton <jeremy.linton@....com>

Indeed a whitelist is much better.

Reviewed-by: Andre Przywara <andre.przywara@....com>

Cheers,
Andre.

> ---
>   arch/arm64/kernel/cpu_errata.c | 108 +++++++++++++++++----------------
>   1 file changed, 56 insertions(+), 52 deletions(-)
> 
> diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
> index ad58958becb6..c8972255b365 100644
> --- a/arch/arm64/kernel/cpu_errata.c
> +++ b/arch/arm64/kernel/cpu_errata.c
> @@ -131,9 +131,9 @@ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
>   	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
>   }
>   
> -static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
> -				      const char *hyp_vecs_start,
> -				      const char *hyp_vecs_end)
> +static void install_bp_hardening_cb(bp_hardening_cb_t fn,
> +				    const char *hyp_vecs_start,
> +				    const char *hyp_vecs_end)
>   {
>   	static DEFINE_RAW_SPINLOCK(bp_lock);
>   	int cpu, slot = -1;
> @@ -177,23 +177,6 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
>   }
>   #endif	/* CONFIG_KVM_INDIRECT_VECTORS */
>   
> -static void  install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
> -				     bp_hardening_cb_t fn,
> -				     const char *hyp_vecs_start,
> -				     const char *hyp_vecs_end)
> -{
> -	u64 pfr0;
> -
> -	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
> -		return;
> -
> -	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
> -	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
> -		return;
> -
> -	__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
> -}
> -
>   #include <uapi/linux/psci.h>
>   #include <linux/arm-smccc.h>
>   #include <linux/psci.h>
> @@ -228,31 +211,27 @@ static int __init parse_nospectre_v2(char *str)
>   }
>   early_param("nospectre_v2", parse_nospectre_v2);
>   
> -static void
> -enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
> +/*
> + * -1: No workaround
> + *  0: No workaround required
> + *  1: Workaround installed
> + */
> +static int detect_harden_bp_fw(void)
>   {
>   	bp_hardening_cb_t cb;
>   	void *smccc_start, *smccc_end;
>   	struct arm_smccc_res res;
>   	u32 midr = read_cpuid_id();
>   
> -	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
> -		return;
> -
> -	if (__nospectre_v2) {
> -		pr_info_once("spectrev2 mitigation disabled by command line option\n");
> -		return;
> -	}
> -
>   	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
> -		return;
> +		return -1;
>   
>   	switch (psci_ops.conduit) {
>   	case PSCI_CONDUIT_HVC:
>   		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
>   				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
>   		if ((int)res.a0 < 0)
> -			return;
> +			return -1;
>   		cb = call_hvc_arch_workaround_1;
>   		/* This is a guest, no need to patch KVM vectors */
>   		smccc_start = NULL;
> @@ -263,23 +242,23 @@ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
>   		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
>   				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
>   		if ((int)res.a0 < 0)
> -			return;
> +			return -1;
>   		cb = call_smc_arch_workaround_1;
>   		smccc_start = __smccc_workaround_1_smc_start;
>   		smccc_end = __smccc_workaround_1_smc_end;
>   		break;
>   
>   	default:
> -		return;
> +		return -1;
>   	}
>   
>   	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
>   	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
>   		cb = qcom_link_stack_sanitization;
>   
> -	install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
> +	install_bp_hardening_cb(cb, smccc_start, smccc_end);
>   
> -	return;
> +	return 1;
>   }
>   #endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */
>   
> @@ -521,24 +500,49 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
>   	CAP_MIDR_RANGE_LIST(midr_list)
>   
>   #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
> -
>   /*
> - * List of CPUs where we need to issue a psci call to
> - * harden the branch predictor.
> + * List of CPUs that do not need any Spectre-v2 mitigation at all.
>    */
> -static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
> -	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
> -	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
> -	MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
> -	MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
> -	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
> -	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
> -	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
> -	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
> -	MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER),
> -	{},
> +static const struct midr_range spectre_v2_safe_list[] = {
> +	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
> +	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
> +	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
> +	{ /* sentinel */ }
>   };
>   
> +static bool __maybe_unused
> +check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
> +{
> +	int need_wa;
> +
> +	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
> +
> +	/* If the CPU has CSV2 set, we're safe */
> +	if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
> +						 ID_AA64PFR0_CSV2_SHIFT))
> +		return false;
> +
> +	/* Alternatively, we have a list of unaffected CPUs */
> +	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
> +		return false;
> +
> +	/* Fallback to firmware detection */
> +	need_wa = detect_harden_bp_fw();
> +	if (!need_wa)
> +		return false;
> +
> +	/* forced off */
> +	if (__nospectre_v2) {
> +		pr_info_once("spectrev2 mitigation disabled by command line option\n");
> +		return false;
> +	}
> +
> +	if (need_wa < 0)
> +		pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
> +
> +	return (need_wa > 0);
> +}
> +
>   #endif
>   
>   #ifdef CONFIG_HARDEN_EL2_VECTORS
> @@ -717,8 +721,8 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
>   #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
>   	{
>   		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
> -		.cpu_enable = enable_smccc_arch_workaround_1,
> -		ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
> +		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
> +		.matches = check_branch_predictor,
>   	},
>   #endif
>   #ifdef CONFIG_HARDEN_EL2_VECTORS
> 

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ