lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250521095914.GBaC2j8kmYixjs66Sd@fat_crate.local>
Date: Wed, 21 May 2025 11:59:14 +0200
From: Borislav Petkov <bp@...en8.de>
To: Pawan Gupta <pawan.kumar.gupta@...ux.intel.com>
Cc: x86@...nel.org, David Kaplan <david.kaplan@....com>,
	linux-kernel@...r.kernel.org, "H. Peter Anvin" <hpa@...or.com>,
	Josh Poimboeuf <jpoimboe@...nel.org>
Subject: Re: [PATCH 2/2] x86/its: Allow "=stuff" mitigation when eIBRS is
 enabled

On Tue, May 20, 2025 at 10:35:36PM -0700, Pawan Gupta wrote:
> After a recent restructuring of ITS mitigation, RSB stuffing can no
> longer be enabled in eIBRS+Retpoline mode. Before ITS, retbleed
> mitigation only allowed stuffing when eIBRS was not enabled. This was
> perfectly fine since eIBRS mitigates retbleed.
> 
> However, RSB stuffing mitigation for ITS is still required with eIBRS.
> The restructuring solely relies on retbleed to deploy stuffing, and does
> not allow it when eIBRS is enabled. This behavior is different from
> what was before the restructuring.
> 
> Allow RSB stuffing mitigation when eIBRS+retpoline is enabled. Also
> allow retbleed and ITS mitigation to independently enable stuffing. The
> dependency is only with respect to retpoline. It is a valid case for
> retbleed to be mitigated by eIBRS while ITS deploys stuffing at the same
> time.

This one definitely needs splitting, just from reading the commit message
I can see separate patches.

> 
> Fixes: 8c57ca583ebf ("x86/bugs: Restructure ITS mitigation")
> Closes: https://lore.kernel.org/lkml/20250519235101.2vm6sc5txyoykb2r@desk/
> Signed-off-by: Pawan Gupta <pawan.kumar.gupta@...ux.intel.com>
> ---
>  arch/x86/kernel/cpu/bugs.c | 101 ++++++++++++++++++++++++++++++---------------
>  1 file changed, 67 insertions(+), 34 deletions(-)
> 
> diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
> index 7f94e6a5497d9a2d312a76095e48d6b364565777..642d234943fe8fc657c7331ceb3a605201444166 100644
> --- a/arch/x86/kernel/cpu/bugs.c
> +++ b/arch/x86/kernel/cpu/bugs.c
> @@ -113,6 +113,9 @@ void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
>  
>  static void __init set_return_thunk(void *thunk)
>  {
> +	if (thunk == x86_return_thunk)
> +		return;

This needs a separate patch too.

> +
>  	if (x86_return_thunk != __x86_return_thunk)
>  		pr_warn("x86/bugs: return thunk changed\n");
>  
> @@ -1120,6 +1123,39 @@ early_param("nospectre_v1", nospectre_v1_cmdline);
>  
>  enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE;
>  
> +static inline bool spectre_v2_in_retpoline_mode(enum spectre_v2_mitigation mode)
> +{
> +	switch (mode) {
> +	case SPECTRE_V2_NONE:
> +	case SPECTRE_V2_IBRS:
> +	case SPECTRE_V2_EIBRS:
> +	case SPECTRE_V2_LFENCE:
> +	case SPECTRE_V2_EIBRS_LFENCE:
> +		return false;
> +
> +	case SPECTRE_V2_RETPOLINE:
> +	case SPECTRE_V2_EIBRS_RETPOLINE:
> +		return true;
> +	}
> +
> +	pr_warn("Unknown spectre_v2 mitigation mode %d\n", mode);
> +
> +	return false;
> +}

A whole function with an almost useless switch-case just for this?

> +
> +/* Depends on spectre_v2 mitigation selected already */
> +static inline bool cdt_possible(enum spectre_v2_mitigation mode)
> +{
> +	if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING) ||
> +	    !IS_ENABLED(CONFIG_MITIGATION_RETPOLINE))
> +		return false;
> +
> +	if (!spectre_v2_in_retpoline_mode(mode))
> +		return false;

You're using this function only once here. What's wrong with

	if ((mode != SPECTRE_V2_RETPOLINE) &&
	    (mode != SPECTRE_V2_EIBRS_RETPOLINE))

?

> +
> +	return true;
> +}
> +
>  #undef pr_fmt
>  #define pr_fmt(fmt)     "RETBleed: " fmt
>  
> @@ -1258,24 +1294,16 @@ static void __init retbleed_update_mitigation(void)
>  	if (retbleed_mitigation == RETBLEED_MITIGATION_NONE)
>  		goto out;
>  
> -	/*
> -	 * retbleed=stuff is only allowed on Intel.  If stuffing can't be used
> -	 * then a different mitigation will be selected below.
> -	 *
> -	 * its=stuff will also attempt to enable stuffing.
> -	 */
> -	if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF ||
> -	    its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF) {
> -		if (spectre_v2_enabled != SPECTRE_V2_RETPOLINE) {
> -			pr_err("WARNING: retbleed=stuff depends on spectre_v2=retpoline\n");
> -			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
> -		} else {
> -			if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
> -				pr_info("Retbleed mitigation updated to stuffing\n");
> +	 /* ITS can also enable stuffing */

This needs a separate patch.

> +	if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF)
> +		retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
>  
> -			retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
> -		}
> +	if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF &&
> +	    !cdt_possible(spectre_v2_enabled)) {
> +		pr_err("WARNING: retbleed=stuff depends on retpoline\n");
> +		retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
>  	}
> +
>  	/*
>  	 * Let IBRS trump all on Intel without affecting the effects of the
>  	 * retbleed= cmdline option except for call depth based stuffing
> @@ -1294,15 +1322,15 @@ static void __init retbleed_update_mitigation(void)
>  			if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
>  				pr_err(RETBLEED_INTEL_MSG);
>  		}
> -		/* If nothing has set the mitigation yet, default to NONE. */
> -		if (retbleed_mitigation == RETBLEED_MITIGATION_AUTO)
> -			retbleed_mitigation = RETBLEED_MITIGATION_NONE;
>  	}
> +
> +	/* If nothing has set the mitigation yet, default to NONE. */
> +	if (retbleed_mitigation == RETBLEED_MITIGATION_AUTO)
> +		retbleed_mitigation = RETBLEED_MITIGATION_NONE;
>  out:
>  	pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
>  }
>  
> -
>  static void __init retbleed_apply_mitigation(void)
>  {
>  	bool mitigate_smt = false;
> @@ -1449,6 +1477,7 @@ static void __init its_update_mitigation(void)
>  		its_mitigation = ITS_MITIGATION_OFF;
>  		break;
>  	case SPECTRE_V2_RETPOLINE:
> +	case SPECTRE_V2_EIBRS_RETPOLINE:
>  		/* Retpoline+CDT mitigates ITS */
>  		if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF)
>  			its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
> @@ -1462,13 +1491,8 @@ static void __init its_update_mitigation(void)
>  		break;
>  	}
>  
> -	/*
> -	 * retbleed_update_mitigation() will try to do stuffing if its=stuff.
> -	 * If it can't, such as if spectre_v2!=retpoline, then fall back to
> -	 * aligned thunks.
> -	 */
>  	if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF &&
> -	    retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
> +	    !cdt_possible(spectre_v2_enabled))
>  		its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
>  
>  	pr_info("%s\n", its_strings[its_mitigation]);
> @@ -1476,15 +1500,24 @@ static void __init its_update_mitigation(void)
>  
>  static void __init its_apply_mitigation(void)
>  {
> -	/* its=stuff forces retbleed stuffing and is enabled there. */
> -	if (its_mitigation != ITS_MITIGATION_ALIGNED_THUNKS)
> -		return;
> -
> -	if (!boot_cpu_has(X86_FEATURE_RETPOLINE))
> -		setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS);
> +	switch (its_mitigation) {
> +	case ITS_MITIGATION_OFF:
> +	case ITS_MITIGATION_AUTO:
> +	case ITS_MITIGATION_VMEXIT_ONLY:
> +		break;
> +	case ITS_MITIGATION_ALIGNED_THUNKS:
> +		if (!boot_cpu_has(X86_FEATURE_RETPOLINE))
> +			setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS);
>  
> -	setup_force_cpu_cap(X86_FEATURE_RETHUNK);
> -	set_return_thunk(its_return_thunk);
> +		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
> +		set_return_thunk(its_return_thunk);
> +		break;
> +	case ITS_MITIGATION_RETPOLINE_STUFF:
> +		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
> +		setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
> +		set_return_thunk(call_depth_return_thunk);
> +		break;
> +	}

I don't think you need to return the switch-case back but this is
unreviewable. Please split. It is perfectly fine if you split into trivial
patches which do one logical thing and one logical thing only.

Thx.

-- 
Regards/Gruss,
    Boris.

https://people.kernel.org/tglx/notes-about-netiquette

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ