Message-ID: <26bdd83f-f4a0-dbfc-e6f9-fc3780ce0080@amd.com>
Date:   Sat, 30 Sep 2017 16:17:39 -0500
From:   Brijesh Singh <brijesh.singh@....com>
To:     Borislav Petkov <bp@...e.de>,
        Tom Lendacky <thomas.lendacky@....com>
Cc:     brijesh.singh@....com, Thomas Gleixner <tglx@...utronix.de>,
        Ingo Molnar <mingo@...hat.com>,
        "H. Peter Anvin" <hpa@...or.com>,
        Paolo Bonzini <pbonzini@...hat.com>,
        Radim Krčmář <rkrcmar@...hat.com>,
        kvm@...r.kernel.org, x86@...nel.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH] x86/CPU/AMD, mm: Extend with mem_encrypt=sme option



On 9/30/17 6:56 AM, Borislav Petkov wrote:
...
> Ok, I went and simplified this whole code path a bit because it was
> needlessly a bit too complex. Below is the result, only compile-tested.
>
> Brijesh, Tom, guys, please check my logic, I might've missed a case.

I will take a closer look at this patch on Monday, but at a glance I am not
sure the patch addresses our main issue: we were trying to limit the SEV
feature exposure from the host OS. The current logic is:

1. Check whether the SME CPUID leaf is present

2. Check whether we are running under a hypervisor

3. If we are running under a hypervisor, check the SEV_ENABLED bit in
MSR_AMD64_SEV

3.1 If the bit is cleared, it is not an SEV guest. Return from the function.

3.2 If the bit is set, it is an SEV guest. We set sev_enabled to 'true' and
also set 'sme_me_mask', then return from the function. The SEV state
*cannot* be controlled by a command line option.

4. We are booted on bare metal. Check whether memory encryption is enabled
and, if so, look at any potential command line option (the whole flow is
roughly sketched below).
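
Roughly, the flow above looks like this (a simplified sketch only; the
function name is made up and this is not the actual sme_enable() code):

        /* Sketch of the detection flow above, not the real implementation. */
        static void sev_detect_sketch(void)
        {
                unsigned int eax, ebx, ecx, edx;
                u64 msr;

                /* 1. Is the SME/SEV CPUID leaf (0x8000001f) available? */
                eax = 0x80000000;
                ecx = 0;
                native_cpuid(&eax, &ebx, &ecx, &edx);
                if (eax < 0x8000001f)
                        return;

                /* 2. Are we running under a hypervisor (CPUID 1, ECX[31])? */
                eax = 1;
                ecx = 0;
                native_cpuid(&eax, &ebx, &ecx, &edx);

                if (ecx & BIT(31)) {
                        /* 3. Guest: check the SEV_ENABLED bit in MSR_AMD64_SEV. */
                        msr = __rdmsr(MSR_AMD64_SEV);
                        if (!(msr & MSR_AMD64_SEV_ENABLED))
                                return;         /* 3.1: not an SEV guest */

                        /* 3.2: SEV guest; SEV state is not command line controlled. */
                        eax = 0x8000001f;
                        ecx = 0;
                        native_cpuid(&eax, &ebx, &ecx, &edx);
                        sme_me_mask = BIT_ULL(ebx & 0x3f); /* C-bit position in EBX[5:0] */
                        sev_enabled = true;
                        return;
                }

                /* 4. Bare metal: check SYSCFG, then any mem_encrypt= option. */
                /* ... see sme_enable() in mem_encrypt.c [1] ... */
        }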

Please see sme_enable() in mem_encrypt.c [1].

[1] https://github.com/codomania/tip/blob/sev-v5-p1/arch/x86/mm/mem_encrypt.c

Thanks
Brijesh

> Thanks.
>
> ---
> From: Borislav Petkov <bp@...e.de>
> Date: Sat, 30 Sep 2017 13:33:26 +0200
> Subject: [PATCH] x86/CPU/AMD, mm: Extend with mem_encrypt=sme option
>
> Extend the mem_encrypt= cmdline option with the "sme" argument so that
> one can enable SME only (i.e., this serves as a SEV chicken bit). While
> at it, streamline and document the flow logic here:
>
> 1. Check whether the SME CPUID leaf is present
>
> 2. Check whether the HW has enabled SME/SEV
>
> 3. Only *then* look at any potential command line params because doing
> so before is pointless.
>
> 3.1 mem_encrypt=on  - enable both SME/SEV
> 3.2 mem_encrypt=sme - enable only SME
> 3.3 mem_encrypt=off - disable both
>
> In addition, CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT enables both if
> the kernel is built with it enabled.
>
> While at it, shorten variable names, simplify code flow.
>
> This is based on a patch by Brijesh Singh <brijesh.singh@....com>.
>
> Signed-off-by: Borislav Petkov <bp@...e.de>
> Cc: Brijesh Singh <brijesh.singh@....com>
> Cc: Tom Lendacky <thomas.lendacky@....com>
> Cc: kvm@...r.kernel.org
> Cc: x86@...nel.org
> ---
>  arch/x86/include/asm/mem_encrypt.h |  2 +
>  arch/x86/kernel/cpu/amd.c          |  6 +++
>  arch/x86/mm/mem_encrypt.c          | 82 +++++++++++++++++++-------------------
>  3 files changed, 49 insertions(+), 41 deletions(-)
>
> diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
> index 3ba68c92be1b..175310f00202 100644
> --- a/arch/x86/include/asm/mem_encrypt.h
> +++ b/arch/x86/include/asm/mem_encrypt.h
> @@ -19,6 +19,8 @@
>  
>  #include <asm/bootparam.h>
>  
> +extern bool sev_enabled;
> +
>  #ifdef CONFIG_AMD_MEM_ENCRYPT
>  
>  extern u64 sme_me_mask;
> diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
> index c1234aa0550c..d0669f3966a6 100644
> --- a/arch/x86/kernel/cpu/amd.c
> +++ b/arch/x86/kernel/cpu/amd.c
> @@ -13,6 +13,7 @@
>  #include <asm/smp.h>
>  #include <asm/pci-direct.h>
>  #include <asm/delay.h>
> +#include <asm/mem_encrypt.h>
>  
>  #ifdef CONFIG_X86_64
>  # include <asm/mmconfig.h>
> @@ -32,6 +33,8 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
>   */
>  static u32 nodes_per_socket = 1;
>  
> +bool sev_enabled __section(.data) = false;
> +
>  static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
>  {
>  	u32 gprs[8] = { 0 };
> @@ -588,6 +591,9 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
>  		if (IS_ENABLED(CONFIG_X86_32))
>  			goto clear_all;
>  
> +		if (!sev_enabled)
> +			goto clear_sev;
> +
>  		rdmsrl(MSR_K7_HWCR, msr);
>  		if (!(msr & MSR_K7_HWCR_SMMLOCK))
>  			goto clear_sev;
> diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
> index 057417a3d9b4..9b83bc1be7c0 100644
> --- a/arch/x86/mm/mem_encrypt.c
> +++ b/arch/x86/mm/mem_encrypt.c
> @@ -27,12 +27,14 @@
>  #include <asm/processor-flags.h>
>  #include <asm/msr.h>
>  #include <asm/cmdline.h>
> +#include <asm/mem_encrypt.h>
>  
>  #include "mm_internal.h"
>  
> -static char sme_cmdline_arg[] __initdata = "mem_encrypt";
> -static char sme_cmdline_on[]  __initdata = "on";
> -static char sme_cmdline_off[] __initdata = "off";
> +static char sme_cmd[] __initdata = "mem_encrypt";
> +static char sme_cmd_on[]  __initdata = "on";
> +static char sme_cmd_off[] __initdata = "off";
> +static char sme_cmd_sme[] __initdata = "sme";
>  
>  /*
>   * Since SME related variables are set early in the boot process they must
> @@ -44,8 +46,6 @@ EXPORT_SYMBOL_GPL(sme_me_mask);
>  DEFINE_STATIC_KEY_FALSE(__sev);
>  EXPORT_SYMBOL_GPL(__sev);
>  
> -static bool sev_enabled __section(.data) = false;
> -
>  /* Buffer used for early in-place encryption by BSP, no locking needed */
>  static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);
>  
> @@ -768,13 +768,13 @@ void __init sme_encrypt_kernel(void)
>  
>  void __init __nostackprotector sme_enable(struct boot_params *bp)
>  {
> -	const char *cmdline_ptr, *cmdline_arg, *cmdline_on, *cmdline_off;
> +	const char *cmdline_ptr, *cmd, *cmd_on, *cmd_off, *cmd_sme;
>  	unsigned int eax, ebx, ecx, edx;
>  	unsigned long feature_mask;
> -	bool active_by_default;
> -	unsigned long me_mask;
> +	u64 me_mask, msr;
>  	char buffer[16];
> -	u64 msr;
> +	bool sme_only = false;
> +	int ret;
>  
>  	/* Check for the SME/SEV support leaf */
>  	eax = 0x80000000;
> @@ -808,55 +808,55 @@ void __init __nostackprotector sme_enable(struct boot_params *bp)
>  	if (!(eax & feature_mask))
>  		return;
>  
> -	me_mask = 1UL << (ebx & 0x3f);
> -
> -	/* Check if memory encryption is enabled */
> +	/* For SME, check the SYSCFG MSR */
>  	if (feature_mask == AMD_SME_BIT) {
> -		/* For SME, check the SYSCFG MSR */
>  		msr = __rdmsr(MSR_K8_SYSCFG);
>  		if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
>  			return;
> -	} else {
> -		/* For SEV, check the SEV MSR */
> +	}
> +
> +	/* For SEV, check the SEV MSR */
> +	if (feature_mask == AMD_SEV_BIT) {
>  		msr = __rdmsr(MSR_AMD64_SEV);
>  		if (!(msr & MSR_AMD64_SEV_ENABLED))
>  			return;
> -
> -		/* SEV state cannot be controlled by a command line option */
> -		sme_me_mask = me_mask;
> -		sev_enabled = true;
> -		return;
>  	}
>  
> +	me_mask = BIT_ULL(ebx & 0x3f);
> +
>  	/*
>  	 * Fixups have not been applied to phys_base yet and we're running
>  	 * identity mapped, so we must obtain the address to the SME command
>  	 * line argument data using rip-relative addressing.
>  	 */
> -	asm ("lea sme_cmdline_arg(%%rip), %0"
> -	     : "=r" (cmdline_arg)
> -	     : "p" (sme_cmdline_arg));
> -	asm ("lea sme_cmdline_on(%%rip), %0"
> -	     : "=r" (cmdline_on)
> -	     : "p" (sme_cmdline_on));
> -	asm ("lea sme_cmdline_off(%%rip), %0"
> -	     : "=r" (cmdline_off)
> -	     : "p" (sme_cmdline_off));
> -
> -	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT))
> -		active_by_default = true;
> -	else
> -		active_by_default = false;
> +	asm ("lea sme_cmd(%%rip), %0"	  : "=r" (cmd)	   : "p" (sme_cmd));
> +	asm ("lea sme_cmd_on(%%rip), %0"  : "=r" (cmd_on)  : "p" (sme_cmd_on));
> +	asm ("lea sme_cmd_off(%%rip), %0" : "=r" (cmd_off) : "p" (sme_cmd_off));
> +	asm ("lea sme_cmd_sme(%%rip), %0" : "=r" (cmd_sme) : "p" (sme_cmd_sme));
>  
>  	cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
> -				     ((u64)bp->ext_cmd_line_ptr << 32));
> +				    ((u64)bp->ext_cmd_line_ptr << 32));
>  
> -	cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer));
> +	ret = cmdline_find_option(cmdline_ptr, cmd, buffer, sizeof(buffer));
> +	if (ret < 0)
> +		return;
>  
> -	if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
> -		sme_me_mask = me_mask;
> -	else if (!strncmp(buffer, cmdline_off, sizeof(buffer)))
> +	if (!strncmp(buffer, cmd_off, sizeof(buffer))) {
>  		sme_me_mask = 0;
> -	else
> -		sme_me_mask = active_by_default ? me_mask : 0;
> +		return;
> +	} else if (!strncmp(buffer, cmd_on, sizeof(buffer))) {
> +		sme_me_mask = me_mask;
> +	} else if (!strncmp(buffer, cmd_sme, sizeof(buffer))) {
> +		sme_only = true;
> +	}
> +
> +	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT))
> +		sme_me_mask = me_mask;
> +
> +	if (sme_only)
> +		return;
> +
> +	/* For SEV, check the SEV MSR */
> +	if (feature_mask == AMD_SEV_BIT)
> +		sev_enabled = true;
>  }
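
For completeness, with this patch the option would accept three values on the
kernel command line, as described in the commit message above (illustrative
example only):

        mem_encrypt=on          # enable both SME and SEV
        mem_encrypt=sme         # enable SME only (SEV chicken bit)
        mem_encrypt=off         # disable both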
