lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20170301184055.gl3iic3gir6zzb23@pd.tnic>
Date:   Wed, 1 Mar 2017 19:40:56 +0100
From:   Borislav Petkov <bp@...en8.de>
To:     Tom Lendacky <thomas.lendacky@....com>
Cc:     linux-arch@...r.kernel.org, linux-efi@...r.kernel.org,
        kvm@...r.kernel.org, linux-doc@...r.kernel.org, x86@...nel.org,
        linux-kernel@...r.kernel.org, kasan-dev@...glegroups.com,
        linux-mm@...ck.org, iommu@...ts.linux-foundation.org,
        Rik van Riel <riel@...hat.com>,
        Radim Krčmář <rkrcmar@...hat.com>,
        Toshimitsu Kani <toshi.kani@....com>,
        Arnd Bergmann <arnd@...db.de>,
        Jonathan Corbet <corbet@....net>,
        Matt Fleming <matt@...eblueprint.co.uk>,
        "Michael S. Tsirkin" <mst@...hat.com>,
        Joerg Roedel <joro@...tes.org>,
        Konrad Rzeszutek Wilk <konrad.wilk@...cle.com>,
        Paolo Bonzini <pbonzini@...hat.com>,
        Brijesh Singh <brijesh.singh@....com>,
        Ingo Molnar <mingo@...hat.com>,
        Alexander Potapenko <glider@...gle.com>,
        Andy Lutomirski <luto@...nel.org>,
        "H. Peter Anvin" <hpa@...or.com>,
        Andrey Ryabinin <aryabinin@...tuozzo.com>,
        Thomas Gleixner <tglx@...utronix.de>,
        Larry Woodman <lwoodman@...hat.com>,
        Dmitry Vyukov <dvyukov@...gle.com>
Subject: Re: [RFC PATCH v4 28/28] x86: Add support to make use of Secure
 Memory Encryption

On Thu, Feb 16, 2017 at 09:48:25AM -0600, Tom Lendacky wrote:
> This patch adds the support to check if SME has been enabled and if
> memory encryption should be activated (checking of command line option
> based on the configuration of the default state).  If memory encryption
> is to be activated, then the encryption mask is set and the kernel is
> encrypted "in place."
> 
> Signed-off-by: Tom Lendacky <thomas.lendacky@....com>
> ---
>  arch/x86/kernel/head_64.S          |    1 +
>  arch/x86/kernel/mem_encrypt_init.c |   71 +++++++++++++++++++++++++++++++++++-
>  arch/x86/mm/mem_encrypt.c          |    2 +
>  3 files changed, 73 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
> index edd2f14..e6820e7 100644
> --- a/arch/x86/kernel/head_64.S
> +++ b/arch/x86/kernel/head_64.S
> @@ -97,6 +97,7 @@ startup_64:
>  	 * Save the returned mask in %r12 for later use.
>  	 */
>  	push	%rsi
> +	movq	%rsi, %rdi
>  	call	sme_enable
>  	pop	%rsi
>  	movq	%rax, %r12
> diff --git a/arch/x86/kernel/mem_encrypt_init.c b/arch/x86/kernel/mem_encrypt_init.c
> index 07cbb90..35c5e3d 100644
> --- a/arch/x86/kernel/mem_encrypt_init.c
> +++ b/arch/x86/kernel/mem_encrypt_init.c
> @@ -19,6 +19,12 @@
>  #include <linux/mm.h>
>  
>  #include <asm/sections.h>
> +#include <asm/processor-flags.h>
> +#include <asm/msr.h>
> +#include <asm/cmdline.h>
> +
> +static char sme_cmdline_arg_on[] __initdata = "mem_encrypt=on";
> +static char sme_cmdline_arg_off[] __initdata = "mem_encrypt=off";
>  
>  extern void sme_encrypt_execute(unsigned long, unsigned long, unsigned long,
>  				void *, pgd_t *);
> @@ -217,8 +223,71 @@ unsigned long __init sme_get_me_mask(void)
>  	return sme_me_mask;
>  }
>  
> -unsigned long __init sme_enable(void)
> +unsigned long __init sme_enable(void *boot_data)

unsigned long __init sme_enable(struct boot_params *bp)

works too.

And then you need to correct the function signature in the
!CONFIG_AMD_MEM_ENCRYPT case, at the end of this file, too:

unsigned long __init sme_enable(struct boot_params *bp)		{ return 0; }

>  {
> +	struct boot_params *bp = boot_data;
> +	unsigned int eax, ebx, ecx, edx;
> +	unsigned long cmdline_ptr;
> +	bool enable_if_found;
> +	void *cmdline_arg;
> +	u64 msr;
> +
> +	/* Check for an AMD processor */
> +	eax = 0;
> +	ecx = 0;
> +	native_cpuid(&eax, &ebx, &ecx, &edx);
> +	if ((ebx != 0x68747541) || (edx != 0x69746e65) || (ecx != 0x444d4163))
> +		goto out;
> +
> +	/* Check for the SME support leaf */
> +	eax = 0x80000000;
> +	ecx = 0;
> +	native_cpuid(&eax, &ebx, &ecx, &edx);
> +	if (eax < 0x8000001f)
> +		goto out;
> +
> +	/*
> +	 * Check for the SME feature:
> +	 *   CPUID Fn8000_001F[EAX] - Bit 0
> +	 *     Secure Memory Encryption support
> +	 *   CPUID Fn8000_001F[EBX] - Bits 5:0
> +	 *     Pagetable bit position used to indicate encryption
> +	 */
> +	eax = 0x8000001f;
> +	ecx = 0;
> +	native_cpuid(&eax, &ebx, &ecx, &edx);
> +	if (!(eax & 1))
> +		goto out;
> +
> +	/* Check if SME is enabled */
> +	msr = native_read_msr(MSR_K8_SYSCFG);

This native_read_msr() wankery is adding this check:

	if (msr_tracepoint_active(__tracepoint_read_msr))

and here it is clearly too early for tracepoints. Please use __rdmsr(),
which does purely the MSR operation. (It also skips the exception
handling for when the RDMSR itself raises an exception — but we're very
early here anyway, so the MSR had better be there, otherwise we'll blow up.)

> +	if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
> +		goto out;
> +
> +	/*
> +	 * Fixups have not been to applied phys_base yet, so we must obtain

		...    not been applied to phys_base yet ...

> +	 * the address to the SME command line option in the following way.
> +	 */
> +	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT)) {
> +		asm ("lea sme_cmdline_arg_off(%%rip), %0"
> +		     : "=r" (cmdline_arg)
> +		     : "p" (sme_cmdline_arg_off));
> +		enable_if_found = false;
> +	} else {
> +		asm ("lea sme_cmdline_arg_on(%%rip), %0"
> +		     : "=r" (cmdline_arg)
> +		     : "p" (sme_cmdline_arg_on));
> +		enable_if_found = true;
> +	}
> +
> +	cmdline_ptr = bp->hdr.cmd_line_ptr | ((u64)bp->ext_cmd_line_ptr << 32);
> +
> +	if (cmdline_find_option_bool((char *)cmdline_ptr, cmdline_arg))
> +		sme_me_mask = enable_if_found ? 1UL << (ebx & 0x3f) : 0;
> +	else
> +		sme_me_mask = enable_if_found ? 0 : 1UL << (ebx & 0x3f);

I have a better idea: you can copy __cmdline_find_option() +
cmdline_find_option() to arch/x86/lib/cmdline.c in a pre-patch. Then,
pass in a buffer and check for "on" and "off". This way you don't
have to misuse the _bool() variant for something which is actually
"option=argument".

Thanks.

-- 
Regards/Gruss,
    Boris.

Good mailing practices for 400: avoid top-posting and trim the reply.

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ