Message-ID: <20170930115604.w4g3oplmyo5oyjeq@pd.tnic>
Date: Sat, 30 Sep 2017 13:56:04 +0200
From: Borislav Petkov <bp@...e.de>
To: Brijesh Singh <brijesh.singh@....com>,
Tom Lendacky <thomas.lendacky@....com>
Cc: Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
"H. Peter Anvin" <hpa@...or.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Radim Krčmář <rkrcmar@...hat.com>,
kvm@...r.kernel.org, x86@...nel.org, linux-kernel@...r.kernel.org
Subject: [PATCH] x86/CPU/AMD, mm: Extend with mem_encrypt=sme option
On Fri, Sep 29, 2017 at 06:06:52PM -0500, Brijesh Singh wrote:
> mem_encrypt=on activates both SME and SEV. Add a new argument to disable
> SEV while still allowing SME. The argument can be useful when SEV has
> issues and we want to disable it.
>
> early_detect_mem_encrypt() [cpu/amd.c] will need to know the state of
> the mem_encrypt= argument. Since early_detect_mem_encrypt() is not defined
> as __init, we cannot use the 'boot_command_line' variable to parse the
> cmdline argument. We introduce a new function me_cmdline_state() to get
> the cmdline state from mem_encrypt.c.
>
> Cc: Thomas Gleixner <tglx@...utronix.de>
> Cc: Ingo Molnar <mingo@...hat.com>
> Cc: "H. Peter Anvin" <hpa@...or.com>
> Cc: Paolo Bonzini <pbonzini@...hat.com>
> Cc: "Radim Krčmář" <rkrcmar@...hat.com>
> Cc: Borislav Petkov <bp@...e.de>
> Cc: kvm@...r.kernel.org
> Cc: x86@...nel.org
> Cc: linux-kernel@...r.kernel.org
> Cc: Tom Lendacky <thomas.lendacky@....com>
> Signed-off-by: Brijesh Singh <brijesh.singh@....com>
> ---
Ok, I went and simplified this whole code path a bit because it was
needlessly complex. Below is the result, only compile-tested.
Brijesh, Tom, guys, please check my logic, I might've missed a case.
Thanks.
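To make the intended ordering easier to eyeball, here is a tiny standalone
sketch of the decision flow (userspace C, HW checks stubbed out as booleans,
names like hw_sme/decide() made up; the authoritative version is sme_enable()
in the patch below):

/* sketch.c - not kernel code; the CPUID/MSR checks are stubbed booleans */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool hw_sme = true, hw_sev = true;	/* stand-ins for CPUID/MSR state */

static void decide(const char *arg)
{
	bool sme = false, sev = false;

	/* 1./2. bail out early if the HW doesn't do SME/SEV at all */
	if (!hw_sme && !hw_sev)
		return;

	/* 3. only then look at mem_encrypt= */
	if (!strcmp(arg, "off")) {
		/* both stay off */
	} else if (!strcmp(arg, "on")) {
		sme = hw_sme;
		sev = hw_sev;
	} else if (!strcmp(arg, "sme")) {
		sme = hw_sme;			/* the SEV chicken bit */
	}

	printf("mem_encrypt=%-3s -> SME:%d SEV:%d\n", arg, sme, sev);
}

int main(void)
{
	decide("on");
	decide("sme");
	decide("off");
	return 0;
}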
---
From: Borislav Petkov <bp@...e.de>
Date: Sat, 30 Sep 2017 13:33:26 +0200
Subject: [PATCH] x86/CPU/AMD, mm: Extend with mem_encrypt=sme option
Extend the mem_encrypt= cmdline option with the "sme" argument so that
one can enable SME only (i.e., this serves as a SEV chicken bit). While
at it, streamline and document the flow logic here:
1. Check whether the SME CPUID leaf is present
2. Check whether the HW has enabled SME/SEV
3. Only *then* look at any potential command line params because doing
so before is pointless.
3.1 mem_encrypt=on - enable both SME/SEV
3.2 mem_encrypt=sme - enable only SME
3.3 mem_encrypt=off - disable both
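That is, on the kernel command line (illustrative):

  mem_encrypt=on      # enable SME and SEV
  mem_encrypt=sme     # enable SME only (SEV chicken bit)
  mem_encrypt=off     # disable both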
In addition, building the kernel with CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT=y
enables both by default.
Also shorten variable names and simplify the code flow.
This is based on a patch by Brijesh Singh <brijesh.singh@....com>.
Signed-off-by: Borislav Petkov <bp@...e.de>
Cc: Brijesh Singh <brijesh.singh@....com>
Cc: Tom Lendacky <thomas.lendacky@....com>
Cc: kvm@...r.kernel.org
Cc: x86@...nel.org
---
arch/x86/include/asm/mem_encrypt.h | 2 +
arch/x86/kernel/cpu/amd.c | 6 +++
arch/x86/mm/mem_encrypt.c | 82 +++++++++++++++++++-------------------
3 files changed, 49 insertions(+), 41 deletions(-)
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 3ba68c92be1b..175310f00202 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -19,6 +19,8 @@
#include <asm/bootparam.h>
+extern bool sev_enabled;
+
#ifdef CONFIG_AMD_MEM_ENCRYPT
extern u64 sme_me_mask;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index c1234aa0550c..d0669f3966a6 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -13,6 +13,7 @@
#include <asm/smp.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>
+#include <asm/mem_encrypt.h>
#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
@@ -32,6 +33,8 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
*/
static u32 nodes_per_socket = 1;
+bool sev_enabled __section(.data) = false;
+
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
u32 gprs[8] = { 0 };
@@ -588,6 +591,9 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
if (IS_ENABLED(CONFIG_X86_32))
goto clear_all;
+ if (!sev_enabled)
+ goto clear_sev;
+
rdmsrl(MSR_K7_HWCR, msr);
if (!(msr & MSR_K7_HWCR_SMMLOCK))
goto clear_sev;
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 057417a3d9b4..9b83bc1be7c0 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -27,12 +27,14 @@
#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/cmdline.h>
+#include <asm/mem_encrypt.h>
#include "mm_internal.h"
-static char sme_cmdline_arg[] __initdata = "mem_encrypt";
-static char sme_cmdline_on[] __initdata = "on";
-static char sme_cmdline_off[] __initdata = "off";
+static char sme_cmd[] __initdata = "mem_encrypt";
+static char sme_cmd_on[] __initdata = "on";
+static char sme_cmd_off[] __initdata = "off";
+static char sme_cmd_sme[] __initdata = "sme";
/*
* Since SME related variables are set early in the boot process they must
@@ -44,8 +46,6 @@ EXPORT_SYMBOL_GPL(sme_me_mask);
DEFINE_STATIC_KEY_FALSE(__sev);
EXPORT_SYMBOL_GPL(__sev);
-static bool sev_enabled __section(.data) = false;
-
/* Buffer used for early in-place encryption by BSP, no locking needed */
static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);
@@ -768,13 +768,13 @@ void __init sme_encrypt_kernel(void)
void __init __nostackprotector sme_enable(struct boot_params *bp)
{
- const char *cmdline_ptr, *cmdline_arg, *cmdline_on, *cmdline_off;
+ const char *cmdline_ptr, *cmd, *cmd_on, *cmd_off, *cmd_sme;
unsigned int eax, ebx, ecx, edx;
unsigned long feature_mask;
- bool active_by_default;
- unsigned long me_mask;
+ u64 me_mask, msr;
char buffer[16];
- u64 msr;
+ bool sme_only = false;
+ int ret;
/* Check for the SME/SEV support leaf */
eax = 0x80000000;
@@ -808,55 +808,55 @@ void __init __nostackprotector sme_enable(struct boot_params *bp)
if (!(eax & feature_mask))
return;
- me_mask = 1UL << (ebx & 0x3f);
-
- /* Check if memory encryption is enabled */
+ /* For SME, check the SYSCFG MSR */
if (feature_mask == AMD_SME_BIT) {
- /* For SME, check the SYSCFG MSR */
msr = __rdmsr(MSR_K8_SYSCFG);
if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
return;
- } else {
- /* For SEV, check the SEV MSR */
+ }
+
+ /* For SEV, check the SEV MSR */
+ if (feature_mask == AMD_SEV_BIT) {
msr = __rdmsr(MSR_AMD64_SEV);
if (!(msr & MSR_AMD64_SEV_ENABLED))
return;
-
- /* SEV state cannot be controlled by a command line option */
- sme_me_mask = me_mask;
- sev_enabled = true;
- return;
}
+ me_mask = BIT_ULL(ebx & 0x3f);
+
/*
* Fixups have not been applied to phys_base yet and we're running
* identity mapped, so we must obtain the address to the SME command
* line argument data using rip-relative addressing.
*/
- asm ("lea sme_cmdline_arg(%%rip), %0"
- : "=r" (cmdline_arg)
- : "p" (sme_cmdline_arg));
- asm ("lea sme_cmdline_on(%%rip), %0"
- : "=r" (cmdline_on)
- : "p" (sme_cmdline_on));
- asm ("lea sme_cmdline_off(%%rip), %0"
- : "=r" (cmdline_off)
- : "p" (sme_cmdline_off));
-
- if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT))
- active_by_default = true;
- else
- active_by_default = false;
+ asm ("lea sme_cmd(%%rip), %0" : "=r" (cmd) : "p" (sme_cmd));
+ asm ("lea sme_cmd_on(%%rip), %0" : "=r" (cmd_on) : "p" (sme_cmd_on));
+ asm ("lea sme_cmd_off(%%rip), %0" : "=r" (cmd_off) : "p" (sme_cmd_off));
+ asm ("lea sme_cmd_sme(%%rip), %0" : "=r" (cmd_sme) : "p" (sme_cmd_sme));
cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
- ((u64)bp->ext_cmd_line_ptr << 32));
+ ((u64)bp->ext_cmd_line_ptr << 32));
- cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer));
+ ret = cmdline_find_option(cmdline_ptr, cmd, buffer, sizeof(buffer));
+ if (ret < 0)
+ return;
- if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
- sme_me_mask = me_mask;
- else if (!strncmp(buffer, cmdline_off, sizeof(buffer)))
+ if (!strncmp(buffer, cmd_off, sizeof(buffer))) {
sme_me_mask = 0;
- else
- sme_me_mask = active_by_default ? me_mask : 0;
+ return;
+ } else if (!strncmp(buffer, cmd_on, sizeof(buffer))) {
+ sme_me_mask = me_mask;
+ } else if (!strncmp(buffer, cmd_sme, sizeof(buffer))) {
+ sme_only = true;
+ }
+
+ if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT))
+ sme_me_mask = me_mask;
+
+ if (sme_only)
+ return;
+
+ /* For SEV, check the SEV MSR */
+ if (feature_mask == AMD_SEV_BIT)
+ sev_enabled = true;
}
--
2.13.0
SUSE Linux GmbH, GF: Felix Imendörffer, Jane Smithard, Graham Norton, HRB 21284 (AG Nürnberg)