Message-ID: <df3cd8a6-8e9e-2403-9995-267cd05b0fda@amd.com>
Date: Mon, 15 Apr 2024 18:12:44 +0200
From: "Gupta, Pankaj" <pankaj.gupta@....com>
To: Tom Lendacky <thomas.lendacky@....com>, linux-kernel@...r.kernel.org,
 x86@...nel.org, linux-coco@...ts.linux.dev, svsm-devel@...onut-svsm.dev
Cc: Thomas Gleixner <tglx@...utronix.de>, Ingo Molnar <mingo@...hat.com>,
 Borislav Petkov <bp@...en8.de>, Dave Hansen <dave.hansen@...ux.intel.com>,
 "H. Peter Anvin" <hpa@...or.com>, Andy Lutomirski <luto@...nel.org>,
 Peter Zijlstra <peterz@...radead.org>,
 Dan Williams <dan.j.williams@...el.com>, Michael Roth
 <michael.roth@....com>, Ashish Kalra <ashish.kalra@....com>
Subject: Re: [PATCH v3 07/14] x86/sev: Provide SVSM discovery support

On 3/25/2024 11:26 PM, Tom Lendacky wrote:
> The SVSM specification documents an alternative method of discovery for
> the SVSM using a reserved CPUID bit and a reserved MSR.
> 
> For the CPUID support, the SNP CPUID table is updated to set bit 28 of
> the EAX register of the 0x8000001f leaf when an SVSM is present. This bit
> has been reserved for use in this capacity.
> 
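For reference, a guest can sanity-check this bit with something like the
untested userspace sketch below. The leaf and bit values are the ones this
patch defines; the helper names and the use of GCC's <cpuid.h> wrapper are
mine, and the result is only meaningful when run inside an SEV-SNP guest
whose CPUID table was set up as in setup_svsm_ca() below:

/* Probe CPUID leaf 0x8000001f, EAX bit 28 ("SNP SVSM is present"). */
#include <cpuid.h>
#include <stdbool.h>
#include <stdio.h>

static bool svsm_present(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* __get_cpuid() returns 0 if the leaf is not available. */
	if (!__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx))
		return false;

	return eax & (1u << 28);
}

int main(void)
{
	printf("SVSM %spresent\n", svsm_present() ? "" : "not ");
	return 0;
}
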
> For the MSR support, a new reserved MSR 0xc001f000 has been defined. A #VC
> should be generated when accessing this MSR. The #VC handler is expected
> to ignore writes to this MSR and return the physical calling area address
> (CAA) on reads of this MSR.
> 
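And a matching untested sketch for the MSR side, reading the CAA physical
address from userspace. The 0xc001f000 value comes from this patch; going
through the Linux msr driver (/dev/cpu/0/msr, i.e. 'modprobe msr' and root)
is just my assumption for how to issue the RDMSR. Inside an SEV-SNP guest
the access takes a #VC, which the handler added below resolves by returning
the per-CPU CAA:

/* Read MSR_SVSM_CAA (0xc001f000) via the msr driver. */
#include <fcntl.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_SVSM_CAA 0xc001f000

int main(void)
{
	uint64_t caa;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}

	/* The msr driver uses the MSR number as the file offset. */
	if (pread(fd, &caa, sizeof(caa), MSR_SVSM_CAA) != sizeof(caa)) {
		perror("rdmsr MSR_SVSM_CAA");
		close(fd);
		return 1;
	}

	printf("SVSM CAA PA: 0x%016" PRIx64 "\n", caa);
	close(fd);
	return 0;
}
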
> Signed-off-by: Tom Lendacky <thomas.lendacky@....com>

Reviewed-by: Pankaj Gupta <pankaj.gupta@....com>

> ---
>   arch/x86/include/asm/cpufeatures.h |  1 +
>   arch/x86/include/asm/msr-index.h   |  2 ++
>   arch/x86/kernel/sev-shared.c       | 11 +++++++++++
>   arch/x86/kernel/sev.c              | 17 +++++++++++++++++
>   4 files changed, 31 insertions(+)
> 
> diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
> index a38f8f9ba657..51e7c879f057 100644
> --- a/arch/x86/include/asm/cpufeatures.h
> +++ b/arch/x86/include/asm/cpufeatures.h
> @@ -446,6 +446,7 @@
>   #define X86_FEATURE_V_TSC_AUX		(19*32+ 9) /* "" Virtual TSC_AUX */
>   #define X86_FEATURE_SME_COHERENT	(19*32+10) /* "" AMD hardware-enforced cache coherency */
>   #define X86_FEATURE_DEBUG_SWAP		(19*32+14) /* AMD SEV-ES full debug state swap support */
> +#define X86_FEATURE_SVSM_PRESENT	(19*32+28) /* "" SNP SVSM is present */
>   
>   /* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */
>   #define X86_FEATURE_NO_NESTED_DATA_BP	(20*32+ 0) /* "" No Nested Data Breakpoints */
> diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
> index 05956bd8bacf..cc4de3379265 100644
> --- a/arch/x86/include/asm/msr-index.h
> +++ b/arch/x86/include/asm/msr-index.h
> @@ -654,6 +654,8 @@
>   #define MSR_AMD64_RMP_BASE		0xc0010132
>   #define MSR_AMD64_RMP_END		0xc0010133
>   
> +#define MSR_SVSM_CAA			0xc001f000
> +
>   /* AMD Collaborative Processor Performance Control MSRs */
>   #define MSR_AMD_CPPC_CAP1		0xc00102b0
>   #define MSR_AMD_CPPC_ENABLE		0xc00102b1
> diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c
> index e63c0a6eccd6..17eb42c4ae71 100644
> --- a/arch/x86/kernel/sev-shared.c
> +++ b/arch/x86/kernel/sev-shared.c
> @@ -1559,6 +1559,8 @@ static enum es_result vc_check_opcode_bytes(struct es_em_ctxt *ctxt,
>   static void __head setup_svsm_ca(const struct cc_blob_sev_info *cc_info)
>   {
>   	struct snp_secrets_page_layout *secrets_page;
> +	struct snp_cpuid_table *cpuid_table;
> +	unsigned int i;
>   	u64 caa;
>   
>   	BUILD_BUG_ON(sizeof(*secrets_page) != PAGE_SIZE);
> @@ -1596,4 +1598,13 @@ static void __head setup_svsm_ca(const struct cc_blob_sev_info *cc_info)
>   	 */
>   	boot_svsm_caa = (struct svsm_ca *)caa;
>   	boot_svsm_caa_pa = caa;
> +
> +	/* Advertise the SVSM presence via CPUID. */
> +	cpuid_table = (struct snp_cpuid_table *)snp_cpuid_get_table();
> +	for (i = 0; i < cpuid_table->count; i++) {
> +		struct snp_cpuid_fn *fn = &cpuid_table->fn[i];
> +
> +		if (fn->eax_in == 0x8000001f)
> +			fn->eax |= BIT(28);
> +	}
>   }
> diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
> index ea8b43a0f01b..7f399ea87a3c 100644
> --- a/arch/x86/kernel/sev.c
> +++ b/arch/x86/kernel/sev.c
> @@ -1347,12 +1347,29 @@ int __init sev_es_efi_map_ghcbs(pgd_t *pgd)
>   	return 0;
>   }
>   
> +static enum es_result vc_handle_svsm_caa_msr(struct es_em_ctxt *ctxt)
> +{
> +	struct pt_regs *regs = ctxt->regs;
> +
> +	/* Writes to the SVSM CAA msr are ignored */
> +	if (ctxt->insn.opcode.bytes[1] == 0x30)
> +		return ES_OK;
> +
> +	regs->ax = lower_32_bits(this_cpu_read(svsm_caa_pa));
> +	regs->dx = upper_32_bits(this_cpu_read(svsm_caa_pa));
> +
> +	return ES_OK;
> +}
> +
>   static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
>   {
>   	struct pt_regs *regs = ctxt->regs;
>   	enum es_result ret;
>   	u64 exit_info_1;
>   
> +	if (regs->cx == MSR_SVSM_CAA)
> +		return vc_handle_svsm_caa_msr(ctxt);
> +
>   	/* Is it a WRMSR? */
>   	exit_info_1 = (ctxt->insn.opcode.bytes[1] == 0x30) ? 1 : 0;
>   

