Message-ID: <de69eefa-0da3-4142-bcd0-257248c403f1@amd.com>
Date: Fri, 25 Oct 2024 06:51:43 -0500
From: "Kalra, Ashish" <ashish.kalra@....com>
To: Tom Lendacky <thomas.lendacky@....com>, linux-kernel@...r.kernel.org,
 x86@...nel.org
Cc: Thomas Gleixner <tglx@...utronix.de>, Ingo Molnar <mingo@...hat.com>,
 Borislav Petkov <bp@...en8.de>, Dave Hansen <dave.hansen@...ux.intel.com>,
 Michael Roth <michael.roth@....com>, Nikunj A Dadhania <nikunj@....com>,
 Neeraj Upadhyay <Neeraj.Upadhyay@....com>
Subject: Re: [PATCH v4 6/8] x86/sev: Treat the contiguous RMP table as a
 single RMP segment


On 10/23/2024 1:42 PM, Tom Lendacky wrote:
> In preparation for support of a segmented RMP table, treat the contiguous
> RMP table as a segmented RMP table with a single segment covering all
> of memory. By treating a contiguous RMP table as a single segment, much
> of the code that initializes and accesses the RMP can be re-used.
> 
> Segmented RMP tables can have up to 512 segment entries. Each segment
> will have metadata associated with it to identify the segment location,
> the segment size, etc. The segment data and the physical address are used
> to determine the index of the segment within the table and then the RMP
> entry within the segment. For an actual segmented RMP table environment,
> much of the segment information will come from a configuration MSR. For
> the contiguous RMP, though, much of the information will be statically
> defined.
> 
> Signed-off-by: Tom Lendacky <thomas.lendacky@....com>
> ---
>  arch/x86/virt/svm/sev.c | 193 ++++++++++++++++++++++++++++++++++++----
>  1 file changed, 174 insertions(+), 19 deletions(-)
> 
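Restating my understanding of the indexing scheme described above with a
minimal sketch (not the patch's actual code; "segment_shift" and
"coverage_size" are illustrative names):

	/* Two-level lookup for a physical address pa */
	rst_index     = pa >> segment_shift;                /* which RMP segment   */
	segment_index = PHYS_PFN(pa & (coverage_size - 1)); /* RMP entry within it */

For the contiguous RMP table the shift is the maximum physical addressing
width (52), so rst_index is always 0 and the single segment covers all of
memory.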
> diff --git a/arch/x86/virt/svm/sev.c b/arch/x86/virt/svm/sev.c
> index dd256e76e443..043b2582e10e 100644
> --- a/arch/x86/virt/svm/sev.c
> +++ b/arch/x86/virt/svm/sev.c
> @@ -18,6 +18,7 @@
>  #include <linux/cpumask.h>
>  #include <linux/iommu.h>
>  #include <linux/amd-iommu.h>
> +#include <linux/nospec.h>
>  
>  #include <asm/sev.h>
>  #include <asm/processor.h>
> @@ -74,12 +75,42 @@ struct rmpentry {
>   */
>  #define RMPTABLE_CPU_BOOKKEEPING_SZ	0x4000
>  
> +/*
> + * For a non-segmented RMP table, use the maximum physical addressing as the
> + * segment size in order to always arrive at index 0 in the table.
> + */
> +#define RMPTABLE_NON_SEGMENTED_SHIFT	52
> +
> +struct rmp_segment_desc {
> +	struct rmpentry *rmp_entry;
> +	u64 max_index;
> +	u64 size;
> +};
> +
> +/*
> + * Segmented RMP Table support.
> + *   - The segment size is used for two purposes:
> + *     - Identify the amount of memory covered by an RMP segment
> + *     - Quickly locate an RMP segment table entry for a physical address
> + *
> + *   - The RMP segment table contains pointers to an RMP table that covers
> + *     a specific portion of memory. There can be up to 512 8-byte entries,
> + *     one page's worth.
> + */
> +static struct rmp_segment_desc **rmp_segment_table __ro_after_init;
> +static unsigned int rst_max_index __ro_after_init = 512;
> +
> +static u64 rmp_segment_size_max;
> +static unsigned int rmp_segment_coverage_shift;
> +static u64 rmp_segment_coverage_size;
> +static u64 rmp_segment_coverage_mask;
> +#define RST_ENTRY_INDEX(x)	((x) >> rmp_segment_coverage_shift)
> +#define RMP_ENTRY_INDEX(x)	((u64)(PHYS_PFN((x) & rmp_segment_coverage_mask)))
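A quick worked example of these two macros for my own understanding (the
shift value of 40, i.e. 1TB covered per segment, is hypothetical):

	pa = (3ULL << 40) + 0x5000;

	RST_ENTRY_INDEX(pa) = pa >> 40         = 3   /* 4th segment entry  */
	RMP_ENTRY_INDEX(pa) = PHYS_PFN(0x5000) = 5   /* 6th 4KB page in it */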
> +
>  /* Mask to apply to a PFN to get the first PFN of a 2MB page */
>  #define PFN_PMD_MASK	GENMASK_ULL(63, PMD_SHIFT - PAGE_SHIFT)
>  
>  static u64 probed_rmp_base, probed_rmp_size;
> -static struct rmpentry *rmptable __ro_after_init;
> -static u64 rmptable_max_pfn __ro_after_init;
>  
>  static LIST_HEAD(snp_leaked_pages_list);
>  static DEFINE_SPINLOCK(snp_leaked_pages_list_lock);
> @@ -185,6 +216,92 @@ static bool __init init_rmptable_bookkeeping(void)
>  	return true;
>  }
>  
> +static bool __init alloc_rmp_segment_desc(u64 segment_pa, u64 segment_size, u64 pa)
> +{
> +	struct rmp_segment_desc *desc;
> +	void *rmp_segment;
> +	u64 rst_index;
> +
> +	/* Validate the RMP segment size */
> +	if (segment_size > rmp_segment_size_max) {
> +		pr_err("Invalid RMP size (%#llx) for configured segment size (%#llx)\n",
> +		       segment_size, rmp_segment_size_max);
> +		return false;
> +	}
> +
> +	/* Validate the RMP segment table index */
> +	rst_index = RST_ENTRY_INDEX(pa);
> +	if (rst_index >= rst_max_index) {
> +		pr_err("Invalid RMP segment base address (%#llx) for configured segment size (%#llx)\n",
> +		       pa, rmp_segment_coverage_size);
> +		return false;
> +	}
> +	rst_index = array_index_nospec(rst_index, rst_max_index);
> +
> +	if (rmp_segment_table[rst_index]) {
> +		pr_err("RMP segment descriptor already exists at index %llu\n", rst_index);
> +		return false;
> +	}
> +
> +	/* Map the RMP entries */
> +	rmp_segment = memremap(segment_pa, segment_size, MEMREMAP_WB);
> +	if (!rmp_segment) {
> +		pr_err("Failed to map RMP segment addr 0x%llx size 0x%llx\n",
> +		       segment_pa, segment_size);
> +		return false;
> +	}
> +
> +	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
> +	if (!desc) {
> +		memunmap(rmp_segment);
> +		return false;
> +	}
> +
> +	desc->rmp_entry = rmp_segment;
> +	desc->max_index = segment_size / sizeof(*desc->rmp_entry);
> +	desc->size = segment_size;
> +
> +	/* Add the segment descriptor to the table */
> +	rmp_segment_table[rst_index] = desc;
> +
> +	return true;
> +}
> +
> +static void __init free_rmp_segment_table(void)
> +{
> +	unsigned int i;
> +
> +	for (i = 0; i < rst_max_index; i++) {
> +		struct rmp_segment_desc *desc;
> +
> +		desc = rmp_segment_table[i];
> +		if (!desc)
> +			continue;
> +
> +		memunmap(desc->rmp_entry);
> +
> +		kfree(desc);
> +	}
> +
> +	free_page((unsigned long)rmp_segment_table);
> +
> +	rmp_segment_table = NULL;
> +}
> +
> +static bool __init alloc_rmp_segment_table(void)
> +{
> +	struct page *page;
> +
> +	/* Allocate the table used to index into the RMP segments */
> +	page = alloc_page(__GFP_ZERO);
> +	if (!page)
> +		return false;
> +
> +	rmp_segment_table = page_address(page);
> +
> +	return true;
> +}
> +
>  /*
>   * Do the necessary preparations which are verified by the firmware as
>   * described in the SNP_INIT_EX firmware command description in the SNP
> @@ -192,8 +309,8 @@ static bool __init init_rmptable_bookkeeping(void)
>   */
>  static int __init snp_rmptable_init(void)
>  {
> -	u64 max_rmp_pfn, calc_rmp_sz, rmptable_size, rmp_end, val;
> -	void *rmptable_start;
> +	u64 max_rmp_pfn, calc_rmp_sz, rmptable_segment, rmptable_size, rmp_end, val;
> +	unsigned int i;
>  
>  	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
>  		return 0;
> @@ -222,17 +339,18 @@ static int __init snp_rmptable_init(void)
>  		goto nosnp;
>  	}
>  
> +	if (!alloc_rmp_segment_table())
> +		goto nosnp;
> +
>  	/* Map only the RMP entries */
> -	rmptable_start = memremap(probed_rmp_base + RMPTABLE_CPU_BOOKKEEPING_SZ,
> -				  probed_rmp_size - RMPTABLE_CPU_BOOKKEEPING_SZ,
> -				  MEMREMAP_WB);
> -	if (!rmptable_start) {
> -		pr_err("Failed to map RMP table\n");
> +	rmptable_segment = probed_rmp_base + RMPTABLE_CPU_BOOKKEEPING_SZ;
> +	rmptable_size = probed_rmp_size - RMPTABLE_CPU_BOOKKEEPING_SZ;
> +
> +	if (!alloc_rmp_segment_desc(rmptable_segment, rmptable_size, 0)) {
> +		free_rmp_segment_table();
>  		goto nosnp;
>  	}
>  
> -	rmptable_size = probed_rmp_size - RMPTABLE_CPU_BOOKKEEPING_SZ;
> -
>  	/*
>  	 * Check if SEV-SNP is already enabled, this can happen in case of
>  	 * kexec boot.
> @@ -243,12 +361,20 @@ static int __init snp_rmptable_init(void)
>  
>  	/* Zero out the RMP bookkeeping area */
>  	if (!init_rmptable_bookkeeping()) {
> -		memunmap(rmptable_start);
> +		free_rmp_segment_table();
>  		goto nosnp;
>  	}
>  
>  	/* Zero out the RMP entries */
> -	memset(rmptable_start, 0, rmptable_size);
> +	for (i = 0; i < rst_max_index; i++) {
> +		struct rmp_segment_desc *desc;
> +
> +		desc = rmp_segment_table[i];
> +		if (!desc)
> +			continue;
> +
> +		memset(desc->rmp_entry, 0, desc->size);
> +	}
>  
>  	/* Flush the caches to ensure that data is written before SNP is enabled. */
>  	wbinvd_on_all_cpus();
> @@ -259,9 +385,6 @@ static int __init snp_rmptable_init(void)
>  	on_each_cpu(snp_enable, NULL, 1);
>  
>  skip_enable:
> -	rmptable = (struct rmpentry *)rmptable_start;
> -	rmptable_max_pfn = rmptable_size / sizeof(struct rmpentry) - 1;
> -
>  	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/rmptable_init:online", __snp_enable, NULL);
>  
>  	/*
> @@ -282,6 +405,17 @@ static int __init snp_rmptable_init(void)
>   */
>  device_initcall(snp_rmptable_init);
>  
> +static void set_rmp_segment_info(unsigned int segment_shift)
> +{
> +	rmp_segment_coverage_shift = segment_shift;
> +	rmp_segment_coverage_size  = 1ULL << rmp_segment_coverage_shift;
> +	rmp_segment_coverage_mask  = rmp_segment_coverage_size - 1;
> +
> +	/* Calculate the maximum size an RMP can be (16 bytes/page mapped) */
> +	rmp_segment_size_max = PHYS_PFN(rmp_segment_coverage_size);
> +	rmp_segment_size_max <<= 4;
> +}
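For reference, since each 4KB page is covered by a 16-byte RMP entry, this
works out to rmp_segment_size_max = coverage_size / 256. A worked example
with a hypothetical coverage shift of 39 (512GB per segment):

	rmp_segment_size_max = PHYS_PFN(1ULL << 39);  /* 2^27 pages           */
	rmp_segment_size_max <<= 4;                   /* 2^31 bytes, i.e. 2GB */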
> +
>  #define RMP_ADDR_MASK GENMASK_ULL(51, 13)
>  
>  bool snp_probe_rmptable_info(void)
> @@ -303,6 +437,11 @@ bool snp_probe_rmptable_info(void)
>  
>  	rmp_sz = rmp_end - rmp_base + 1;
>  
> +	/* Treat the contiguous RMP table as a single segment */
> +	rst_max_index = 1;
> +
> +	set_rmp_segment_info(RMPTABLE_NON_SEGMENTED_SHIFT);
> +
>  	probed_rmp_base = rmp_base;
>  	probed_rmp_size = rmp_sz;
>  
> @@ -314,13 +453,29 @@ bool snp_probe_rmptable_info(void)
>  
>  static struct rmpentry *__get_rmpentry(u64 pfn)
>  {
> -	if (!rmptable)
> +	struct rmp_segment_desc *desc;
> +	u64 paddr, rst_index, segment_index;
> +
> +	if (!rmp_segment_table)
>  		return ERR_PTR(-ENODEV);
>  
> -	if (unlikely(pfn > rmptable_max_pfn))
> +	paddr = pfn << PAGE_SHIFT;
> +
> +	rst_index = RST_ENTRY_INDEX(paddr);
> +	if (unlikely(rst_index >= rst_max_index))
> +		return ERR_PTR(-EFAULT);
> +	rst_index = array_index_nospec(rst_index, rst_max_index);
> +
> +	desc = rmp_segment_table[rst_index];
> +	if (unlikely(!desc))
>  		return ERR_PTR(-EFAULT);
>  
> -	return rmptable + pfn;
> +	segment_index = RMP_ENTRY_INDEX(paddr);
> +	if (unlikely(segment_index >= desc->max_index))
> +		return ERR_PTR(-EFAULT);
> +	segment_index = array_index_nospec(segment_index, desc->max_index);
> +
> +	return desc->rmp_entry + segment_index;
>  }
>  
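Checking the contiguous (single-segment) case against the old code: with
RMPTABLE_NON_SEGMENTED_SHIFT = 52,

	rst_index     = (pfn << PAGE_SHIFT) >> 52 = 0;           /* always segment 0 */
	segment_index = PHYS_PFN(paddr & ((1ULL << 52) - 1)) = pfn;

so this reduces to the old "rmptable + pfn" lookup, with desc->max_index
taking over the bounds check that rmptable_max_pfn used to provide.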

Ah ok, __get_rmpentry() is finalized here in this patch, and rmptable &
rmptable_max_pfn are removed as part of it, so my comments on patch #1 are
probably not relevant.

This is somewhat confusing, though. Maybe a comment can be added in patch #1
to explain that __get_rmpentry() will be finalized in this patch?

My confusion results from the following:

Patch #1:
__get_rmpentry() refers to rmptable.

Patch #5:
rmptable is initialized here.

Patch #6:
rmptable is no longer required and is removed in this patch;
__get_rmpentry() gets its final cleanup.

Thanks,
Ashish

>  static int get_rmpentry(u64 pfn, struct rmpread *entry)
