Message-ID: <4c1198e7-00c8-449e-bbb9-7dcf15e11cfe@amd.com>
Date: Fri, 18 Oct 2024 14:07:50 +0530
From: Neeraj Upadhyay <Neeraj.Upadhyay@....com>
To: Tom Lendacky <thomas.lendacky@....com>, linux-kernel@...r.kernel.org,
x86@...nel.org
Cc: Thomas Gleixner <tglx@...utronix.de>, Ingo Molnar <mingo@...hat.com>,
Borislav Petkov <bp@...en8.de>, Dave Hansen <dave.hansen@...ux.intel.com>,
Michael Roth <michael.roth@....com>, Ashish Kalra <ashish.kalra@....com>
Subject: Re: [PATCH v3 7/8] x86/sev: Add full support for a segmented RMP
table
>
> @@ -196,7 +203,42 @@ static void __init __snp_fixup_e820_tables(u64 pa)
> void __init snp_fixup_e820_tables(void)
> {
> __snp_fixup_e820_tables(probed_rmp_base);
> - __snp_fixup_e820_tables(probed_rmp_base + probed_rmp_size);
> +
> + if (RMP_IS_SEGMENTED(rmp_cfg)) {
> + unsigned long size;
> + unsigned int i;
> + u64 pa, *rst;
> +
> + pa = probed_rmp_base;
> + pa += RMPTABLE_CPU_BOOKKEEPING_SZ;
> + pa += RMP_SEGMENT_TABLE_SIZE;
> + __snp_fixup_e820_tables(pa);
> +
> + pa -= RMP_SEGMENT_TABLE_SIZE;
> + rst = early_memremap(pa, RMP_SEGMENT_TABLE_SIZE);
> + if (!rst)
> + return;
> +
> + for (i = 0; i < rst_max_index; i++) {
> + pa = RST_ENTRY_SEGMENT_BASE(rst[i]);
> + size = RST_ENTRY_MAPPED_SIZE(rst[i]);
> + if (!size)
> + continue;
> +
> + __snp_fixup_e820_tables(pa);
> +
> + /* Mapped size in GB */
> + size *= (1UL << 30);
nit: size <<= 30 ?
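(Both forms are equivalent; e.g., size = 2 becomes 2 GB = 0x80000000. The
shift just reads more directly as a units conversion.)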
> + if (size > rmp_segment_coverage_size)
> + size = rmp_segment_coverage_size;
> +
> + __snp_fixup_e820_tables(pa + size);
I might have understood this wrong, but is this call meant to fix up the end
of this segment's RMP table rather than the end of the range it covers? If
so, is the below required?
size = PHYS_PFN(size);
size <<= 4;
__snp_fixup_e820_tables(pa + size);
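(That is, assuming 16-byte RMP entries per 4K page, as in the setup path
below, a segment covering 64 GB would need (64 GB / 4 KB) * 16 = 256 MB of
RMP table, so the fixup end would be pa + 256 MB rather than pa + 64 GB.)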
> + }
> +
> + early_memunmap(rst, RMP_SEGMENT_TABLE_SIZE);
> + } else {
> + __snp_fixup_e820_tables(probed_rmp_base + probed_rmp_size);
> + }
> }
>
...
> +static bool __init segmented_rmptable_setup(void)
> +{
> + u64 rst_pa, *rst, pa, ram_pa_end, ram_pa_max;
> + unsigned int i, max_index;
> +
> + if (!probed_rmp_base)
> + return false;
> +
> + if (!alloc_rmp_segment_table())
> + return false;
> +
> + /* Map the RMP Segment Table */
> + rst_pa = probed_rmp_base + RMPTABLE_CPU_BOOKKEEPING_SZ;
> + rst = memremap(rst_pa, RMP_SEGMENT_TABLE_SIZE, MEMREMAP_WB);
> + if (!rst) {
> + pr_err("Failed to map RMP segment table addr %#llx\n", rst_pa);
> + goto e_free;
> + }
> +
> + /* Get the address for the end of system RAM */
> + ram_pa_max = max_pfn << PAGE_SHIFT;
> +
> + /* Process each RMP segment */
> + max_index = 0;
> + ram_pa_end = 0;
> + for (i = 0; i < rst_max_index; i++) {
> + u64 rmp_segment, rmp_size, mapped_size;
> +
> + mapped_size = RST_ENTRY_MAPPED_SIZE(rst[i]);
> + if (!mapped_size)
> + continue;
> +
> + max_index = i;
> +
> + /* Mapped size in GB */
> + mapped_size *= (1ULL << 30);
nit: mapped_size <<= 30 ?
> + if (mapped_size > rmp_segment_coverage_size)
> + mapped_size = rmp_segment_coverage_size;
> +
> + rmp_segment = RST_ENTRY_SEGMENT_BASE(rst[i]);
> +
> + rmp_size = PHYS_PFN(mapped_size);
> + rmp_size <<= 4;
> +
> + pa = (u64)i << rmp_segment_coverage_shift;
> +
> + /* Some segments may be for MMIO mapped above system RAM */
> + if (pa < ram_pa_max)
> + ram_pa_end = pa + mapped_size;
> +
> + if (!alloc_rmp_segment_desc(rmp_segment, rmp_size, pa))
> + goto e_unmap;
> +
> + pr_info("RMP segment %u physical address [%#llx - %#llx] covering [%#llx - %#llx]\n",
> + i, rmp_segment, rmp_segment + rmp_size - 1, pa, pa + mapped_size - 1);
> + }
> +
> + if (ram_pa_max > ram_pa_end) {
> + pr_err("Segmented RMP does not cover full system RAM (expected 0x%llx got 0x%llx)\n",
> + ram_pa_max, ram_pa_end);
> + goto e_unmap;
> + }
> +
> + /* Adjust the maximum index based on the found segments */
> + rst_max_index = max_index + 1;
> +
> + memunmap(rst);
> +
> + return true;
> +
> +e_unmap:
> + memunmap(rst);
> +
> +e_free:
> + free_rmp_segment_table();
> +
> + return false;
> +}
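As a worked example of the coverage math above (values are hypothetical):
with rmp_segment_coverage_shift = 36, each segment covers 64 GB, so segment
i = 2 covers pa = 2 << 36 = 0x2000000000 onward, and its RMP occupies
PHYS_PFN(64 GB) << 4 = 256 MB starting at RST_ENTRY_SEGMENT_BASE(rst[2]).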
> +
...
>
> +static bool probe_segmented_rmptable_info(void)
> +{
> + unsigned int eax, ebx, segment_shift, segment_shift_min, segment_shift_max;
> + u64 rmp_base, rmp_end;
> +
> + rdmsrl(MSR_AMD64_RMP_BASE, rmp_base);
> + rdmsrl(MSR_AMD64_RMP_END, rmp_end);
> +
> + if (!(rmp_base & RMP_ADDR_MASK)) {
> + pr_err("Memory for the RMP table has not been reserved by BIOS\n");
> + return false;
> + }
> +
> + WARN_ONCE(rmp_end & RMP_ADDR_MASK,
> + "Segmented RMP enabled but RMP_END MSR is non-zero\n");
> +
> + /* Obtain the min and max supported RMP segment size */
> + eax = cpuid_eax(0x80000025);
> + segment_shift_min = eax & GENMASK(5, 0);
> + segment_shift_max = (eax & GENMASK(11, 6)) >> 6;
> +
> + /* Verify the segment size is within the supported limits */
> + segment_shift = MSR_AMD64_RMP_SEGMENT_SHIFT(rmp_cfg);
> + if (segment_shift > segment_shift_max || segment_shift < segment_shift_min) {
> + pr_err("RMP segment size (%u) is not within advertised bounds (min=%u, max=%u)\n",
> + segment_shift, segment_shift_min, segment_shift_max);
> + return false;
> + }
> +
> + /* Override the max supported RST index if a hardware limit exists */
> + ebx = cpuid_ebx(0x80000025);
> + if (ebx & BIT(10))
> + rst_max_index = ebx & GENMASK(9, 0);
> +
> + set_rmp_segment_info(segment_shift);
> +
> + probed_rmp_base = rmp_base;
> + probed_rmp_size = 0;
> +
> + pr_info("RMP segment table physical address [0x%016llx - 0x%016llx]\n",
> + rmp_base, rmp_base + RMPTABLE_CPU_BOOKKEEPING_SZ + RMP_SEGMENT_TABLE_SIZE);
> +
Since the segment table itself starts at rmp_base + RMPTABLE_CPU_BOOKKEEPING_SZ,
shouldn't the printed range be:

	rmp_base + RMPTABLE_CPU_BOOKKEEPING_SZ,
	rmp_base + RMPTABLE_CPU_BOOKKEEPING_SZ + RMP_SEGMENT_TABLE_SIZE);

- Neeraj
> + return true;
> +}
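As a sanity check of the CPUID decode above, a hypothetical EAX value of
0x9d4 would give segment_shift_min = 0x9d4 & GENMASK(5, 0) = 20 and
segment_shift_max = (0x9d4 & GENMASK(11, 6)) >> 6 = 39, i.e. supported
segment sizes from 1 MB up to 512 GB.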
> +