Message-ID: <Y/9AEyiMLKfj+/mK@notebook>
Date: Wed, 1 Mar 2023 13:07:47 +0100
From: Tom Dohrmann <erbse.13@....de>
To: Michael Roth <michael.roth@....com>
Cc: kvm@...r.kernel.org, linux-coco@...ts.linux.dev,
linux-mm@...ck.org, linux-crypto@...r.kernel.org, x86@...nel.org,
linux-kernel@...r.kernel.org, tglx@...utronix.de, mingo@...hat.com,
jroedel@...e.de, thomas.lendacky@....com, hpa@...or.com,
ardb@...nel.org, pbonzini@...hat.com, seanjc@...gle.com,
vkuznets@...hat.com, jmattson@...gle.com, luto@...nel.org,
dave.hansen@...ux.intel.com, slp@...hat.com, pgonda@...gle.com,
peterz@...radead.org, srinivas.pandruvada@...ux.intel.com,
rientjes@...gle.com, dovmurik@...ux.ibm.com, tobin@....com,
bp@...en8.de, vbabka@...e.cz, kirill@...temov.name,
ak@...ux.intel.com, tony.luck@...el.com, marcorr@...gle.com,
sathyanarayanan.kuppuswamy@...ux.intel.com, alpergun@...gle.com,
dgilbert@...hat.com, jarkko@...nel.org, ashish.kalra@....com,
nikunj.dadhania@....com, Brijesh Singh <brijesh.singh@....com>
Subject: Re: [PATCH RFC v8 15/56] x86/sev: Invalidate pages from the direct
map when adding them to the RMP table
On Mon, Feb 20, 2023 at 12:38:06PM -0600, Michael Roth wrote:
> From: Brijesh Singh <brijesh.singh@....com>
>
> The integrity guarantee of SEV-SNP is enforced through the RMP table.
> The RMP is used with standard x86 and IOMMU page tables to enforce
> memory restrictions and page access rights. The RMP check is enforced as
> soon as SEV-SNP is enabled globally in the system. When hardware
> encounters an RMP-check failure, it raises a page-fault exception.
>
> The rmp_make_private() and rmp_make_shared() helpers are used to add
> or remove the pages from the RMP table. Improve the rmp_make_private()
> to invalidate state so that pages cannot be used in the direct map
> after they are added to the RMP table, and are restored to their
> default valid permissions after the pages are removed from the RMP
> table.
>
> Co-developed-by: Ashish Kalra <ashish.kalra@....com>
> Signed-off-by: Ashish Kalra <ashish.kalra@....com>
> Signed-off-by: Brijesh Singh <brijesh.singh@....com>
> Signed-off-by: Michael Roth <michael.roth@....com>
> ---
> arch/x86/kernel/sev.c | 57 +++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 57 insertions(+)
>
> diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
> index a49f30c10dc1..3e5ff5934e83 100644
> --- a/arch/x86/kernel/sev.c
> +++ b/arch/x86/kernel/sev.c
> @@ -2595,6 +2595,37 @@ int psmash(u64 pfn)
> }
> EXPORT_SYMBOL_GPL(psmash);
>
> +static int restore_direct_map(u64 pfn, int npages)
> +{
> + int i, ret = 0;
> +
> + for (i = 0; i < npages; i++) {
> + ret = set_direct_map_default_noflush(pfn_to_page(pfn + i));
> + if (ret)
> + goto cleanup;
> + }
> +
> +cleanup:
> + WARN(ret > 0, "Failed to restore direct map for pfn 0x%llx\n", pfn + i);
> + return ret;
> +}
> +
> +static int invalidate_direct_map(u64 pfn, int npages)
> +{
> + int i, ret = 0;
> +
> + for (i = 0; i < npages; i++) {
> + ret = set_direct_map_invalid_noflush(pfn_to_page(pfn + i));
> + if (ret)
> + goto cleanup;
> + }
> +
> +cleanup:
> + WARN(ret > 0, "Failed to invalidate direct map for pfn 0x%llx\n", pfn + i);
> + restore_direct_map(pfn, i);
This immediately restores the direct map after invalidating it, even
when every page was invalidated successfully. The restore_direct_map()
call probably needs to be put behind if (ret) so the rollback only runs
on the error path.
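
Something along these lines (untested, just conditioning the existing
cleanup path from this patch):

	cleanup:
		WARN(ret > 0, "Failed to invalidate direct map for pfn 0x%llx\n", pfn + i);
		/* Undo only the pages that were invalidated before the failure. */
		if (ret)
			restore_direct_map(pfn, i);
		return ret;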
Regards, Tom
> + return ret;
> +}
> +
> static int rmpupdate(u64 pfn, struct rmp_state *val)
> {
> int max_attempts = 4 * num_present_cpus();
> @@ -2605,6 +2636,21 @@ static int rmpupdate(u64 pfn, struct rmp_state *val)
> if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
> return -ENXIO;
>
> + level = RMP_TO_X86_PG_LEVEL(val->pagesize);
> + npages = page_level_size(level) / PAGE_SIZE;
> +
> + /*
> + * If page is getting assigned in the RMP table then unmap it from the
> + * direct map.
> + */
> + if (val->assigned) {
> + if (invalidate_direct_map(pfn, npages)) {
> + pr_err("Failed to unmap %d pages at pfn 0x%llx from the direct_map\n",
> + npages, pfn);
> + return -EFAULT;
> + }
> + }
> +
> do {
> /* Binutils version 2.36 supports the RMPUPDATE mnemonic. */
> asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFE"
> @@ -2630,6 +2676,17 @@ static int rmpupdate(u64 pfn, struct rmp_state *val)
> attempts, val->asid, ret, pfn, npages);
> }
>
> + /*
> + * Restore the direct map after the page is removed from the RMP table.
> + */
> + if (!val->assigned) {
> + if (restore_direct_map(pfn, npages)) {
> + pr_err("Failed to map %d pages at pfn 0x%llx into the direct_map\n",
> + npages, pfn);
> + return -EFAULT;
> + }
> + }
> +
> return 0;
> }
>
> --
> 2.25.1
>