Message-ID: <d872728a-e223-8bcd-7652-7dbe38d93802@amd.com>
Date: Wed, 9 Jul 2025 13:24:25 -0500
From: Tom Lendacky <thomas.lendacky@....com>
To: Ard Biesheuvel <ardb+git@...gle.com>, linux-kernel@...r.kernel.org
Cc: linux-efi@...r.kernel.org, x86@...nel.org,
Ard Biesheuvel <ardb@...nel.org>, Borislav Petkov <bp@...en8.de>,
Ingo Molnar <mingo@...nel.org>, Dionna Amalie Glaze
<dionnaglaze@...gle.com>, Kevin Loughlin <kevinloughlin@...gle.com>,
Josh Poimboeuf <jpoimboe@...nel.org>, Peter Zijlstra <peterz@...radead.org>
Subject: Re: [PATCH v4 08/24] x86/sev: Share implementation of MSR-based page
state change
On 7/9/25 03:08, Ard Biesheuvel wrote:
> From: Ard Biesheuvel <ardb@...nel.org>
>
> Both the decompressor and the SEV startup code implement the exact same
> sequence for invoking the MSR-based communication protocol to effect a
> page state change.
>
> Before tweaking the internal APIs used in both versions, merge and share
> them so that those tweaks are only needed in a single place.
I think you can keep the save and restore of the MSR in the combined
code so that you don't need the previous patch; that will keep
everything safe. We should be doing a minimal number of MSR-protocol
page state changes, so it really shouldn't have much effect.
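
Something along these lines, maybe (an untested sketch, just folding the
MSR save/restore that the decompressor callers currently do into the
shared helper):

static void __head __page_state_change(unsigned long vaddr, unsigned long paddr,
				       enum psc_op op)
{
	u64 val, msr;

	/*
	 * If private -> shared then invalidate the page before requesting the
	 * state change in the RMP table.
	 */
	if (op == SNP_PAGE_STATE_SHARED)
		pvalidate_4k_page(vaddr, paddr, false);

	/* Save the current GHCB MSR value so it can be restored below. */
	msr = sev_es_rd_ghcb_msr();

	/* Issue VMGEXIT to change the page state in RMP table. */
	sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
	VMGEXIT();

	/* Read the response of the VMGEXIT. */
	val = sev_es_rd_ghcb_msr();
	if ((GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP) || GHCB_MSR_PSC_RESP_VAL(val))
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);

	/* Restore the previously saved GHCB MSR value. */
	sev_es_wr_ghcb_msr(msr);

	/*
	 * Now that the page state has been changed in the RMP table,
	 * validate it so that it is consistent with the RMP entry.
	 */
	if (op == SNP_PAGE_STATE_PRIVATE)
		pvalidate_4k_page(vaddr, paddr, true);
}

With that, snp_set_page_private(), snp_set_page_shared() and
snp_accept_memory() in the decompressor wouldn't need to touch the MSR
themselves.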
Thanks,
Tom
>
> Signed-off-by: Ard Biesheuvel <ardb@...nel.org>
> ---
> arch/x86/boot/compressed/sev.c | 34 ++------------------
> arch/x86/boot/startup/sev-shared.c | 29 +++++++++++++++++
> arch/x86/boot/startup/sev-startup.c | 29 +----------------
> 3 files changed, 33 insertions(+), 59 deletions(-)
>
> diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
> index f00f68175f14..6d3ed7ed03a4 100644
> --- a/arch/x86/boot/compressed/sev.c
> +++ b/arch/x86/boot/compressed/sev.c
> @@ -60,34 +60,6 @@ static bool sev_snp_enabled(void)
> return sev_status & MSR_AMD64_SEV_SNP_ENABLED;
> }
>
> -static void __page_state_change(unsigned long paddr, enum psc_op op)
> -{
> - u64 val, msr;
> -
> - /*
> - * If private -> shared then invalidate the page before requesting the
> - * state change in the RMP table.
> - */
> - if (op == SNP_PAGE_STATE_SHARED)
> - pvalidate_4k_page(paddr, paddr, false);
> -
> - /* Issue VMGEXIT to change the page state in RMP table. */
> - sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
> - VMGEXIT();
> -
> - /* Read the response of the VMGEXIT. */
> - val = sev_es_rd_ghcb_msr();
> - if ((GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP) || GHCB_MSR_PSC_RESP_VAL(val))
> - sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
> -
> - /*
> - * Now that page state is changed in the RMP table, validate it so that it is
> - * consistent with the RMP entry.
> - */
> - if (op == SNP_PAGE_STATE_PRIVATE)
> - pvalidate_4k_page(paddr, paddr, true);
> -}
> -
> void snp_set_page_private(unsigned long paddr)
> {
> u64 msr;
> @@ -96,7 +68,7 @@ void snp_set_page_private(unsigned long paddr)
> return;
>
> msr = sev_es_rd_ghcb_msr();
> - __page_state_change(paddr, SNP_PAGE_STATE_PRIVATE);
> + __page_state_change(paddr, paddr, SNP_PAGE_STATE_PRIVATE);
> sev_es_wr_ghcb_msr(msr);
> }
>
> @@ -108,7 +80,7 @@ void snp_set_page_shared(unsigned long paddr)
> return;
>
> msr = sev_es_rd_ghcb_msr();
> - __page_state_change(paddr, SNP_PAGE_STATE_SHARED);
> + __page_state_change(paddr, paddr, SNP_PAGE_STATE_SHARED);
> sev_es_wr_ghcb_msr(msr);
> }
>
> @@ -137,7 +109,7 @@ void snp_accept_memory(phys_addr_t start, phys_addr_t end)
> u64 msr = sev_es_rd_ghcb_msr();
>
> for (phys_addr_t pa = start; pa < end; pa += PAGE_SIZE)
> - __page_state_change(pa, SNP_PAGE_STATE_PRIVATE);
> + __page_state_change(pa, pa, SNP_PAGE_STATE_PRIVATE);
> sev_es_wr_ghcb_msr(msr);
> }
>
> diff --git a/arch/x86/boot/startup/sev-shared.c b/arch/x86/boot/startup/sev-shared.c
> index 7ca59038269f..f553268d31d7 100644
> --- a/arch/x86/boot/startup/sev-shared.c
> +++ b/arch/x86/boot/startup/sev-shared.c
> @@ -640,6 +640,35 @@ static void __head pvalidate_4k_page(unsigned long vaddr, unsigned long paddr,
> }
> }
>
> +static void __head __page_state_change(unsigned long vaddr, unsigned long paddr,
> + enum psc_op op)
> +{
> + u64 val;
> +
> + /*
> + * If private -> shared then invalidate the page before requesting the
> + * state change in the RMP table.
> + */
> + if (op == SNP_PAGE_STATE_SHARED)
> + pvalidate_4k_page(vaddr, paddr, false);
> +
> + /* Issue VMGEXIT to change the page state in RMP table. */
> + sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
> + VMGEXIT();
> +
> + /* Read the response of the VMGEXIT. */
> + val = sev_es_rd_ghcb_msr();
> + if ((GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP) || GHCB_MSR_PSC_RESP_VAL(val))
> + sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
> +
> + /*
> + * Now that page state is changed in the RMP table, validate it so that it is
> + * consistent with the RMP entry.
> + */
> + if (op == SNP_PAGE_STATE_PRIVATE)
> + pvalidate_4k_page(vaddr, paddr, true);
> +}
> +
> /*
> * Maintain the GPA of the SVSM Calling Area (CA) in order to utilize the SVSM
> * services needed when not running in VMPL0.
> diff --git a/arch/x86/boot/startup/sev-startup.c b/arch/x86/boot/startup/sev-startup.c
> index 8edf1ba78a48..2ffd8bf09357 100644
> --- a/arch/x86/boot/startup/sev-startup.c
> +++ b/arch/x86/boot/startup/sev-startup.c
> @@ -135,7 +135,6 @@ early_set_pages_state(unsigned long vaddr, unsigned long paddr,
> unsigned long npages, enum psc_op op)
> {
> unsigned long paddr_end;
> - u64 val;
>
> vaddr = vaddr & PAGE_MASK;
>
> @@ -143,37 +142,11 @@ early_set_pages_state(unsigned long vaddr, unsigned long paddr,
> paddr_end = paddr + (npages << PAGE_SHIFT);
>
> while (paddr < paddr_end) {
> - /* Page validation must be rescinded before changing to shared */
> - if (op == SNP_PAGE_STATE_SHARED)
> - pvalidate_4k_page(vaddr, paddr, false);
> -
> - /*
> - * Use the MSR protocol because this function can be called before
> - * the GHCB is established.
> - */
> - sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
> - VMGEXIT();
> -
> - val = sev_es_rd_ghcb_msr();
> -
> - if (GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP)
> - goto e_term;
> -
> - if (GHCB_MSR_PSC_RESP_VAL(val))
> - goto e_term;
> -
> - /* Page validation must be performed after changing to private */
> - if (op == SNP_PAGE_STATE_PRIVATE)
> - pvalidate_4k_page(vaddr, paddr, true);
> + __page_state_change(vaddr, paddr, op);
>
> vaddr += PAGE_SIZE;
> paddr += PAGE_SIZE;
> }
> -
> - return;
> -
> -e_term:
> - sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
> }
>
> void __head early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,