Message-ID: <20250513141744.905614-2-ardb+git@google.com>
Date: Tue, 13 May 2025 16:17:45 +0200
From: Ard Biesheuvel <ardb+git@...gle.com>
To: linux-kernel@...r.kernel.org
Cc: x86@...nel.org, Ard Biesheuvel <ardb@...nel.org>, Borislav Petkov <bp@...en8.de>,
Ingo Molnar <mingo@...nel.org>, Tom Lendacky <thomas.lendacky@....com>
Subject: [PATCH] fixup! x86/sev: Share implementation of MSR-based page state change
From: Ard Biesheuvel <ardb@...nel.org>
In principle this is a fixup for the patch named in the subject, but it
applies at the end of the v3 series, so all conflicts with subsequent
patches have already been resolved.
Without it, PVALIDATE will be passed the PA instead of the VA, which
will break if the early page state API is used after the 1:1 mapping is
unmapped.
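
For context: PVALIDATE takes the guest *virtual* address of the target
page in rAX, so passing the PA only happens to work while the 1:1
(VA == PA) mapping is still installed. A minimal sketch of that calling
convention for a 4K page (illustration only; pvalidate_sketch is a
hypothetical name, and the in-tree helper additionally handles 2M pages
and the "no RMP update" carry-flag result):

  /*
   * Sketch only, not the in-tree helper: PVALIDATE (opcode
   * F2 0F 01 FF) takes the page's virtual address in rAX, the
   * page size in rCX (0 == 4K) and the validate flag in rDX,
   * and returns a status code in rAX. Handing it a PA is only
   * correct while a VA == PA mapping exists.
   */
  static int pvalidate_sketch(unsigned long vaddr, bool validate)
  {
          int rc;

          asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFF"
                       : "=a" (rc)
                       : "a" (vaddr), "c" (0UL), "d" ((unsigned long)validate)
                       : "memory", "cc");
          return rc;
  }
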
Cc: Borislav Petkov <bp@...en8.de>
Cc: Ingo Molnar <mingo@...nel.org>
Cc: Tom Lendacky <thomas.lendacky@....com>
---
arch/x86/boot/compressed/sev.c | 6 +++---
arch/x86/boot/startup/sev-shared.c | 8 ++++----
arch/x86/boot/startup/sev-startup.c | 2 +-
3 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
index 79309944cb19..f1600b57ff0d 100644
--- a/arch/x86/boot/compressed/sev.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -55,7 +55,7 @@ void snp_set_page_private(unsigned long paddr)
return;
msr = sev_es_rd_ghcb_msr();
- __page_state_change(paddr, SNP_PAGE_STATE_PRIVATE,
+ __page_state_change(paddr, paddr, SNP_PAGE_STATE_PRIVATE,
(struct svsm_ca *)boot_svsm_caa_pa,
boot_svsm_caa_pa);
sev_es_wr_ghcb_msr(msr);
@@ -69,7 +69,7 @@ void snp_set_page_shared(unsigned long paddr)
return;
msr = sev_es_rd_ghcb_msr();
- __page_state_change(paddr, SNP_PAGE_STATE_SHARED,
+ __page_state_change(paddr, paddr, SNP_PAGE_STATE_SHARED,
(struct svsm_ca *)boot_svsm_caa_pa,
boot_svsm_caa_pa);
sev_es_wr_ghcb_msr(msr);
@@ -100,7 +100,7 @@ void snp_accept_memory(phys_addr_t start, phys_addr_t end)
u64 msr = sev_es_rd_ghcb_msr();
for (phys_addr_t pa = start; pa < end; pa += PAGE_SIZE)
- __page_state_change(pa, SNP_PAGE_STATE_PRIVATE,
+ __page_state_change(pa, pa, SNP_PAGE_STATE_PRIVATE,
(struct svsm_ca *)boot_svsm_caa_pa,
boot_svsm_caa_pa);
sev_es_wr_ghcb_msr(msr);
diff --git a/arch/x86/boot/startup/sev-shared.c b/arch/x86/boot/startup/sev-shared.c
index 77b34ab6c7d8..5ab171120448 100644
--- a/arch/x86/boot/startup/sev-shared.c
+++ b/arch/x86/boot/startup/sev-shared.c
@@ -590,8 +590,8 @@ static void pvalidate_4k_page(unsigned long vaddr, unsigned long paddr,
}
}
-static void __page_state_change(unsigned long paddr, enum psc_op op,
- struct svsm_ca *caa, u64 caa_pa)
+static void __page_state_change(unsigned long vaddr, unsigned long paddr,
+ enum psc_op op, struct svsm_ca *caa, u64 caa_pa)
{
u64 val;
@@ -600,7 +600,7 @@ static void __page_state_change(unsigned long paddr, enum psc_op op,
* state change in the RMP table.
*/
if (op == SNP_PAGE_STATE_SHARED)
- pvalidate_4k_page(paddr, paddr, false, caa, caa_pa);
+ pvalidate_4k_page(vaddr, paddr, false, caa, caa_pa);
/* Issue VMGEXIT to change the page state in RMP table. */
sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
@@ -616,7 +616,7 @@ static void __page_state_change(unsigned long paddr, enum psc_op op,
* consistent with the RMP entry.
*/
if (op == SNP_PAGE_STATE_PRIVATE)
- pvalidate_4k_page(paddr, paddr, true, caa, caa_pa);
+ pvalidate_4k_page(vaddr, paddr, true, caa, caa_pa);
}
/*
diff --git a/arch/x86/boot/startup/sev-startup.c b/arch/x86/boot/startup/sev-startup.c
index 21424157819c..023a157e8563 100644
--- a/arch/x86/boot/startup/sev-startup.c
+++ b/arch/x86/boot/startup/sev-startup.c
@@ -57,7 +57,7 @@ early_set_pages_state(unsigned long vaddr, unsigned long paddr,
paddr_end = paddr + (npages << PAGE_SHIFT);
while (paddr < paddr_end) {
- __page_state_change(paddr, op, caa, caa_pa);
+ __page_state_change(vaddr, paddr, op, caa, caa_pa);
vaddr += PAGE_SIZE;
paddr += PAGE_SIZE;
--
2.49.0.1045.g170613ef41-goog