Message-ID: <20240705142958.GCZogDZlbQWU5vHU34@fat_crate.local>
Date: Fri, 5 Jul 2024 16:29:58 +0200
From: Borislav Petkov <bp@...en8.de>
To: Ashish Kalra <Ashish.Kalra@....com>
Cc: dave.hansen@...ux.intel.com, tglx@...utronix.de, mingo@...hat.com,
	x86@...nel.org, hpa@...or.com, rafael@...nel.org,
	peterz@...radead.org, adrian.hunter@...el.com,
	sathyanarayanan.kuppuswamy@...ux.intel.com, jun.nakajima@...el.com,
	kirill.shutemov@...ux.intel.com, rick.p.edgecombe@...el.com,
	linux-kernel@...r.kernel.org, thomas.lendacky@....com,
	michael.roth@....com, seanjc@...gle.com, kai.huang@...el.com,
	bhe@...hat.com, bdas@...hat.com, vkuznets@...hat.com,
	dionnaglaze@...gle.com, anisinha@...hat.com, ardb@...nel.org,
	dyoung@...hat.com, kexec@...ts.infradead.org,
	linux-coco@...ts.linux.dev, jroedel@...e.de
Subject: Re: [PATCH v11 3/3] x86/snp: Convert shared memory back to private
 on kexec

On Tue, Jul 02, 2024 at 07:58:11PM +0000, Ashish Kalra wrote:
> +static bool make_pte_private(pte_t *pte, unsigned long addr, int pages, int level)
> +{
> +	struct sev_es_runtime_data *data;
> +	struct ghcb *ghcb;
> +	int cpu;
> +
> +	/*
> +	 * Ensure that all the per-cpu GHCBs are made private
> +	 * at the end of unshared loop so that we continue to use the
> +	 * optimized GHCB protocol and not force the switch to
> +	 * MSR protocol till the very end.
> +	 */
> +	for_each_possible_cpu(cpu) {
> +		data = per_cpu(runtime_data, cpu);
> +		ghcb = &data->ghcb_page;
> +		/* Check for GHCB for being part of a PMD range */
> +		if ((unsigned long)ghcb >= addr &&
> +		    (unsigned long)ghcb <= (addr + (pages * PAGE_SIZE)))
> +			return true;
> +	}
> +
> +	set_pte_enc(pte, level, (void *)addr);
> +	snp_set_memory_private(addr, pages);
> +
> +	return true;

Zap make_pte_private() and fold the GHCB check into its only caller. One
thing to pay attention to: a continue inside the for_each_possible_cpu()
loop only advances to the next CPU - it doesn't skip the conversion of
the current range - so the check needs a flag. IOW, something like this:
diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c
index f263ceada006..65234ffb1495 100644
--- a/arch/x86/coco/sev/core.c
+++ b/arch/x86/coco/sev/core.c
@@ -1022,39 +1022,15 @@ static void set_pte_enc(pte_t *kpte, int level, void *va)
 	set_pte_enc_mask(kpte, d.pfn, d.new_pgprot);
 }
 
-static bool make_pte_private(pte_t *pte, unsigned long addr, int pages, int level)
-{
-	struct sev_es_runtime_data *data;
-	struct ghcb *ghcb;
-	int cpu;
-
-	/*
-	 * Ensure that all the per-cpu GHCBs are made private
-	 * at the end of unshared loop so that we continue to use the
-	 * optimized GHCB protocol and not force the switch to
-	 * MSR protocol till the very end.
-	 */
-	for_each_possible_cpu(cpu) {
-		data = per_cpu(runtime_data, cpu);
-		ghcb = &data->ghcb_page;
-		/* Check for GHCB for being part of a PMD range */
-		if ((unsigned long)ghcb >= addr &&
-		    (unsigned long)ghcb <= (addr + (pages * PAGE_SIZE)))
-			return true;
-	}
-
-	set_pte_enc(pte, level, (void *)addr);
-	snp_set_memory_private(addr, pages);
-
-	return true;
-}
-
 /* Walk the direct mapping and convert all shared memory back to private. */
 static void unshare_all_memory(void)
 {
-	unsigned long addr, end, size;
+	unsigned long addr, end, size, ghcb;
+	struct sev_es_runtime_data *data;
 	unsigned int npages, level;
+	bool skipped_addr;
 	pte_t *pte;
+	int cpu;
 
 	/* Unshare the direct mapping. */
 	addr = PAGE_OFFSET;
@@ -1063,19 +1039,35 @@ static void unshare_all_memory(void)
 	while (addr < end) {
 		pte = lookup_address(addr, &level);
 		size = page_level_size(level);
+		npages = size / PAGE_SIZE;
+		skipped_addr = false;
 
 		if (!pte || !pte_decrypted(*pte) || pte_none(*pte)) {
 			addr += size;
 			continue;
 		}
 
-		npages = size / PAGE_SIZE;
+		/*
+		 * Ensure that all the per-cpu GHCBs are made private at the
+		 * end of the unsharing loop so that the switch to the slower
+		 * MSR protocol happens last.
+		 */
+		for_each_possible_cpu(cpu) {
+			data = per_cpu(runtime_data, cpu);
+			ghcb = (unsigned long)&data->ghcb_page;
+
+			if (addr <= ghcb && ghcb <= addr + size) {
+				skipped_addr = true;
+				break;
+			}
+		}
 
+		if (!skipped_addr) {
+			set_pte_enc(pte, level, (void *)addr);
+			snp_set_memory_private(addr, npages);
+		}
 
 		addr += size;
 	}
 
 	/* Unshare all bss decrypted memory. */

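And to be extra clear, the same loop shape modeled standalone in
userspace below - all the names, sizes and addresses are made up and
only the control flow matters. The flag is what skips a range; a
continue in the inner loop would merely move on to the next CPU:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS		4
#define RANGE_SIZE	0x200000UL	/* one 2M PMD-sized range */

/* Made-up GHCB addresses; two share one range, two share another. */
static unsigned long ghcb_addr[NR_CPUS] = {
	0x400000, 0x400800, 0xa00000, 0xa01000
};

static void convert_to_private(unsigned long addr)
{
	printf("convert %#lx-%#lx\n", addr, addr + RANGE_SIZE);
}

int main(void)
{
	unsigned long addr, end = 0x1000000;
	bool skipped_addr;
	int cpu;

	for (addr = 0; addr < end; addr += RANGE_SIZE) {
		skipped_addr = false;

		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			if (addr <= ghcb_addr[cpu] &&
			    ghcb_addr[cpu] <= addr + RANGE_SIZE) {
				skipped_addr = true;
				break;
			}
		}

		/*
		 * A continue in the inner loop above would only advance
		 * to the next CPU - the flag is what skips the range.
		 */
		if (!skipped_addr)
			convert_to_private(addr);
	}

	return 0;
}

This converts everything except the two ranges which contain a GHCB,
which is exactly what the kexec path wants: keep the fast GHCB protocol
usable until the very end.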

-- 
Regards/Gruss,
    Boris.

https://people.kernel.org/tglx/notes-about-netiquette
