Message-Id: <20211007061838.1381129-2-dovmurik@linux.ibm.com>
Date:   Thu,  7 Oct 2021 06:18:35 +0000
From:   Dov Murik <dovmurik@...ux.ibm.com>
To:     linux-efi@...r.kernel.org
Cc:     Dov Murik <dovmurik@...ux.ibm.com>, Borislav Petkov <bp@...e.de>,
        Ashish Kalra <ashish.kalra@....com>,
        Brijesh Singh <brijesh.singh@....com>,
        Tom Lendacky <thomas.lendacky@....com>,
        Ard Biesheuvel <ardb@...nel.org>,
        James Morris <jmorris@...ei.org>,
        "Serge E. Hallyn" <serge@...lyn.com>,
        Andi Kleen <ak@...ux.intel.com>,
        Greg KH <gregkh@...uxfoundation.org>,
        Andrew Scull <ascull@...gle.com>,
        "Dr. David Alan Gilbert" <dgilbert@...hat.com>,
        James Bottomley <jejb@...ux.ibm.com>,
        Tobin Feldman-Fitzthum <tobin@...ux.ibm.com>,
        Jim Cadden <jcadden@....com>,
        Daniele Buono <dbuono@...ux.vnet.ibm.com>,
        linux-coco@...ts.linux.dev, linux-security-module@...r.kernel.org,
        linux-kernel@...r.kernel.org
Subject: [PATCH v2 1/4] x86: Export clean_cache_range()

Export clean_cache_range(), which is similar to the existing
clflush_cache_range() but uses the CLWB (cache line write back)
instruction instead of CLFLUSH. Unlike CLFLUSH, CLWB writes the cache
line back to memory without necessarily invalidating it, so a
subsequent access to the same data can still hit in the cache.

Remove the existing static implementation of clean_cache_range() from
arch/x86/lib/usercopy_64.c.
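
As a usage sketch (a hypothetical caller; flush_shared_buf() and its
buffer are illustrative only, not part of this patch):

  #include <asm/cacheflush.h>

  /*
   * Write back the cache lines covering a buffer that another
   * agent will read directly from memory. The required fencing
   * is handled inside clean_cache_range() itself.
   */
  static void flush_shared_buf(void *buf, unsigned int len)
  {
          clean_cache_range(buf, len);
  }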

Signed-off-by: Dov Murik <dovmurik@...ux.ibm.com>
---
 arch/x86/include/asm/cacheflush.h |  1 +
 arch/x86/lib/usercopy_64.c        | 21 ---------------------
 arch/x86/mm/pat/set_memory.c      | 30 ++++++++++++++++++++++++++++++
 3 files changed, 31 insertions(+), 21 deletions(-)

diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index b192d917a6d0..76452ba1bafb 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -9,5 +9,6 @@
 #include <asm/special_insns.h>
 
 void clflush_cache_range(void *addr, unsigned int size);
+void clean_cache_range(void *vaddr, unsigned int size);
 
 #endif /* _ASM_X86_CACHEFLUSH_H */
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 508c81e97ab1..ffd39f1d4251 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -57,27 +57,6 @@ unsigned long clear_user(void __user *to, unsigned long n)
 EXPORT_SYMBOL(clear_user);
 
 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
-/**
- * clean_cache_range - write back a cache range with CLWB
- * @vaddr:	virtual start address
- * @size:	number of bytes to write back
- *
- * Write back a cache range using the CLWB (cache line write back)
- * instruction. Note that @size is internally rounded up to be cache
- * line size aligned.
- */
-static void clean_cache_range(void *addr, size_t size)
-{
-	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
-	unsigned long clflush_mask = x86_clflush_size - 1;
-	void *vend = addr + size;
-	void *p;
-
-	for (p = (void *)((unsigned long)addr & ~clflush_mask);
-	     p < vend; p += x86_clflush_size)
-		clwb(p);
-}
-
 void arch_wb_cache_pmem(void *addr, size_t size)
 {
 	clean_cache_range(addr, size);
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index ad8a5c586a35..8de029a21e03 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -319,6 +319,36 @@ void clflush_cache_range(void *vaddr, unsigned int size)
 }
 EXPORT_SYMBOL_GPL(clflush_cache_range);
 
+static void clean_cache_range_opt(void *vaddr, unsigned int size)
+{
+	const unsigned long clflush_size = boot_cpu_data.x86_clflush_size;
+	void *p = (void *)((unsigned long)vaddr & ~(clflush_size - 1));
+	void *vend = vaddr + size;
+
+	if (p >= vend)
+		return;
+
+	for (; p < vend; p += clflush_size)
+		clwb(p);
+}
+
+/**
+ * clean_cache_range - write back a cache range with CLWB
+ * @vaddr:	virtual start address
+ * @size:	number of bytes to write back
+ *
+ * CLWB (cache line write back) is an unordered instruction which needs fencing
+ * with MFENCE or SFENCE to avoid ordering issues. Note that @size is
+ * internally rounded up to be cache line size aligned.
+ */
+void clean_cache_range(void *vaddr, unsigned int size)
+{
+	mb();
+	clean_cache_range_opt(vaddr, size);
+	mb();
+}
+EXPORT_SYMBOL_GPL(clean_cache_range);
+
 #ifdef CONFIG_ARCH_HAS_PMEM_API
 void arch_invalidate_pmem(void *addr, size_t size)
 {
-- 
2.25.1
