Message-Id: <20171020143059.3291-14-brijesh.singh@amd.com>
Date: Fri, 20 Oct 2017 09:30:55 -0500
From: Brijesh Singh <brijesh.singh@....com>
To: x86@...nel.org, kvm@...r.kernel.org, linux-kernel@...r.kernel.org
Cc: Borislav Petkov <bp@...en8.de>,
Tom Lendacky <thomas.lendacky@....com>,
Brijesh Singh <brijesh.singh@....com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
"H. Peter Anvin" <hpa@...or.com>, Borislav Petkov <bp@...e.de>,
Andy Shevchenko <andriy.shevchenko@...ux.intel.com>,
David Laight <David.Laight@...LAB.COM>,
Arnd Bergmann <arnd@...db.de>
Subject: [Part1 PATCH v7 13/17] x86/io: Unroll string I/O when SEV is active
From: Tom Lendacky <thomas.lendacky@....com>
Secure Encrypted Virtualization (SEV) does not support string I/O, so
unroll the string I/O operation into a loop operating on one element at
a time.
Signed-off-by: Tom Lendacky <thomas.lendacky@....com>
Signed-off-by: Brijesh Singh <brijesh.singh@....com>
Reviewed-by: Borislav Petkov <bp@...e.de>
Tested-by: Borislav Petkov <bp@...e.de>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: "H. Peter Anvin" <hpa@...or.com>
Cc: Borislav Petkov <bp@...e.de>
Cc: Andy Shevchenko <andriy.shevchenko@...ux.intel.com>
Cc: David Laight <David.Laight@...LAB.COM>
Cc: Arnd Bergmann <arnd@...db.de>
Cc: x86@...nel.org
Cc: linux-kernel@...r.kernel.org
---
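For reference, the BUILDIO(b, b, char) instantiation in the hunk below
generates an outsb() roughly equivalent to this sketch (expansion written
out by hand, formatting approximate):

static inline void outsb(int port, const void *addr, unsigned long count)
{
        if (sev_key_active()) {
                /* SEV cannot emulate rep outsb: do one outb per element */
                unsigned char *value = (unsigned char *)addr;

                while (count) {
                        outb(*value, port);
                        value++;
                        count--;
                }
        } else {
                /* Fast path: a single rep-prefixed string instruction */
                asm volatile("rep; outsb"
                             : "+S"(addr), "+c"(count)
                             : "d"(port) : "memory");
        }
}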
arch/x86/include/asm/io.h | 43 +++++++++++++++++++++++++++++++++++++++----
arch/x86/mm/mem_encrypt.c | 8 ++++++++
2 files changed, 47 insertions(+), 4 deletions(-)
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index c40a95c33bb8..b8b8c2e3fb36 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -265,6 +265,21 @@ static inline void slow_down_io(void)
#endif
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+#include <linux/jump_label.h>
+
+extern struct static_key_false __sev;
+static inline bool sev_key_active(void)
+{
+ return static_branch_unlikely(&__sev);
+}
+
+#else /* !CONFIG_AMD_MEM_ENCRYPT */
+
+static inline bool sev_key_active(void) { return false; }
+
+#endif /* CONFIG_AMD_MEM_ENCRYPT */
+
#define BUILDIO(bwl, bw, type) \
static inline void out##bwl(unsigned type value, int port) \
{ \
@@ -295,14 +310,34 @@ static inline unsigned type in##bwl##_p(int port) \
\
static inline void outs##bwl(int port, const void *addr, unsigned long count) \
{ \
- asm volatile("rep; outs" #bwl \
- : "+S"(addr), "+c"(count) : "d"(port) : "memory"); \
+ if (sev_key_active()) { \
+ unsigned type *value = (unsigned type *)addr; \
+ while (count) { \
+ out##bwl(*value, port); \
+ value++; \
+ count--; \
+ } \
+ } else { \
+ asm volatile("rep; outs" #bwl \
+ : "+S"(addr), "+c"(count) \
+ : "d"(port) : "memory"); \
+ } \
} \
\
static inline void ins##bwl(int port, void *addr, unsigned long count) \
{ \
- asm volatile("rep; ins" #bwl \
- : "+D"(addr), "+c"(count) : "d"(port) : "memory"); \
+ if (sev_key_active()) { \
+ unsigned type *value = (unsigned type *)addr; \
+ while (count) { \
+ *value = in##bwl(port); \
+ value++; \
+ count--; \
+ } \
+ } else { \
+ asm volatile("rep; ins" #bwl \
+ : "+D"(addr), "+c"(count) \
+ : "d"(port) : "memory"); \
+ } \
}
BUILDIO(b, b, char)
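The __sev key above relies on the kernel's jump-label machinery, so the
non-SEV fast path costs only a NOP that is patched at runtime. A minimal
standalone sketch of that pattern, with hypothetical names that are not
part of this patch:

#include <linux/jump_label.h>
#include <linux/printk.h>

DEFINE_STATIC_KEY_FALSE(my_key);        /* branch is off by default */

static void hot_path(void)
{
        /* Compiles to a NOP until my_key is enabled */
        if (static_branch_unlikely(&my_key))
                pr_info("slow path\n");
}

static void setup(void)
{
        /* Runtime-patches every call site from NOP to a jump */
        static_branch_enable(&my_key);
}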
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index fd9c7bb51664..45110b60be21 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -41,6 +41,8 @@ static char sme_cmdline_off[] __initdata = "off";
*/
u64 sme_me_mask __section(.data) = 0;
EXPORT_SYMBOL_GPL(sme_me_mask);
+DEFINE_STATIC_KEY_FALSE(__sev);
+EXPORT_SYMBOL_GPL(__sev);
static bool sev_enabled __section(.data);
@@ -313,6 +315,12 @@ void __init mem_encrypt_init(void)
if (sev_active())
dma_ops = &sev_dma_ops;
+ /*
+ * With SEV, we need to unroll the rep string I/O instructions.
+ */
+ if (sev_active())
+ static_branch_enable(&__sev);
+
pr_info("AMD %s active\n",
sev_active() ? "Secure Encrypted Virtualization (SEV)"
: "Secure Memory Encryption (SME)");
--
2.9.5