Message-Id: <20170927151329.70011-14-brijesh.singh@amd.com>
Date: Wed, 27 Sep 2017 10:13:25 -0500
From: Brijesh Singh <brijesh.singh@....com>
To: kvm@...r.kernel.org, linux-kernel@...r.kernel.org, x86@...nel.org
Cc: Tom Lendacky <thomas.lendacky@....com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
"H. Peter Anvin" <hpa@...or.com>, Borislav Petkov <bp@...e.de>,
Andy Shevchenko <andriy.shevchenko@...ux.intel.com>,
David Laight <David.Laight@...LAB.COM>,
Arnd Bergmann <arnd@...db.de>,
Brijesh Singh <brijesh.singh@....com>
Subject: [Part1 PATCH v5 13/17] x86/io: Unroll string I/O when SEV is active
From: Tom Lendacky <thomas.lendacky@....com>
Secure Encrypted Virtualization (SEV) does not support string I/O, so
unroll the string I/O operation into a loop operating on one element at
a time.
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: "H. Peter Anvin" <hpa@...or.com>
Cc: Borislav Petkov <bp@...e.de>
Cc: Andy Shevchenko <andriy.shevchenko@...ux.intel.com>
Cc: David Laight <David.Laight@...LAB.COM>
Cc: Arnd Bergmann <arnd@...db.de>
Cc: x86@...nel.org
Cc: linux-kernel@...r.kernel.org
Signed-off-by: Tom Lendacky <thomas.lendacky@....com>
Signed-off-by: Brijesh Singh <brijesh.singh@....com>
---
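For readers who prefer not to expand BUILDIO() by hand: a minimal userspace C
sketch of what the unrolled output path reduces to when SEV is active. The
names sev_active_flag, port_write_byte() and outsb_sketch() are illustrative
stand-ins, not kernel symbols; the real helpers test the __sev static key and
keep the single REP OUTS instruction when the key is clear.

    #include <stdbool.h>
    #include <stdint.h>

    static bool sev_active_flag;             /* stand-in for the __sev static key */

    static void port_write_byte(uint8_t value, int port)
    {
            /* stand-in for outb(); a real kernel build emits the OUT instruction */
            (void)value;
            (void)port;
    }

    static void outsb_sketch(int port, const void *addr, unsigned long count)
    {
            if (sev_active_flag) {
                    /* SEV guest: one OUT per element instead of REP OUTSB */
                    const uint8_t *value = addr;

                    while (count--)
                            port_write_byte(*value++, port);
            } else {
                    /* non-SEV: the single REP OUTSB the macro already emits */
            }
    }

    int main(void)
    {
            uint8_t buf[4] = { 1, 2, 3, 4 };

            sev_active_flag = true;          /* pretend to be an SEV guest */
            outsb_sketch(0x3f8, buf, sizeof(buf));
            return 0;
    }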
arch/x86/include/asm/io.h | 42 ++++++++++++++++++++++++++++++++++++++----
arch/x86/mm/mem_encrypt.c | 8 ++++++++
2 files changed, 46 insertions(+), 4 deletions(-)
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index c40a95c33bb8..a785270b53e1 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -265,6 +265,20 @@ static inline void slow_down_io(void)
#endif
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+
+extern struct static_key_false __sev;
+static inline bool sev_key_active(void)
+{
+ return static_branch_unlikely(&__sev);
+}
+
+#else /* !CONFIG_AMD_MEM_ENCRYPT */
+
+static inline bool sev_key_active(void) { return false; }
+
+#endif /* CONFIG_AMD_MEM_ENCRYPT */
+
#define BUILDIO(bwl, bw, type) \
static inline void out##bwl(unsigned type value, int port) \
{ \
@@ -295,14 +309,34 @@ static inline unsigned type in##bwl##_p(int port) \
\
static inline void outs##bwl(int port, const void *addr, unsigned long count) \
{ \
- asm volatile("rep; outs" #bwl \
- : "+S"(addr), "+c"(count) : "d"(port) : "memory"); \
+ if (sev_key_active()) { \
+ unsigned type *value = (unsigned type *)addr; \
+ while (count) { \
+ out##bwl(*value, port); \
+ value++; \
+ count--; \
+ } \
+ } else { \
+ asm volatile("rep; outs" #bwl \
+ : "+S"(addr), "+c"(count) \
+ : "d"(port) : "memory"); \
+ } \
} \
\
static inline void ins##bwl(int port, void *addr, unsigned long count) \
{ \
- asm volatile("rep; ins" #bwl \
- : "+D"(addr), "+c"(count) : "d"(port) : "memory"); \
+ if (sev_key_active()) { \
+ unsigned type *value = (unsigned type *)addr; \
+ while (count) { \
+ *value = in##bwl(port); \
+ value++; \
+ count--; \
+ } \
+ } else { \
+ asm volatile("rep; ins" #bwl \
+ : "+D"(addr), "+c"(count) \
+ : "d"(port) : "memory"); \
+ } \
}
BUILDIO(b, b, char)
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index bc6ba4cbe9b4..05c3cb9fb442 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -39,6 +39,8 @@ static char sme_cmdline_off[] __initdata = "off";
*/
u64 sme_me_mask __section(.data) = 0;
EXPORT_SYMBOL_GPL(sme_me_mask);
+DEFINE_STATIC_KEY_FALSE(__sev);
+EXPORT_SYMBOL_GPL(__sev);
static bool sev_enabled;
@@ -311,6 +313,12 @@ void __init mem_encrypt_init(void)
if (sev_active())
dma_ops = &sev_dma_ops;
+ /*
+ * With SEV, we need to unroll the rep string I/O instructions.
+ */
+ if (sev_active())
+ static_branch_enable(&__sev);
+
pr_info("AMD %s active\n",
sev_active() ? "Secure Encrypted Virtualization (SEV)"
: "Secure Memory Encryption (SME)");
--
2.9.5