Message-ID: <c6b40305-d643-6023-907b-e6858d422a36@linux.intel.com>
Date: Thu, 13 May 2021 09:40:34 -0700
From: "Kuppuswamy, Sathyanarayanan"
<sathyanarayanan.kuppuswamy@...ux.intel.com>
To: Sean Christopherson <seanjc@...gle.com>,
Dave Hansen <dave.hansen@...el.com>
Cc: "Kirill A. Shutemov" <kirill@...temov.name>,
Peter Zijlstra <peterz@...radead.org>,
Andy Lutomirski <luto@...nel.org>,
Dan Williams <dan.j.williams@...el.com>,
Tony Luck <tony.luck@...el.com>,
Andi Kleen <ak@...ux.intel.com>,
Kirill Shutemov <kirill.shutemov@...ux.intel.com>,
Kuppuswamy Sathyanarayanan <knsathya@...nel.org>,
Raj Ashok <ashok.raj@...el.com>, linux-kernel@...r.kernel.org
Subject: Re: [RFC v2 26/32] x86/mm: Move force_dma_unencrypted() to common
code
On 5/12/21 8:53 AM, Sean Christopherson wrote:
> On Wed, May 12, 2021, Dave Hansen wrote:
>> On 5/12/21 6:08 AM, Kirill A. Shutemov wrote:
>>>> That's not an excuse to have a bunch of AMD (or Intel) feature-specific
>>>> code in a file named "common". I'd make an attempt to keep them
>>>> separate and then call into the two separate functions *from* the common
>>>> function.
>>> But why? What good does the additional level of indirection bring?
>>>
>>> It's like saying arch/x86/kernel/cpu/common.c shouldn't have anything AMD
>>> or Intel specific. If a function can cover both vendors I don't see a
>>> point for additional complexity.
>>
>> Because the code is already separate. You're actually going to some
>> trouble to move the SEV-specific code and then combine it with the
>> TDX-specific code.
>>
>> Anyway, please just give it a shot. Should take all of ten minutes. If
>> it doesn't work out in practice, fine. You'll have a good paragraph for
>> the changelog.
>
> Or maybe wait to see how Boris' proposed protected_guest_has() pans out? E.g. if
> we can do "protected_guest_has(MEMORY_ENCRYPTION)" or whatever, then the truly
> common bits could be placed into common.c without any vendor-specific logic.
How about the following abstraction? This patch was initially created to let us use
is_tdx_guest() outside of arch/x86 code, but it has been extended to support bitmap flags.
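With these helpers in place, the truly common part of force_dma_unencrypted() could
in principle be written without any vendor-specific checks, roughly along these
lines (a simplified sketch only, not part of the patch below; it ignores the
SME/device-mask handling the existing AMD code also performs):

#include <linux/device.h>
#include <linux/protected_guest.h>

bool force_dma_unencrypted(struct device *dev)
{
	/* DMA buffers shared with the VMM must be mapped decrypted. */
	return protected_guest_has(MEMORY_ENCRYPTION);
}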
commit 188bdd3c97e49020b2bda9efd992a22091423b85
Author: Kuppuswamy Sathyanarayanan <sathyanarayanan.kuppuswamy@...ux.intel.com>
Date: Wed May 12 11:35:13 2021 -0700
tdx: Introduce generic protected_guest abstraction
Add a generic way to check whether we are running as an encrypted guest,
without requiring x86-specific ifdefs. This can then be used in
non-architecture-specific code. Enable this when running under
TDX/SEV.
Also add helper functions to set/test encrypted guest feature
flags.
Signed-off-by: Kuppuswamy Sathyanarayanan <sathyanarayanan.kuppuswamy@...ux.intel.com>
diff --git a/arch/Kconfig b/arch/Kconfig
index ecfd3520b676..98c30312555b 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -956,6 +956,9 @@ config HAVE_ARCH_NVRAM_OPS
config ISA_BUS_API
def_bool ISA
+config ARCH_HAS_PROTECTED_GUEST
+ bool
+
#
# ABI hall of shame
#
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 07fb4df1d881..001487c21874 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -882,6 +882,7 @@ config INTEL_TDX_GUEST
select PARAVIRT_XL
select X86_X2APIC
select SECURITY_LOCKDOWN_LSM
+ select ARCH_HAS_PROTECTED_GUEST
help
Provide support for running in a trusted domain on Intel processors
equipped with Trusted Domain eXtensions. TDX is a new Intel
@@ -1537,6 +1538,7 @@ config AMD_MEM_ENCRYPT
select ARCH_USE_MEMREMAP_PROT
select ARCH_HAS_FORCE_DMA_UNENCRYPTED
select INSTRUCTION_DECODER
+ select ARCH_HAS_PROTECTED_GUEST
help
Say yes to enable support for the encryption of system memory.
This requires an AMD processor that supports Secure Memory
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index ccab6cf91283..8260893c34ae 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -21,6 +21,7 @@
#include <linux/usb/xhci-dbgp.h>
#include <linux/static_call.h>
#include <linux/swiotlb.h>
+#include <linux/protected_guest.h>
#include <uapi/linux/mount.h>
@@ -107,6 +108,10 @@ static struct resource bss_resource = {
.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};
+#ifdef CONFIG_ARCH_HAS_PROTECTED_GUEST
+DECLARE_BITMAP(protected_guest_flags, PROTECTED_GUEST_BITMAP_LEN);
+EXPORT_SYMBOL(protected_guest_flags);
+#endif
#ifdef CONFIG_X86_32
/* CPU data as detected by the assembly code in head_32.S */
diff --git a/arch/x86/kernel/sev-es.c b/arch/x86/kernel/sev-es.c
index 04a780abb512..45b848ec8325 100644
--- a/arch/x86/kernel/sev-es.c
+++ b/arch/x86/kernel/sev-es.c
@@ -19,6 +19,7 @@
#include <linux/memblock.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/protected_guest.h>
#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
@@ -680,6 +681,9 @@ static void __init init_ghcb(int cpu)
data->ghcb_active = false;
data->backup_ghcb_active = false;
+
+ set_protected_guest_flag(GUEST_TYPE_SEV);
+ set_protected_guest_flag(MEMORY_ENCRYPTION);
}
void __init sev_es_init_vc_handling(void)
diff --git a/arch/x86/kernel/tdx.c b/arch/x86/kernel/tdx.c
index 4dfacde05f0c..d0207b990fe4 100644
--- a/arch/x86/kernel/tdx.c
+++ b/arch/x86/kernel/tdx.c
@@ -7,6 +7,7 @@
#include <asm/vmx.h>
#include <linux/cpu.h>
+#include <linux/protected_guest.h>
static struct {
unsigned int gpa_width;
@@ -92,6 +93,9 @@ void __init tdx_early_init(void)
setup_force_cpu_cap(X86_FEATURE_TDX_GUEST);
+ set_protected_guest_flag(GUEST_TYPE_TDX);
+ set_protected_guest_flag(MEMORY_ENCRYPTION);
+
tdg_get_info();
pr_info("TDX guest is initialized\n");
diff --git a/include/linux/protected_guest.h b/include/linux/protected_guest.h
new file mode 100644
index 000000000000..44e8c642654c
--- /dev/null
+++ b/include/linux/protected_guest.h
@@ -0,0 +1,40 @@
+#ifndef _LINUX_PROTECTED_GUEST_H
+#define _LINUX_PROTECTED_GUEST_H 1
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#define PROTECTED_GUEST_BITMAP_LEN 128
+
+/* Protected Guest vendor types */
+#define GUEST_TYPE_TDX (1)
+#define GUEST_TYPE_SEV (2)
+
+/* Protected Guest features */
+#define MEMORY_ENCRYPTION (20)
+
+#ifdef CONFIG_ARCH_HAS_PROTECTED_GUEST
+extern DECLARE_BITMAP(protected_guest_flags, PROTECTED_GUEST_BITMAP_LEN);
+
+static inline bool protected_guest_has(unsigned long flag)
+{
+ return test_bit(flag, protected_guest_flags);
+}
+
+static inline void set_protected_guest_flag(unsigned long flag)
+{
+ __set_bit(flag, protected_guest_flags);
+}
+
+static inline bool is_protected_guest(void)
+{
+ return protected_guest_has(GUEST_TYPE_TDX) ||
+        protected_guest_has(GUEST_TYPE_SEV);
+}
+#else
+static inline bool protected_guest_has(unsigned long flag) { return false; }
+static inline void set_protected_guest_flag(unsigned long flag) { }
+static inline bool is_protected_guest(void) { return false; }
+#endif
+
+#endif
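For reference, a caller outside arch/x86 would then look roughly like this
(hypothetical usage example, not taken from the patch above):

#include <linux/printk.h>
#include <linux/protected_guest.h>

static void example_setup(void)
{
	/* Nothing to do on bare metal or in an ordinary guest. */
	if (!is_protected_guest())
		return;

	if (protected_guest_has(MEMORY_ENCRYPTION))
		pr_info("Running as a protected guest with memory encryption\n");
}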
>
--
Sathyanarayanan Kuppuswamy
Linux Kernel Developer