[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20240712-asi-rfc-24-v1-26-144b319a40d8@google.com>
Date: Fri, 12 Jul 2024 17:00:44 +0000
From: Brendan Jackman <jackmanb@...gle.com>
To: Thomas Gleixner <tglx@...utronix.de>, Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>, "H. Peter Anvin" <hpa@...or.com>,
Andy Lutomirski <luto@...nel.org>, Peter Zijlstra <peterz@...radead.org>,
Sean Christopherson <seanjc@...gle.com>, Paolo Bonzini <pbonzini@...hat.com>,
Alexandre Chartre <alexandre.chartre@...cle.com>, Liran Alon <liran.alon@...cle.com>,
Jan Setje-Eilers <jan.setjeeilers@...cle.com>, Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>, Mark Rutland <mark.rutland@....com>,
Andrew Morton <akpm@...ux-foundation.org>, Mel Gorman <mgorman@...e.de>,
Lorenzo Stoakes <lstoakes@...il.com>, David Hildenbrand <david@...hat.com>, Vlastimil Babka <vbabka@...e.cz>,
Michal Hocko <mhocko@...nel.org>, Khalid Aziz <khalid.aziz@...cle.com>,
Juri Lelli <juri.lelli@...hat.com>, Vincent Guittot <vincent.guittot@...aro.org>,
Dietmar Eggemann <dietmar.eggemann@....com>, Steven Rostedt <rostedt@...dmis.org>,
Valentin Schneider <vschneid@...hat.com>, Paul Turner <pjt@...gle.com>, Reiji Watanabe <reijiw@...gle.com>,
Junaid Shahid <junaids@...gle.com>, Ofir Weisse <oweisse@...gle.com>,
Yosry Ahmed <yosryahmed@...gle.com>, Patrick Bellasi <derkling@...gle.com>,
KP Singh <kpsingh@...gle.com>, Alexandra Sandulescu <aesa@...gle.com>,
Matteo Rizzo <matteorizzo@...gle.com>, Jann Horn <jannh@...gle.com>
Cc: x86@...nel.org, linux-kernel@...r.kernel.org, linux-mm@...ck.org,
kvm@...r.kernel.org, Brendan Jackman <jackmanb@...gle.com>
Subject: [PATCH 26/26] KVM: x86: asi: Add some mitigations on address space transitions
Here we start actually turning ASI into a real exploit mitigation. On
all CPUs we attempt to obliterate any indirect branch predictor training
before mapping in any secrets. We can also flush side channels on the
inverse transition. So, in this iteration we flush L1D, but only on CPUs
affected by L1TF.
The rationale for this is: L1TF seems to have been a relative outlier in
terms of its impact, and the mitigation is obviously rather devastating.
On the other hand, Spectre-type attacks are continuously being found,
and it's quite reasonable to assume that existing systems are vulnerable
to variations that are not currently mitigated by bespoke techniques
like Safe RET.
This is clearly an incomplete policy, for example it probably makes
sense to perform MDS mitigations in post_asi_enter, and there is clearly
a wide range of alternative postures with regard to per-platform vs
blanket mitigation configurations. This also ought to be integrated more
intelligently with bugs.c - this will probably require a fair bit of
discussion so it might warrant a patchset all to itself. For now though,
this ought to provide an example of the kind of thing we might do with
ASI.
The changes to the inline asm for L1D flushes are to avoid duplicate
jump labels breaking the build in the case that vmx_l1d_flush() gets
inlined at multiple locations (as it seems to do in my builds).
Signed-off-by: Brendan Jackman <jackmanb@...gle.com>
---
arch/x86/include/asm/kvm_host.h | 2 +
arch/x86/include/asm/nospec-branch.h | 2 +
arch/x86/kvm/vmx/vmx.c | 88 ++++++++++++++++++++++++------------
arch/x86/kvm/x86.c | 33 +++++++++++++-
arch/x86/lib/retpoline.S | 7 +++
5 files changed, 101 insertions(+), 31 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 6c3326cb8273c..8b7226dd2e027 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1840,6 +1840,8 @@ struct kvm_x86_init_ops {
struct kvm_x86_ops *runtime_ops;
struct kvm_pmu_ops *pmu_ops;
+
+ void (*post_asi_enter)(void);
};
struct kvm_arch_async_pf {
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index ff5f1ecc7d1e6..9502bdafc1edd 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -605,6 +605,8 @@ static __always_inline void mds_idle_clear_cpu_buffers(void)
mds_clear_cpu_buffers();
}
+extern void fill_return_buffer(void);
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 1105d666a8ade..6efcbddf6ce27 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6629,37 +6629,18 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
* is not exactly LRU. This could be sized at runtime via topology
* information but as all relevant affected CPUs have 32KiB L1D cache size
* there is no point in doing so.
+ *
+ * Must be reentrant, for use by vmx_post_asi_enter.
*/
-static noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu)
+static inline_or_noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu)
{
int size = PAGE_SIZE << L1D_CACHE_ORDER;
/*
- * This code is only executed when the flush mode is 'cond' or
- * 'always'
+ * In theory we lose some of these increments to reentrancy under ASI.
+ * We just tolerate imprecise stats rather than deal with synchronizing.
+ * Anyway in practice on 64 bit it's gonna be a single instruction.
*/
- if (static_branch_likely(&vmx_l1d_flush_cond)) {
- bool flush_l1d;
-
- /*
- * Clear the per-vcpu flush bit, it gets set again
- * either from vcpu_run() or from one of the unsafe
- * VMEXIT handlers.
- */
- flush_l1d = vcpu->arch.l1tf_flush_l1d;
- vcpu->arch.l1tf_flush_l1d = false;
-
- /*
- * Clear the per-cpu flush bit, it gets set again from
- * the interrupt handlers.
- */
- flush_l1d |= kvm_get_cpu_l1tf_flush_l1d();
- kvm_clear_cpu_l1tf_flush_l1d();
-
- if (!flush_l1d)
- return;
- }
-
vcpu->stat.l1d_flush++;
if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
@@ -6670,26 +6651,57 @@ static noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu)
asm volatile(
/* First ensure the pages are in the TLB */
"xorl %%eax, %%eax\n"
- ".Lpopulate_tlb:\n\t"
+ ".Lpopulate_tlb_%=:\n\t"
"movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
"addl $4096, %%eax\n\t"
"cmpl %%eax, %[size]\n\t"
- "jne .Lpopulate_tlb\n\t"
+ "jne .Lpopulate_tlb_%=\n\t"
"xorl %%eax, %%eax\n\t"
"cpuid\n\t"
/* Now fill the cache */
"xorl %%eax, %%eax\n"
- ".Lfill_cache:\n"
+ ".Lfill_cache_%=:\n"
"movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
"addl $64, %%eax\n\t"
"cmpl %%eax, %[size]\n\t"
- "jne .Lfill_cache\n\t"
+ "jne .Lfill_cache_%=\n\t"
"lfence\n"
:: [flush_pages] "r" (vmx_l1d_flush_pages),
[size] "r" (size)
: "eax", "ebx", "ecx", "edx");
}
+static noinstr void vmx_maybe_l1d_flush(struct kvm_vcpu *vcpu)
+{
+ /*
+ * This code is only executed when the flush mode is 'cond' or
+ * 'always'
+ */
+ if (static_branch_likely(&vmx_l1d_flush_cond)) {
+ bool flush_l1d;
+
+ /*
+ * Clear the per-vcpu flush bit, it gets set again
+ * either from vcpu_run() or from one of the unsafe
+ * VMEXIT handlers.
+ */
+ flush_l1d = vcpu->arch.l1tf_flush_l1d;
+ vcpu->arch.l1tf_flush_l1d = false;
+
+ /*
+ * Clear the per-cpu flush bit, it gets set again from
+ * the interrupt handlers.
+ */
+ flush_l1d |= kvm_get_cpu_l1tf_flush_l1d();
+ kvm_clear_cpu_l1tf_flush_l1d();
+
+ if (!flush_l1d)
+ return;
+ }
+
+ vmx_l1d_flush(vcpu);
+}
+
static void vmx_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
@@ -7284,7 +7296,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
* This is only after asi_enter() for performance reasons.
*/
if (static_branch_unlikely(&vmx_l1d_should_flush))
- vmx_l1d_flush(vcpu);
+ vmx_maybe_l1d_flush(vcpu);
else if (static_branch_unlikely(&mmio_stale_data_clear) &&
kvm_arch_has_assigned_device(vcpu->kvm))
mds_clear_cpu_buffers();
@@ -8321,6 +8333,14 @@ gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags
return (sign_extend64(gva, lam_bit) & ~BIT_ULL(63)) | (gva & BIT_ULL(63));
}
+#ifdef CONFIG_MITIGATION_ADDRESS_SPACE_ISOLATION
+static noinstr void vmx_post_asi_enter(void)
+{
+ if (boot_cpu_has_bug(X86_BUG_L1TF))
+ vmx_l1d_flush(kvm_get_running_vcpu());
+}
+#endif
+
static struct kvm_x86_ops vmx_x86_ops __initdata = {
.name = KBUILD_MODNAME,
@@ -8727,6 +8747,14 @@ static struct kvm_x86_init_ops vmx_init_ops __initdata = {
.runtime_ops = &vmx_x86_ops,
.pmu_ops = &intel_pmu_ops,
+
+#ifdef CONFIG_MITIGATION_ADDRESS_SPACE_ISOLATION
+ /*
+ * Only Intel CPUs currently do anything in post-enter, so this is a
+ * vendor hook for now.
+ */
+ .post_asi_enter = vmx_post_asi_enter,
+#endif
};
static void vmx_cleanup_l1d_flush(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b9947e88d4ac6..b5e4df2aa1636 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9695,6 +9695,36 @@ static void kvm_x86_check_cpu_compat(void *ret)
*(int *)ret = kvm_x86_check_processor_compatibility();
}
+#ifdef CONFIG_MITIGATION_ADDRESS_SPACE_ISOLATION
+
+static noinstr void pre_asi_exit(void)
+{
+ /*
+ * Flush out prediction trainings by the guest before we go to access
+ * secrets.
+ */
+
+ /* Clear normal indirect branch predictions, if we haven't */
+ if (cpu_feature_enabled(X86_FEATURE_IBPB) &&
+ !cpu_feature_enabled(X86_FEATURE_IBPB_ON_VMEXIT))
+ __wrmsr(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, 0);
+
+ /* Flush the RAS/RSB if we haven't already. */
+ if (!IS_ENABLED(CONFIG_RETPOLINE) ||
+ !cpu_feature_enabled(X86_FEATURE_RSB_VMEXIT))
+ fill_return_buffer();
+}
+
+struct asi_hooks asi_hooks = {
+ .pre_asi_exit = pre_asi_exit,
+ /* post_asi_enter populated later. */
+};
+
+#else /* CONFIG_MITIGATION_ADDRESS_SPACE_ISOLATION */
+struct asi_hooks asi_hooks = {};
+#endif /* CONFIG_MITIGATION_ADDRESS_SPACE_ISOLATION */
+
+
int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
{
u64 host_pat;
@@ -9753,7 +9783,8 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
if (r)
goto out_free_percpu;
- r = asi_register_class("KVM", NULL);
+ asi_hooks.post_asi_enter = ops->post_asi_enter;
+ r = asi_register_class("KVM", &asi_hooks);
if (r < 0)
goto out_mmu_exit;
kvm_asi_index = r;
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index 391059b2c6fbc..db5b8ee01efeb 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -396,3 +396,10 @@ SYM_CODE_END(__x86_return_thunk)
EXPORT_SYMBOL(__x86_return_thunk)
#endif /* CONFIG_MITIGATION_RETHUNK */
+
+.pushsection .noinstr.text, "ax"
+SYM_CODE_START(fill_return_buffer)
+ __FILL_RETURN_BUFFER(%_ASM_AX,RSB_CLEAR_LOOPS)
+ RET
+SYM_CODE_END(fill_return_buffer)
+.popsection
--
2.45.2.993.g49e7a77208-goog
Powered by blists - more mailing lists