Message-Id: <20220926100756.560206336@linuxfoundation.org>
Date: Mon, 26 Sep 2022 12:11:24 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Sean Christopherson <seanjc@...gle.com>,
Mingwei Zhang <mizhang@...gle.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Ovidiu Panait <ovidiu.panait@...driver.com>,
Liam Merwick <liam.merwick@...cle.com>
Subject: [PATCH 5.10 058/141] KVM: SEV: add cache flush to solve SEV cache incoherency issues
From: Mingwei Zhang <mizhang@...gle.com>
commit 683412ccf61294d727ead4a73d97397396e69a6b upstream.
Flush the CPU caches when memory is reclaimed from an SEV guest (where
reclaim also includes it being unmapped from KVM's memslots). Due to lack
of coherency for SEV encrypted memory, failure to flush results in silent
data corruption if userspace is malicious/broken and doesn't ensure SEV
guest memory is properly pinned and unpinned.
Cache coherency is not enforced across the VM boundary in SEV (AMD APM
vol.2 Section 15.34.7). Confidential cachelines generated by confidential
VM guests have to be explicitly flushed on the host side. If a memory page
containing dirty confidential cachelines is released by the VM and
reallocated to another user, the cachelines may corrupt the new user at a
later time.
KVM takes a shortcut by assuming all confidential memory remains pinned
until the end of the VM's lifetime. Therefore, KVM does not flush caches at
mmu_notifier invalidation events. Because of this incorrect assumption and
the lack of cache flushing, malicious userspace can crash the host kernel
by creating a malicious VM that continuously allocates and releases
unpinned confidential memory pages while the VM is running.
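As a rough illustration (not part of this patch), the churn described
above boils down to a userspace loop along these lines; the region size
and the way the guest dirties the memory are hypothetical:

  #include <stddef.h>
  #include <sys/mman.h>

  /* Repeatedly back SEV guest memory with fresh anonymous pages and
   * release them. Each munmap() fires
   * mmu_notifier_invalidate_range_start(), which, before this patch,
   * returned the pages to the host without flushing the guest's
   * incoherent dirty cachelines.
   */
  static void churn_guest_memory(void)
  {
          const size_t len = 2 * 1024 * 1024;     /* illustrative size */

          for (;;) {
                  void *mem = mmap(NULL, len, PROT_READ | PROT_WRITE,
                                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                  if (mem == MAP_FAILED)
                          break;
                  /* ... the running SEV guest dirties this range ... */
                  munmap(mem, len);
          }
  }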
Add cache flush operations to the mmu_notifier callbacks to ensure that
any physical memory leaving the guest VM gets flushed. In particular, hook
the mmu_notifier_invalidate_range_start and mmu_notifier_release events
and flush caches accordingly. Invoke the hook after releasing the mmu lock
to avoid contention with other vCPUs.
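For reference, the resulting flow on the invalidation path (sketched
from the hunks below, not authoritative) is:

  kvm_mmu_notifier_invalidate_range_start()
          spin_lock(&kvm->mmu_lock);
          ... unmap the range, flush remote TLBs ...
          spin_unlock(&kvm->mmu_lock);
          kvm_arch_guest_memory_reclaimed(kvm);   /* new, outside mmu_lock */
            -> kvm_x86_ops.guest_memory_reclaimed(kvm)
               -> sev_guest_memory_reclaimed(kvm)
                  -> wbinvd_on_all_cpus();        /* SEV guests only */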
Cc: stable@...r.kernel.org
Suggested-by: Sean Christopherson <seanjc@...gle.com>
Reported-by: Mingwei Zhang <mizhang@...gle.com>
Signed-off-by: Mingwei Zhang <mizhang@...gle.com>
Message-Id: <20220421031407.2516575-4-mizhang@...gle.com>
Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
[OP: applied kvm_arch_guest_memory_reclaimed() calls in kvm_set_memslot() and
kvm_mmu_notifier_invalidate_range_start();
OP: adjusted kvm_arch_guest_memory_reclaimed() to not use static_call_cond()]
Signed-off-by: Ovidiu Panait <ovidiu.panait@...driver.com>
Reviewed-by: Liam Merwick <liam.merwick@...cle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
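Regarding the [OP: ...] note above: upstream implements the arch hook via
the kvm_x86_ops static-call machinery, which 5.10 lacks, so this backport
open-codes the conditional call instead (see the x86.c hunk below). The
upstream version, for comparison, is believed to be:

  void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
  {
          static_call_cond(kvm_x86_guest_memory_reclaimed)(kvm);
  }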
---
arch/x86/include/asm/kvm_host.h | 1 +
arch/x86/kvm/svm/sev.c | 8 ++++++++
arch/x86/kvm/svm/svm.c | 1 +
arch/x86/kvm/svm/svm.h | 2 ++
arch/x86/kvm/x86.c | 6 ++++++
include/linux/kvm_host.h | 2 ++
virt/kvm/kvm_main.c | 16 ++++++++++++++--
7 files changed, 34 insertions(+), 2 deletions(-)
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1275,6 +1275,7 @@ struct kvm_x86_ops {
int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
+ void (*guest_memory_reclaimed)(struct kvm *kvm);
int (*get_msr_feature)(struct kvm_msr_entry *entry);
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -1177,6 +1177,14 @@ void sev_hardware_teardown(void)
sev_flush_asids();
}
+void sev_guest_memory_reclaimed(struct kvm *kvm)
+{
+ if (!sev_guest(kvm))
+ return;
+
+ wbinvd_on_all_cpus();
+}
+
void pre_sev_run(struct vcpu_svm *svm, int cpu)
{
struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4325,6 +4325,7 @@ static struct kvm_x86_ops svm_x86_ops __
.mem_enc_op = svm_mem_enc_op,
.mem_enc_reg_region = svm_register_enc_region,
.mem_enc_unreg_region = svm_unregister_enc_region,
+ .guest_memory_reclaimed = sev_guest_memory_reclaimed,
.can_emulate_instruction = svm_can_emulate_instruction,
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -491,6 +491,8 @@ int svm_register_enc_region(struct kvm *
struct kvm_enc_region *range);
int svm_unregister_enc_region(struct kvm *kvm,
struct kvm_enc_region *range);
+void sev_guest_memory_reclaimed(struct kvm *kvm);
+
void pre_sev_run(struct vcpu_svm *svm, int cpu);
int __init sev_hardware_setup(void);
void sev_hardware_teardown(void);
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8875,6 +8875,12 @@ void kvm_arch_mmu_notifier_invalidate_ra
kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
}
+void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
+{
+ if (kvm_x86_ops.guest_memory_reclaimed)
+ kvm_x86_ops.guest_memory_reclaimed(kvm);
+}
+
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
{
if (!lapic_in_kernel(vcpu))
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1489,6 +1489,8 @@ static inline long kvm_arch_vcpu_async_i
void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
unsigned long start, unsigned long end);
+void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);
+
#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
#else
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -159,6 +159,10 @@ __weak void kvm_arch_mmu_notifier_invali
{
}
+__weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
+{
+}
+
bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
{
/*
@@ -340,6 +344,12 @@ void kvm_reload_remote_mmus(struct kvm *
kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}
+static void kvm_flush_shadow_all(struct kvm *kvm)
+{
+ kvm_arch_flush_shadow_all(kvm);
+ kvm_arch_guest_memory_reclaimed(kvm);
+}
+
#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
gfp_t gfp_flags)
@@ -489,6 +499,7 @@ static int kvm_mmu_notifier_invalidate_r
kvm_flush_remote_tlbs(kvm);
spin_unlock(&kvm->mmu_lock);
+ kvm_arch_guest_memory_reclaimed(kvm);
srcu_read_unlock(&kvm->srcu, idx);
return 0;
@@ -592,7 +603,7 @@ static void kvm_mmu_notifier_release(str
int idx;
idx = srcu_read_lock(&kvm->srcu);
- kvm_arch_flush_shadow_all(kvm);
+ kvm_flush_shadow_all(kvm);
srcu_read_unlock(&kvm->srcu, idx);
}
@@ -896,7 +907,7 @@ static void kvm_destroy_vm(struct kvm *k
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
- kvm_arch_flush_shadow_all(kvm);
+ kvm_flush_shadow_all(kvm);
#endif
kvm_arch_destroy_vm(kvm);
kvm_destroy_devices(kvm);
@@ -1238,6 +1249,7 @@ static int kvm_set_memslot(struct kvm *k
* - kvm_is_visible_gfn (mmu_check_root)
*/
kvm_arch_flush_shadow_memslot(kvm, slot);
+ kvm_arch_guest_memory_reclaimed(kvm);
}
r = kvm_arch_prepare_memory_region(kvm, new, mem, change);