Message-Id: <20220321234844.1543161-10-bgardon@google.com>
Date: Mon, 21 Mar 2022 16:48:42 -0700
From: Ben Gardon <bgardon@...gle.com>
To: linux-kernel@...r.kernel.org, kvm@...r.kernel.org
Cc: Paolo Bonzini <pbonzini@...hat.com>, Peter Xu <peterx@...hat.com>,
Sean Christopherson <seanjc@...gle.com>,
David Matlack <dmatlack@...gle.com>,
Jim Mattson <jmattson@...gle.com>,
David Dunn <daviddunn@...gle.com>,
Jing Zhang <jingzhangos@...gle.com>,
Junaid Shahid <junaids@...gle.com>,
Ben Gardon <bgardon@...gle.com>
Subject: [PATCH v2 09/11] KVM: x86/MMU: Allow NX huge pages to be disabled on a per-VM basis

The NX hugepage mitigation for iTLB multihit is not needed for every
guest on a host. Allow disabling the mitigation on a per-VM basis to
avoid the performance hit of NX hugepages on trusted workloads.
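
As an illustration only (not part of this patch), a VMM could opt a
trusted VM out of the mitigation roughly as sketched below. The vm_fd
handle (obtained from KVM_CREATE_VM) and the trimmed error handling are
assumptions of the sketch; the capability number comes from the uapi
change in this patch.

  /*
   * Illustrative userspace sketch: opt a trusted VM out of the NX huge
   * page mitigation. vm_fd is assumed to come from KVM_CREATE_VM on
   * /dev/kvm; error handling is abbreviated.
   */
  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  static int disable_nx_huge_pages(int vm_fd)
  {
          struct kvm_enable_cap cap;

          /* Only attempt the opt-out if the kernel advertises the cap. */
          if (ioctl(vm_fd, KVM_CHECK_EXTENSION,
                    KVM_CAP_VM_DISABLE_NX_HUGE_PAGES) <= 0)
                  return -1;

          memset(&cap, 0, sizeof(cap));
          cap.cap = KVM_CAP_VM_DISABLE_NX_HUGE_PAGES;

          /* Clears nx_huge_pages for this VM and zaps its MMU. */
          return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
  }
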
Signed-off-by: Ben Gardon <bgardon@...gle.com>
---
arch/x86/include/asm/kvm_host.h | 1 +
arch/x86/kvm/mmu.h | 1 +
arch/x86/kvm/mmu/mmu.c | 6 ++++--
arch/x86/kvm/x86.c | 6 ++++++
include/uapi/linux/kvm.h | 1 +
5 files changed, 13 insertions(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 0a0c54639dd8..04ddfc475ce0 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1242,6 +1242,7 @@ struct kvm_arch {
#endif
bool nx_huge_pages;
+ bool disable_nx_huge_pages;
};
struct kvm_vm_stat {
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index dd28fe8d13ae..36d8d84ca6c6 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -177,6 +177,7 @@ static inline bool is_nx_huge_page_enabled(struct kvm *kvm)
{
return READ_ONCE(kvm->arch.nx_huge_pages);
}
+void kvm_update_nx_huge_pages(struct kvm *kvm);
static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
u32 err, bool prefetch)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index dc9672f70468..a7d387ccfd74 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -6195,9 +6195,10 @@ static void __set_nx_huge_pages(bool val)
nx_huge_pages = itlb_multihit_kvm_mitigation = val;
}
-static void kvm_update_nx_huge_pages(struct kvm *kvm)
+void kvm_update_nx_huge_pages(struct kvm *kvm)
{
- kvm->arch.nx_huge_pages = nx_huge_pages;
+ kvm->arch.nx_huge_pages = nx_huge_pages &&
+ !kvm->arch.disable_nx_huge_pages;
mutex_lock(&kvm->slots_lock);
kvm_mmu_zap_all_fast(kvm);
@@ -6451,6 +6452,7 @@ int kvm_mmu_post_init_vm(struct kvm *kvm)
int err;
kvm->arch.nx_huge_pages = READ_ONCE(nx_huge_pages);
+ kvm->arch.disable_nx_huge_pages = false;
err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0,
"kvm-nx-lpage-recovery",
&kvm->arch.nx_lpage_recovery_thread);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 51106d32f04e..73df90a6932b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4256,6 +4256,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_SYS_ATTRIBUTES:
case KVM_CAP_VAPIC:
case KVM_CAP_ENABLE_CAP:
+ case KVM_CAP_VM_DISABLE_NX_HUGE_PAGES:
r = 1;
break;
case KVM_CAP_EXIT_HYPERCALL:
@@ -6048,6 +6049,11 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
}
mutex_unlock(&kvm->lock);
break;
+ case KVM_CAP_VM_DISABLE_NX_HUGE_PAGES:
+ kvm->arch.disable_nx_huge_pages = true;
+ kvm_update_nx_huge_pages(kvm);
+ r = 0;
+ break;
default:
r = -EINVAL;
break;
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index ee5cc9e2a837..6f9fa7ecfd1e 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1144,6 +1144,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_S390_MEM_OP_EXTENSION 211
#define KVM_CAP_PMU_CAPABILITY 212
#define KVM_CAP_DISABLE_QUIRKS2 213
+#define KVM_CAP_VM_DISABLE_NX_HUGE_PAGES 214
#ifdef KVM_CAP_IRQ_ROUTING
--
2.35.1.894.gb6a874cedc-goog