Message-Id: <20220310164532.1821490-10-bgardon@google.com>
Date: Thu, 10 Mar 2022 08:45:28 -0800
From: Ben Gardon <bgardon@...gle.com>
To: linux-kernel@...r.kernel.org, kvm@...r.kernel.org
Cc: Paolo Bonzini <pbonzini@...hat.com>, Peter Xu <peterx@...hat.com>,
Sean Christopherson <seanjc@...gle.com>,
David Matlack <dmatlack@...gle.com>,
Jim Mattson <jmattson@...gle.com>,
David Dunn <daviddunn@...gle.com>,
Jing Zhang <jingzhangos@...gle.com>,
Junaid Shahid <junaids@...gle.com>,
Ben Gardon <bgardon@...gle.com>
Subject: [PATCH 09/13] KVM: x86/MMU: Track NX hugepages on a per-VM basis
Track whether NX hugepages are enabled on a per-VM basis instead of as a
host-wide setting. With this commit, the per-VM state will always be the
same as the host-wide setting, but in future commits, it will be allowed
to differ.
No functional change intended.
Signed-off-by: Ben Gardon <bgardon@...gle.com>
---
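Note (not part of the commit message): as a rough stand-alone model of the flow this patch sets up -- seed a per-VM copy of the knob from the host-wide parameter at VM creation, then consult the VM rather than the module parameter -- see the sketch below. All names in it are illustrative user-space stand-ins; the real fields and helpers are in the hunks that follow.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the module-wide nx_huge_pages parameter. */
static bool nx_huge_pages_param = true;

/* Stand-in for the per-VM state this patch adds to struct kvm_arch. */
struct vm {
	bool nx_huge_pages;
};

/* Like kvm_mmu_post_init_vm(): seed the per-VM copy from the host-wide value. */
static void vm_init(struct vm *vm)
{
	vm->nx_huge_pages = nx_huge_pages_param;
}

/* Like is_nx_huge_page_enabled(kvm): ask the VM, not the module parameter. */
static bool vm_nx_huge_pages_enabled(const struct vm *vm)
{
	return vm->nx_huge_pages;
}

int main(void)
{
	struct vm vm;

	vm_init(&vm);
	printf("per-VM NX hugepages: %s\n",
	       vm_nx_huge_pages_enabled(&vm) ? "enabled" : "disabled");
	return 0;
}

In this patch the two values never diverge (the module parameter writer also updates every VM), which is why no functional change is expected; the future commits mentioned above add a per-VM way to let the copy differ.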
arch/x86/include/asm/kvm_host.h | 2 ++
arch/x86/kvm/mmu.h | 8 ++++----
arch/x86/kvm/mmu/mmu.c | 7 +++++--
arch/x86/kvm/mmu/spte.c | 7 ++++---
arch/x86/kvm/mmu/spte.h | 3 ++-
arch/x86/kvm/mmu/tdp_mmu.c | 3 ++-
6 files changed, 19 insertions(+), 11 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f72e80178ffc..0a0c54639dd8 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1240,6 +1240,8 @@ struct kvm_arch {
	hpa_t hv_root_tdp;
	spinlock_t hv_root_tdp_lock;
#endif
+
+	bool nx_huge_pages;
};
struct kvm_vm_stat {
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index bf8dbc4bb12a..dd28fe8d13ae 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -173,9 +173,9 @@ struct kvm_page_fault {
int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
extern int nx_huge_pages;
-static inline bool is_nx_huge_page_enabled(void)
+static inline bool is_nx_huge_page_enabled(struct kvm *kvm)
{
-	return READ_ONCE(nx_huge_pages);
+	return READ_ONCE(kvm->arch.nx_huge_pages);
}
static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
@@ -191,8 +191,8 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
		.user = err & PFERR_USER_MASK,
		.prefetch = prefetch,
		.is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault),
-		.nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(),
-
+		.nx_huge_page_workaround_enabled =
+			is_nx_huge_page_enabled(vcpu->kvm),
		.max_level = KVM_MAX_HUGEPAGE_LEVEL,
		.req_level = PG_LEVEL_4K,
		.goal_level = PG_LEVEL_4K,
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 1b59b56642f1..dc9672f70468 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -6195,8 +6195,10 @@ static void __set_nx_huge_pages(bool val)
	nx_huge_pages = itlb_multihit_kvm_mitigation = val;
}
-static int kvm_update_nx_huge_pages(struct kvm *kvm)
+static void kvm_update_nx_huge_pages(struct kvm *kvm)
{
+	kvm->arch.nx_huge_pages = nx_huge_pages;
+
	mutex_lock(&kvm->slots_lock);
	kvm_mmu_zap_all_fast(kvm);
	mutex_unlock(&kvm->slots_lock);
@@ -6227,7 +6229,7 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
		mutex_lock(&kvm_lock);
		list_for_each_entry(kvm, &vm_list, vm_list)
-			kvm_set_nx_huge_pages(kvm);
+			kvm_update_nx_huge_pages(kvm);
		mutex_unlock(&kvm_lock);
	}
@@ -6448,6 +6450,7 @@ int kvm_mmu_post_init_vm(struct kvm *kvm)
{
	int err;
+	kvm->arch.nx_huge_pages = READ_ONCE(nx_huge_pages);
	err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0,
					  "kvm-nx-lpage-recovery",
					  &kvm->arch.nx_lpage_recovery_thread);
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index 4739b53c9734..877ad30bc7ad 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -116,7 +116,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
		spte |= spte_shadow_accessed_mask(spte);
	if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) &&
-	    is_nx_huge_page_enabled()) {
+	    is_nx_huge_page_enabled(vcpu->kvm)) {
		pte_access &= ~ACC_EXEC_MASK;
	}
@@ -215,7 +215,8 @@ static u64 make_spte_executable(u64 spte)
 * This is used during huge page splitting to build the SPTEs that make up the
 * new page table.
 */
-u64 make_huge_page_split_spte(u64 huge_spte, int huge_level, int index)
+u64 make_huge_page_split_spte(struct kvm *kvm, u64 huge_spte, int huge_level,
+			      int index)
{
	u64 child_spte;
	int child_level;
@@ -243,7 +244,7 @@ u64 make_huge_page_split_spte(u64 huge_spte, int huge_level, int index)
		 * When splitting to a 4K page, mark the page executable as the
		 * NX hugepage mitigation no longer applies.
		 */
-		if (is_nx_huge_page_enabled())
+		if (is_nx_huge_page_enabled(kvm))
			child_spte = make_spte_executable(child_spte);
	}
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index 73f12615416f..e4142caff4b1 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -415,7 +415,8 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
	       u64 old_spte, bool prefetch, bool can_unsync,
	       bool host_writable, u64 *new_spte);
-u64 make_huge_page_split_spte(u64 huge_spte, int huge_level, int index);
+u64 make_huge_page_split_spte(struct kvm *kvm, u64 huge_spte, int huge_level,
+			      int index);
u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled);
u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access);
u64 mark_spte_for_access_track(u64 spte);
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index af60922906ef..98a45a87f0b2 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1466,7 +1466,8 @@ static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
	 * not been linked in yet and thus is not reachable from any other CPU.
	 */
	for (i = 0; i < PT64_ENT_PER_PAGE; i++)
-		sp->spt[i] = make_huge_page_split_spte(huge_spte, level, i);
+		sp->spt[i] = make_huge_page_split_spte(kvm, huge_spte,
+						       level, i);
	/*
	 * Replace the huge spte with a pointer to the populated lower level
--
2.35.1.616.g0bdcbb4464-goog