Message-Id: <772b20e270b3451aea9714260f2c40ddcc4afe80.1646422845.git.isaku.yamahata@intel.com>
Date: Fri, 4 Mar 2022 11:49:10 -0800
From: isaku.yamahata@...el.com
To: kvm@...r.kernel.org, linux-kernel@...r.kernel.org
Cc: isaku.yamahata@...el.com, isaku.yamahata@...il.com,
Paolo Bonzini <pbonzini@...hat.com>,
Jim Mattson <jmattson@...gle.com>, erdemaktas@...gle.com,
Connor Kuehl <ckuehl@...hat.com>,
Sean Christopherson <seanjc@...gle.com>
Subject: [RFC PATCH v5 054/104] KVM: x86/tdp_mmu: Keep PRIVATE_PROHIBIT bit when zapping
From: Isaku Yamahata <isaku.yamahata@...el.com>
SPTE_PRIVATE_PROHIBIT specifies whether a shared or private GPA is
allowed. The bit needs to be preserved when the EPT entry is zapped.
Currently a zapped EPT entry is unconditionally initialized to
shadow_init_value, which clears the SPTE_PRIVATE_PROHIBIT bit. To carry
the bit across zapping, introduce a helper function that returns the
initial value for a zapped entry with the SPTE_PRIVATE_PROHIBIT bit of
the old entry preserved, and replace the direct uses of
shadow_init_value with it.
Signed-off-by: Isaku Yamahata <isaku.yamahata@...el.com>
---
arch/x86/kvm/mmu/tdp_mmu.c | 19 +++++++++++++++----
1 file changed, 15 insertions(+), 4 deletions(-)
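For context, below is a small stand-alone sketch of the zap-value
computation this patch introduces. The bit positions, the value of
shadow_init_value, and the plain bitmask standing in for
is_private_prohibit_spte() are illustrative assumptions, not the real
KVM/TDX definitions.

/*
 * User-space sketch: how a zapped SPTE keeps SPTE_PRIVATE_PROHIBIT.
 * Bit positions and shadow_init_value are assumptions for illustration.
 */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define SHADOW_SUPPRESS_VE	(1ULL << 63)	/* assumed "suppress #VE" bit */
#define SPTE_PRIVATE_PROHIBIT	(1ULL << 62)	/* assumed software-available bit */

static const uint64_t shadow_init_value = SHADOW_SUPPRESS_VE;

static uint64_t shadow_init_spte(uint64_t old_spte)
{
	/* Keep only PRIVATE_PROHIBIT from the old SPTE; drop the rest. */
	return shadow_init_value | (old_spte & SPTE_PRIVATE_PROHIBIT);
}

int main(void)
{
	uint64_t present_spte = 0xfeed0007ULL | SPTE_PRIVATE_PROHIBIT;

	/* The zapped value still has PRIVATE_PROHIBIT set. */
	printf("zapped SPTE: %#" PRIx64 "\n", shadow_init_spte(present_spte));
	return 0;
}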
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 1949f81027a0..6d750563824d 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -610,6 +610,12 @@ static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
return true;
}
+static u64 shadow_init_spte(u64 old_spte)
+{
+ return shadow_init_value |
+ (is_private_prohibit_spte(old_spte) ? SPTE_PRIVATE_PROHIBIT : 0);
+}
+
static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm,
struct tdp_iter *iter)
{
@@ -641,7 +647,8 @@ static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm,
* shadow_init_value (which sets "suppress #VE" bit) so it
* can be set when EPT table entries are zapped.
*/
- WRITE_ONCE(*rcu_dereference(iter->sptep), shadow_init_value);
+ WRITE_ONCE(*rcu_dereference(iter->sptep),
+ shadow_init_spte(iter->old_spte));
return true;
}
@@ -853,7 +860,8 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
if (!shared) {
/* see comments in tdp_mmu_zap_spte_atomic() */
- tdp_mmu_set_spte(kvm, &iter, shadow_init_value);
+ tdp_mmu_set_spte(kvm, &iter,
+ shadow_init_spte(iter.old_spte));
flush = true;
} else if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
/*
@@ -1038,11 +1046,14 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
new_spte = make_mmio_spte(vcpu,
tdp_iter_gfn_unalias(vcpu->kvm, iter),
pte_access);
- else
+ else {
wrprot = make_spte(vcpu, sp, fault->slot, pte_access,
tdp_iter_gfn_unalias(vcpu->kvm, iter),
fault->pfn, iter->old_spte, fault->prefetch,
true, fault->map_writable, &new_spte);
+ if (is_private_prohibit_spte(iter->old_spte))
+ new_spte |= SPTE_PRIVATE_PROHIBIT;
+ }
if (new_spte == iter->old_spte)
ret = RET_PF_SPURIOUS;
@@ -1335,7 +1346,7 @@ static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
* invariant that the PFN of a present leaf SPTE can never change.
* See __handle_changed_spte().
*/
- tdp_mmu_set_spte(kvm, iter, shadow_init_value);
+ tdp_mmu_set_spte(kvm, iter, shadow_init_spte(iter->old_spte));
if (!pte_write(range->pte)) {
new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
--
2.25.1