Message-ID: <20220118110621.62462-2-nikunj@amd.com>
Date: Tue, 18 Jan 2022 16:36:16 +0530
From: Nikunj A Dadhania <nikunj@....com>
To: Paolo Bonzini <pbonzini@...hat.com>
CC: Sean Christopherson <seanjc@...gle.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>,
Brijesh Singh <brijesh.singh@....com>,
Tom Lendacky <thomas.lendacky@....com>,
Peter Gonda <pgonda@...gle.com>, <kvm@...r.kernel.org>,
<linux-kernel@...r.kernel.org>, Nikunj A Dadhania <nikunj@....com>
Subject: [RFC PATCH 1/6] KVM: x86/mmu: Add hook to pin PFNs on demand in MMU

Add a new kvm_x86_ops hook, pin_spte(), that lets vendor code pin guest
PFNs on demand as the MMU maps them.  The hook is invoked from the
legacy MMU's __direct_map() once a fault has been resolved to a valid,
non-reserved PFN, and from the TDP MMU's __handle_changed_spte()
whenever a present leaf SPTE is installed.  The hook is called via
static_call_cond(), so vendors that do not need demand pinning can
leave it unimplemented.
Signed-off-by: Nikunj A Dadhania <nikunj@....com>
---
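Note: for reference, below is a minimal sketch of how a vendor module
(e.g. in arch/x86/kvm/svm/svm.c, with its usual includes) might implement
the new hook.  The name svm_pin_spte, the sev_guest() check and the
pin-by-refcount approach are illustrative assumptions only and are not
part of this series.

static void svm_pin_spte(struct kvm *kvm, gfn_t gfn, enum pg_level level,
                         kvm_pfn_t pfn)
{
        int i;

        /* Only encrypted guests need their backing pages pinned. */
        if (!sev_guest(kvm))
                return;

        /* Pin every 4K page backing the newly mapped leaf SPTE. */
        for (i = 0; i < KVM_PAGES_PER_HPAGE(level); i++, pfn++) {
                if (!pfn_valid(pfn))
                        continue;
                /* Take a reference so the page cannot be freed or migrated. */
                get_page(pfn_to_page(pfn));
        }
}

The hook would then be wired up with ".pin_spte = svm_pin_spte" in
svm_x86_ops.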
arch/x86/include/asm/kvm-x86-ops.h | 1 +
arch/x86/include/asm/kvm_host.h | 2 ++
arch/x86/kvm/mmu/mmu.c | 3 +++
arch/x86/kvm/mmu/tdp_mmu.c | 7 +++++++
4 files changed, 13 insertions(+)
diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index f658bb4dbb74..a96c52a99a04 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -87,6 +87,7 @@ KVM_X86_OP(set_tss_addr)
KVM_X86_OP(set_identity_map_addr)
KVM_X86_OP(get_mt_mask)
KVM_X86_OP(load_mmu_pgd)
+KVM_X86_OP(pin_spte)
KVM_X86_OP_NULL(has_wbinvd_exit)
KVM_X86_OP(get_l2_tsc_offset)
KVM_X86_OP(get_l2_tsc_multiplier)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 0677b9ea01c9..1263a16dd588 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1417,6 +1417,8 @@ struct kvm_x86_ops {
void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa,
int root_level);
+ void (*pin_spte)(struct kvm *kvm, gfn_t gfn, enum pg_level level,
+ kvm_pfn_t pfn);
bool (*has_wbinvd_exit)(void);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 1d275e9d76b5..62dda588eb99 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2977,6 +2977,9 @@ static int __direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
return ret;
direct_pte_prefetch(vcpu, it.sptep);
+ if (!is_error_noslot_pfn(fault->pfn) && !kvm_is_reserved_pfn(fault->pfn))
+ static_call_cond(kvm_x86_pin_spte)(vcpu->kvm, base_gfn,
+ it.level, fault->pfn);
++vcpu->stat.pf_fixed;
return ret;
}
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 7b1bc816b7c3..b7578fa02e9f 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -467,6 +467,13 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
(!is_present || !is_dirty_spte(new_spte) || pfn_changed))
kvm_set_pfn_dirty(spte_to_pfn(old_spte));
+ /*
+ * Notify vendor code so it can pin the page backing this leaf SPTE.
+ */
+ if (is_present && is_leaf)
+ static_call_cond(kvm_x86_pin_spte)(kvm, gfn, level,
+ spte_to_pfn(new_spte));
+
/*
* Recursively handle child PTs if the change removed a subtree from
* the paging structure.
--
2.32.0