Message-Id: <7b7dd4d56249028aa0b84d439ffdf1b79e67322a.1709288671.git.isaku.yamahata@intel.com>
Date: Fri, 1 Mar 2024 09:28:47 -0800
From: isaku.yamahata@...el.com
To: kvm@...r.kernel.org
Cc: isaku.yamahata@...el.com,
isaku.yamahata@...il.com,
linux-kernel@...r.kernel.org,
Sean Christopherson <seanjc@...gle.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Michael Roth <michael.roth@....com>,
David Matlack <dmatlack@...gle.com>,
Federico Parola <federico.parola@...ito.it>
Subject: [RFC PATCH 5/8] KVM: x86/mmu: Introduce kvm_mmu_map_page() for prepopulating guest memory
From: Isaku Yamahata <isaku.yamahata@...el.com>
Introduce a helper function to call the KVM fault handler. This allows
a new ioctl to invoke the KVM fault handler to prepopulate guest memory
without exposing RET_PF_* enums or other KVM MMU internal definitions
outside the MMU.
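As a sketch of the intended usage (illustrative only; the helper name
kvm_map_memory_range(), the retry loop, and the ioctl plumbing around
it are assumptions, not part of this patch), a prepopulation ioctl
could drive the helper roughly like this:

	/*
	 * Hypothetical caller, for illustration only.  It retries
	 * transient failures and advances by the level that was
	 * actually mapped, without ever seeing RET_PF_* values.
	 */
	static int kvm_map_memory_range(struct kvm_vcpu *vcpu, gpa_t gpa,
					u64 size, u32 error_code,
					u8 max_level)
	{
		u8 goal_level = PG_LEVEL_4K;
		gpa_t end = gpa + size;
		int r;

		while (gpa < end) {
			r = kvm_mmu_map_page(vcpu, gpa, error_code,
					     max_level, &goal_level);
			if (r == -EAGAIN)
				continue;	/* transient, retry same GPA */
			if (r)
				return r;	/* -EFAULT/-EIO to userspace */
			gpa += KVM_HPAGE_SIZE(goal_level);
		}
		return 0;
	}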
Signed-off-by: Isaku Yamahata <isaku.yamahata@...el.com>
---
arch/x86/kvm/mmu.h | 3 +++
arch/x86/kvm/mmu/mmu.c | 30 ++++++++++++++++++++++++++++++
2 files changed, 33 insertions(+)
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 60f21bb4c27b..48870c5e08ec 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -183,6 +183,9 @@ static inline void kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
 	__kvm_mmu_refresh_passthrough_bits(vcpu, mmu);
 }
 
+int kvm_mmu_map_page(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
+		     u8 max_level, u8 *goal_level);
+
 /*
  * Check if a given access (described through the I/D, W/R and U/S bits of a
  * page fault error code pfec) causes a permission fault with the given PTE
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index e4cc7f764980..7d5e80d17977 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4659,6 +4659,36 @@ int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 	return direct_page_fault(vcpu, fault);
 }
 
+int kvm_mmu_map_page(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
+		     u8 max_level, u8 *goal_level)
+{
+	struct kvm_page_fault fault = KVM_PAGE_FAULT_INIT(vcpu, gpa, error_code,
+							  false, max_level);
+	int r;
+
+	r = __kvm_mmu_do_page_fault(vcpu, &fault);
+
+	if (is_error_noslot_pfn(fault.pfn) || vcpu->kvm->vm_bugged)
+		return -EFAULT;
+
+	switch (r) {
+	case RET_PF_RETRY:
+		return -EAGAIN;
+
+	case RET_PF_FIXED:
+	case RET_PF_SPURIOUS:
+		*goal_level = fault.goal_level;
+		return 0;
+
+	case RET_PF_CONTINUE:
+	case RET_PF_EMULATE:
+	case RET_PF_INVALID:
+	default:
+		return -EIO;
+	}
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_map_page);
+
 static void nonpaging_init_context(struct kvm_mmu *context)
 {
 	context->page_fault = nonpaging_page_fault;
--
2.25.1