From 44ec300f1e47fce3ac3893d5fbd8834705db8d58 Mon Sep 17 00:00:00 2001 From: Sean Christopherson <seanjc@google.com> Date: Tue, 6 May 2025 14:49:12 -0700 Subject: [PATCH 3/7] KVM: arm64: x86: Require "struct kvm_page_fault" for memory fault exits Now that both arm64 and x86 define "struct kvm_page_fault" with a base set of fields, rework kvm_prepare_memory_fault_exit() to take a kvm_page_fault structure instead of passing in a pile of parameters. Guard the related code with CONFIG_KVM_GENERIC_PAGE_FAULT to play nice with architectures that don't yet support kvm_page_fault. Rather than define a common kvm_page_fault and kvm_arch_page_fault child, simply assert that the handful of required fields are provided by the arch-defined structure. Unlike vCPU and VMs, the number of common fields is expected to be small, and letting arch code fully define the structure allows for maximum flexibility with respect to const, layout, etc. No functional change intended. Signed-off-by: Sean Christopherson <seanjc@google.com> --- arch/arm64/kvm/Kconfig | 1 + arch/x86/kvm/Kconfig | 1 + arch/x86/kvm/mmu/mmu.c | 8 ++++---- arch/x86/kvm/mmu/mmu_internal.h | 10 +--------- include/linux/kvm_host.h | 26 ++++++++++++++++++++------ virt/kvm/Kconfig | 3 +++ 6 files changed, 30 insertions(+), 19 deletions(-) diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index 096e45acadb2..35b18f77afc4 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -38,6 +38,7 @@ menuconfig KVM select HAVE_KVM_VCPU_RUN_PID_CHANGE select SCHED_INFO select GUEST_PERF_EVENTS if PERF_EVENTS + select KVM_GENERIC_PAGE_FAULT help Support hosting virtualized guest machines. 
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 2eeffcec5382..2d5966f15738 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -48,6 +48,7 @@ config KVM_X86 select KVM_GENERIC_PRE_FAULT_MEMORY select KVM_GENERIC_PRIVATE_MEM if KVM_SW_PROTECTED_VM select KVM_WERROR if WERROR + select KVM_GENERIC_PAGE_FAULT config KVM tristate "Kernel-based Virtual Machine (KVM) support" diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index cbc84c6abc2e..a4439e9e0726 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3429,7 +3429,7 @@ static int kvm_handle_noslot_fault(struct kvm_vcpu *vcpu, gva_t gva = fault->is_tdp ? 0 : fault->addr; if (fault->is_private) { - kvm_mmu_prepare_memory_fault_exit(vcpu, fault); + kvm_prepare_memory_fault_exit(vcpu, fault); return -EFAULT; } @@ -4499,14 +4499,14 @@ static int kvm_mmu_faultin_pfn_private(struct kvm_vcpu *vcpu, int max_order, r; if (!kvm_slot_can_be_private(fault->slot)) { - kvm_mmu_prepare_memory_fault_exit(vcpu, fault); + kvm_prepare_memory_fault_exit(vcpu, fault); return -EFAULT; } r = kvm_gmem_get_pfn(vcpu->kvm, fault->slot, fault->gfn, &fault->pfn, &fault->refcounted_page, &max_order); if (r) { - kvm_mmu_prepare_memory_fault_exit(vcpu, fault); + kvm_prepare_memory_fault_exit(vcpu, fault); return r; } @@ -4586,7 +4586,7 @@ static int kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu, * private vs. shared mismatch. 
*/ if (fault->is_private != kvm_mem_is_private(kvm, fault->gfn)) { - kvm_mmu_prepare_memory_fault_exit(vcpu, fault); + kvm_prepare_memory_fault_exit(vcpu, fault); return -EFAULT; } diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index 384fc4d0bfec..c15060ed6e8b 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -269,14 +269,6 @@ enum { */ static_assert(RET_PF_CONTINUE == 0); -static inline void kvm_mmu_prepare_memory_fault_exit(struct kvm_vcpu *vcpu, - struct kvm_page_fault *fault) -{ - kvm_prepare_memory_fault_exit(vcpu, fault->gfn << PAGE_SHIFT, - PAGE_SIZE, fault->write, fault->exec, - fault->is_private); -} - static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err, bool prefetch, int *emulation_type, u8 *level) @@ -329,7 +321,7 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, */ if (r == RET_PF_EMULATE && fault.is_private) { pr_warn_ratelimited("kvm: unexpected emulation request on private memory\n"); - kvm_mmu_prepare_memory_fault_exit(vcpu, &fault); + kvm_prepare_memory_fault_exit(vcpu, &fault); return -EFAULT; } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index c685fb417e92..adece3cbfb02 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -2493,20 +2493,34 @@ static inline void kvm_account_pgtable_pages(void *virt, int nr) /* Max number of entries allowed for each kvm dirty ring */ #define KVM_DIRTY_RING_MAX_ENTRIES 65536 +#ifdef CONFIG_KVM_GENERIC_PAGE_FAULT + +#define KVM_ASSERT_TYPE_IS(type_t, x) \ +do { \ + type_t __maybe_unused tmp; \ + \ + BUILD_BUG_ON(!__types_ok(tmp, x) || !__typecheck(tmp, x)); \ +}while (0) + static inline void kvm_prepare_memory_fault_exit(struct kvm_vcpu *vcpu, - gpa_t gpa, gpa_t size, - bool is_write, bool is_exec, - bool is_private) + struct kvm_page_fault *fault) { + KVM_ASSERT_TYPE_IS(gfn_t, fault->gfn); + KVM_ASSERT_TYPE_IS(bool, fault->exec); + 
KVM_ASSERT_TYPE_IS(bool, fault->write); + KVM_ASSERT_TYPE_IS(bool, fault->is_private); + KVM_ASSERT_TYPE_IS(struct kvm_memory_slot *, fault->slot); + vcpu->run->exit_reason = KVM_EXIT_MEMORY_FAULT; - vcpu->run->memory_fault.gpa = gpa; - vcpu->run->memory_fault.size = size; + vcpu->run->memory_fault.gpa = fault->gfn << PAGE_SHIFT; + vcpu->run->memory_fault.size = PAGE_SIZE; /* RWX flags are not (yet) defined or communicated to userspace. */ vcpu->run->memory_fault.flags = 0; - if (is_private) + if (fault->is_private) vcpu->run->memory_fault.flags |= KVM_MEMORY_EXIT_FLAG_PRIVATE; } +#endif #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES static inline unsigned long kvm_get_memory_attributes(struct kvm *kvm, gfn_t gfn) diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig index 727b542074e7..28ed6b241578 100644 --- a/virt/kvm/Kconfig +++ b/virt/kvm/Kconfig @@ -128,3 +128,6 @@ config HAVE_KVM_ARCH_GMEM_PREPARE config HAVE_KVM_ARCH_GMEM_INVALIDATE bool depends on KVM_PRIVATE_MEM + +config KVM_GENERIC_PAGE_FAULT + bool -- 2.49.0.967.g6a0df3ecc3-goog