From e8cf843428aadfcec342f5e84ba4fd6ad7f16aa1 Mon Sep 17 00:00:00 2001
From: James Houghton <jthoughton@google.com>
Date: Tue, 6 May 2025 15:37:14 -0700
Subject: [PATCH 5/7] KVM: x86: Add support for KVM userfault exits

Signed-off-by: James Houghton <jthoughton@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/mmu/mmu.c |  5 ++++-
 arch/x86/kvm/x86.c     | 27 +++++++++++++++++----------
 2 files changed, 21 insertions(+), 11 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index a4439e9e0726..49eb6b9b268c 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3304,7 +3304,7 @@ void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 	if (is_error_noslot_pfn(fault->pfn))
 		return;
 
-	if (kvm_slot_dirty_track_enabled(slot))
+	if (kvm_slot_dirty_track_enabled(slot) || kvm_is_userfault_memslot(slot))
 		return;
 
 	/*
@@ -4522,6 +4522,9 @@ static int __kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu,
 {
 	unsigned int foll = fault->write ? FOLL_WRITE : 0;
 
+	if (kvm_do_userfault(vcpu, fault))
+		return -EFAULT;
+
 	if (fault->is_private)
 		return kvm_mmu_faultin_pfn_private(vcpu, fault);
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 75c0a934556d..4f9edda47782 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -13150,12 +13150,27 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
 	u32 new_flags = new ? new->flags : 0;
 	bool log_dirty_pages = new_flags & KVM_MEM_LOG_DIRTY_PAGES;
 
+	/*
+	 * Recover hugepages when userfault is toggled off, as KVM forces 4KiB
+	 * mappings when userfault is enabled. See below for why CREATE, MOVE,
+	 * and DELETE don't need special handling. Note, common KVM handles
+	 * zapping SPTEs when userfault is toggled on.
+	 */
+	if (change == KVM_MR_FLAGS_ONLY && (old_flags & KVM_MEM_USERFAULT) &&
+	    !(new_flags & KVM_MEM_USERFAULT))
+		kvm_mmu_recover_huge_pages(kvm, new);
+
+	/*
+	 * Nothing more to do if dirty logging isn't being toggled.
+	 */
+	if (!((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES))
+		return;
+
 	/*
 	 * Update CPU dirty logging if dirty logging is being toggled. This
 	 * applies to all operations.
 	 */
-	if ((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES)
-		kvm_mmu_update_cpu_dirty_logging(kvm, log_dirty_pages);
+	kvm_mmu_update_cpu_dirty_logging(kvm, log_dirty_pages);
 
 	/*
 	 * Nothing more to do for RO slots (which can't be dirtied and can't be
@@ -13175,14 +13190,6 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
 	if ((change != KVM_MR_FLAGS_ONLY) || (new_flags & KVM_MEM_READONLY))
 		return;
 
-	/*
-	 * READONLY and non-flags changes were filtered out above, and the only
-	 * other flag is LOG_DIRTY_PAGES, i.e. something is wrong if dirty
-	 * logging isn't being toggled on or off.
-	 */
-	if (WARN_ON_ONCE(!((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES)))
-		return;
-
 	if (!log_dirty_pages) {
 		/*
 		 * Recover huge page mappings in the slot now that dirty logging
-- 
2.49.0.967.g6a0df3ecc3-goog