From b9300a363d5517098bde6c97b8292a71092aa455 Mon Sep 17 00:00:00 2001
From: James Houghton
Date: Tue, 6 May 2025 15:38:31 -0700
Subject: [PATCH 6/7] KVM: arm64: Add support for KVM userfault exits

Signed-off-by: James Houghton
Signed-off-by: Sean Christopherson
---
 arch/arm64/kvm/mmu.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index c5d21bcfa3ed..5e2ccde66f43 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1541,7 +1541,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
	 * logging_active is guaranteed to never be true for VM_PFNMAP
	 * memslots.
	 */
-	if (logging_active || is_protected_kvm_enabled()) {
+	if (logging_active || is_protected_kvm_enabled() ||
+	    kvm_is_userfault_memslot(memslot)) {
 		force_pte = true;
 		vma_shift = PAGE_SHIFT;
 	} else {
@@ -1630,6 +1631,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	mmu_seq = vcpu->kvm->mmu_invalidate_seq;
 	mmap_read_unlock(current->mm);
 
+	if (kvm_do_userfault(vcpu, &fault))
+		return -EFAULT;
+
 	pfn = __kvm_faultin_pfn(memslot, fault.gfn, fault.write ? FOLL_WRITE : 0,
 				&writable, &page);
 	if (pfn == KVM_PFN_ERR_HWPOISON) {
@@ -2127,14 +2131,19 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 				   const struct kvm_memory_slot *new,
 				   enum kvm_mr_change change)
 {
-	bool log_dirty_pages = new && new->flags & KVM_MEM_LOG_DIRTY_PAGES;
+	u32 old_flags = old ? old->flags : 0;
+	u32 new_flags = new ? new->flags : 0;
+
+	/* Nothing to do if not toggling dirty logging. */
+	if (!((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES))
+		return;
 
 	/*
	 * At this point memslot has been committed and there is an
	 * allocated dirty_bitmap[], dirty pages will be tracked while the
	 * memory slot is write protected.
	 */
-	if (log_dirty_pages) {
+	if (new_flags & KVM_MEM_LOG_DIRTY_PAGES) {
 		if (change == KVM_MR_DELETE)
 			return;
-- 
2.49.0.967.g6a0df3ecc3-goog
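
For context, a minimal sketch of how a VMM's vCPU run loop might consume
these exits. It assumes the uAPI introduced earlier in this series (the
KVM_MEM_USERFAULT memslot flag and KVM_EXIT_MEMORY_FAULT exits carrying
KVM_MEMORY_EXIT_FLAG_USERFAULT); fetch_page() and clear_userfault() are
hypothetical VMM helpers, not functions defined by the series:

	#include <err.h>
	#include <errno.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Hypothetical VMM helpers, not part of this series. */
	void fetch_page(__u64 gpa, __u64 size);
	void clear_userfault(__u64 gpa);

	static void vcpu_loop(int vcpu_fd, struct kvm_run *run)
	{
		for (;;) {
			if (ioctl(vcpu_fd, KVM_RUN, NULL) < 0 && errno != EINTR)
				err(1, "KVM_RUN");

			if (run->exit_reason == KVM_EXIT_MEMORY_FAULT &&
			    (run->memory_fault.flags & KVM_MEMORY_EXIT_FLAG_USERFAULT)) {
				/* Populate the page, then retire the userfault. */
				fetch_page(run->memory_fault.gpa, run->memory_fault.size);
				clear_userfault(run->memory_fault.gpa);
				continue;	/* Re-entering the guest retries the fault. */
			}

			/* ... handle other exit reasons ... */
		}
	}

Because user_mem_abort() forces PTE mappings for userfault memslots
(force_pte = true above), each exit covers at most one page, so the VMM
can resolve faults at page granularity.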