From 20b0bb4681a05d36c52fce90c932cd239b604d42 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 6 May 2025 14:43:11 -0700 Subject: [PATCH 2/7] KVM: arm64: Add "struct kvm_page_fault" to gather common fault variables Introduce "struct kvm_page_fault" and use it in user_mem_abort() in lieu of a collection of local variables. Providing "struct kvm_page_fault" will allow common KVM to provide APIs to take in said structure, e.g. when preparing memory fault exits. No functional change intended. Signed-off-by: Sean Christopherson --- arch/arm64/include/asm/kvm_host.h | 9 +++++++++ arch/arm64/kvm/mmu.c | 32 +++++++++++++++++-------------- 2 files changed, 27 insertions(+), 14 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 08ba91e6fb03..50a04b86baaa 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -403,6 +403,15 @@ struct kvm_vcpu_fault_info { u64 disr_el1; /* Deferred [SError] Status Register */ }; +struct kvm_page_fault { + const bool exec; + const bool write; + const bool is_private; + + gfn_t gfn; + struct kvm_memory_slot *slot; +}; + /* * VNCR() just places the VNCR_capable registers in the enum after * __VNCR_START__, and the value (after correction) to be an 8-byte offset diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 754f2fe0cc67..c5d21bcfa3ed 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -1472,8 +1472,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, bool fault_is_perm) { int ret = 0; - bool write_fault, writable, force_pte = false; - bool exec_fault, mte_allowed; + bool writable, force_pte = false; + bool mte_allowed; bool device = false, vfio_allow_any_uc = false; unsigned long mmu_seq; phys_addr_t ipa = fault_ipa; @@ -1481,7 +1481,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, struct vm_area_struct *vma; short vma_shift; void *memcache; - gfn_t gfn; kvm_pfn_t pfn; 
bool logging_active = memslot_is_logging(memslot); long vma_pagesize, fault_granule; @@ -1490,13 +1489,18 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, struct page *page; enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_HANDLE_FAULT | KVM_PGTABLE_WALK_SHARED; + struct kvm_page_fault fault = { + .write = kvm_is_write_fault(vcpu), + .exec = kvm_vcpu_trap_is_exec_fault(vcpu), + + .slot = memslot, + }; + if (fault_is_perm) fault_granule = kvm_vcpu_trap_get_perm_fault_granule(vcpu); - write_fault = kvm_is_write_fault(vcpu); - exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu); - VM_BUG_ON(write_fault && exec_fault); + VM_BUG_ON(fault.write && fault.exec); - if (fault_is_perm && !write_fault && !exec_fault) { + if (fault_is_perm && !fault.write && !fault.exec) { kvm_err("Unexpected L2 read permission error\n"); return -EFAULT; } @@ -1507,7 +1511,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, * only exception to this is when dirty logging is enabled at runtime * and a write fault needs to collapse a block entry into a table. */ - if (!fault_is_perm || (logging_active && write_fault)) { + if (!fault_is_perm || (logging_active && fault.write)) { int min_pages = kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu); if (!is_protected_kvm_enabled()) { @@ -1607,7 +1611,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, ipa &= ~(vma_pagesize - 1); } - gfn = ipa >> PAGE_SHIFT; + fault.gfn = ipa >> PAGE_SHIFT; mte_allowed = kvm_vma_mte_allowed(vma); vfio_allow_any_uc = vma->vm_flags & VM_ALLOW_ANY_UNCACHED; @@ -1626,7 +1630,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, mmu_seq = vcpu->kvm->mmu_invalidate_seq; mmap_read_unlock(current->mm); - pfn = __kvm_faultin_pfn(memslot, gfn, write_fault ? FOLL_WRITE : 0, + pfn = __kvm_faultin_pfn(memslot, fault.gfn, fault.write ? 
FOLL_WRITE : 0, &writable, &page); if (pfn == KVM_PFN_ERR_HWPOISON) { kvm_send_hwpoison_signal(hva, vma_shift); @@ -1647,7 +1651,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, * change things at the last minute. */ device = true; - } else if (logging_active && !write_fault) { + } else if (logging_active && !fault.write) { /* * Only actually map the page as writable if this was a write * fault. @@ -1655,7 +1659,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, writable = false; } - if (exec_fault && device) + if (fault.exec && device) return -ENOEXEC; /* @@ -1715,7 +1719,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, if (writable) prot |= KVM_PGTABLE_PROT_W; - if (exec_fault) + if (fault.exec) prot |= KVM_PGTABLE_PROT_X; if (device) { @@ -1752,7 +1756,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, /* Mark the page dirty only if the fault is handled successfully */ if (writable && !ret) - mark_page_dirty_in_slot(kvm, memslot, gfn); + mark_page_dirty_in_slot(kvm, memslot, fault.gfn); return ret != -EAGAIN ? ret : 0; } -- 2.49.0.967.g6a0df3ecc3-goog