Message-ID: <20250618042424.330664-3-jthoughton@google.com>
Date: Wed, 18 Jun 2025 04:24:11 +0000
From: James Houghton <jthoughton@...gle.com>
To: Paolo Bonzini <pbonzini@...hat.com>, Sean Christopherson <seanjc@...gle.com>,
Oliver Upton <oliver.upton@...ux.dev>
Cc: Jonathan Corbet <corbet@....net>, Marc Zyngier <maz@...nel.org>, Yan Zhao <yan.y.zhao@...el.com>,
James Houghton <jthoughton@...gle.com>, Nikita Kalyazin <kalyazin@...zon.com>,
Anish Moorthy <amoorthy@...gle.com>, Peter Gonda <pgonda@...gle.com>, Peter Xu <peterx@...hat.com>,
David Matlack <dmatlack@...gle.com>, wei.w.wang@...el.com, kvm@...r.kernel.org,
linux-doc@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org, kvmarm@...ts.linux.dev
Subject: [PATCH v3 02/15] KVM: arm64: Add "struct kvm_page_fault" to gather
common fault variables

From: Sean Christopherson <seanjc@...gle.com>

Introduce "struct kvm_page_fault" and use it in user_mem_abort() in lieu
of a collection of local variables. Providing "struct kvm_page_fault"
will allow common KVM to expose APIs that take the structure as an
argument, e.g. when preparing memory fault exits.
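
To make that direction concrete, here is a rough sketch, illustrative
only and not part of this patch, of how a common helper might consume
the structure when preparing a memory fault exit. The helper's name and
its kvm_page_fault-based signature are placeholders; the kvm_run fields
and KVM_MEMORY_EXIT_FLAG_PRIVATE mirror the existing
KVM_EXIT_MEMORY_FAULT ABI:

  /* Hypothetical sketch only; not introduced by this patch. */
  static inline void kvm_prepare_memory_fault_exit(struct kvm_vcpu *vcpu,
                                                   struct kvm_page_fault *fault)
  {
          vcpu->run->exit_reason = KVM_EXIT_MEMORY_FAULT;
          vcpu->run->memory_fault.gpa = gfn_to_gpa(fault->gfn);
          vcpu->run->memory_fault.size = PAGE_SIZE;
          vcpu->run->memory_fault.flags = 0;
          if (fault->is_private)
                  vcpu->run->memory_fault.flags |= KVM_MEMORY_EXIT_FLAG_PRIVATE;
  }
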
No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@...gle.com>
Signed-off-by: James Houghton <jthoughton@...gle.com>
---
 arch/arm64/include/asm/kvm_host.h |  9 +++++++++
 arch/arm64/kvm/mmu.c              | 32 +++++++++++++++++--------------
 2 files changed, 27 insertions(+), 14 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 6ce2c51734820..ae83d95d11b74 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -413,6 +413,15 @@ struct kvm_vcpu_fault_info {
         u64 disr_el1;   /* Deferred [SError] Status Register */
 };
 
+struct kvm_page_fault {
+        const bool exec;
+        const bool write;
+        const bool is_private;
+
+        gfn_t gfn;
+        struct kvm_memory_slot *slot;
+};
+
 /*
  * VNCR() just places the VNCR_capable registers in the enum after
  * __VNCR_START__, and the value (after correction) to be an 8-byte offset
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 2942ec92c5a4a..0c209f2e1c7b2 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1476,8 +1476,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                           bool fault_is_perm)
 {
         int ret = 0;
-        bool write_fault, writable, force_pte = false;
-        bool exec_fault, mte_allowed;
+        bool writable, force_pte = false;
+        bool mte_allowed;
         bool device = false, vfio_allow_any_uc = false;
         unsigned long mmu_seq;
         phys_addr_t ipa = fault_ipa;
@@ -1485,7 +1485,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         struct vm_area_struct *vma;
         short vma_shift;
         void *memcache;
-        gfn_t gfn;
         kvm_pfn_t pfn;
         bool logging_active = memslot_is_logging(memslot);
         long vma_pagesize, fault_granule;
@@ -1494,13 +1493,18 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         struct page *page;
         enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_HANDLE_FAULT | KVM_PGTABLE_WALK_SHARED;
 
+        struct kvm_page_fault fault = {
+                .write = kvm_is_write_fault(vcpu),
+                .exec = kvm_vcpu_trap_is_exec_fault(vcpu),
+
+                .slot = memslot,
+        };
+
         if (fault_is_perm)
                 fault_granule = kvm_vcpu_trap_get_perm_fault_granule(vcpu);
-        write_fault = kvm_is_write_fault(vcpu);
-        exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
-        VM_BUG_ON(write_fault && exec_fault);
+        VM_BUG_ON(fault.write && fault.exec);
 
-        if (fault_is_perm && !write_fault && !exec_fault) {
+        if (fault_is_perm && !fault.write && !fault.exec) {
                 kvm_err("Unexpected L2 read permission error\n");
                 return -EFAULT;
         }
@@ -1516,7 +1520,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
          * only exception to this is when dirty logging is enabled at runtime
          * and a write fault needs to collapse a block entry into a table.
          */
-        if (!fault_is_perm || (logging_active && write_fault)) {
+        if (!fault_is_perm || (logging_active && fault.write)) {
                 int min_pages = kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu);
 
                 if (!is_protected_kvm_enabled())
@@ -1614,7 +1618,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                 ipa &= ~(vma_pagesize - 1);
         }
 
-        gfn = ipa >> PAGE_SHIFT;
+        fault.gfn = ipa >> PAGE_SHIFT;
         mte_allowed = kvm_vma_mte_allowed(vma);
 
         vfio_allow_any_uc = vma->vm_flags & VM_ALLOW_ANY_UNCACHED;
@@ -1633,7 +1637,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         mmu_seq = vcpu->kvm->mmu_invalidate_seq;
         mmap_read_unlock(current->mm);
 
-        pfn = __kvm_faultin_pfn(memslot, gfn, write_fault ? FOLL_WRITE : 0,
+        pfn = __kvm_faultin_pfn(memslot, fault.gfn, fault.write ? FOLL_WRITE : 0,
                                 &writable, &page);
         if (pfn == KVM_PFN_ERR_HWPOISON) {
                 kvm_send_hwpoison_signal(hva, vma_shift);
@@ -1654,7 +1658,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                  * change things at the last minute.
                  */
                 device = true;
-        } else if (logging_active && !write_fault) {
+        } else if (logging_active && !fault.write) {
                 /*
                  * Only actually map the page as writable if this was a write
                  * fault.
@@ -1662,7 +1666,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                 writable = false;
         }
 
-        if (exec_fault && device)
+        if (fault.exec && device)
                 return -ENOEXEC;
 
         /*
@@ -1722,7 +1726,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         if (writable)
                 prot |= KVM_PGTABLE_PROT_W;
 
-        if (exec_fault)
+        if (fault.exec)
                 prot |= KVM_PGTABLE_PROT_X;
 
         if (device) {
@@ -1759,7 +1763,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 
         /* Mark the page dirty only if the fault is handled successfully */
         if (writable && !ret)
-                mark_page_dirty_in_slot(kvm, memslot, gfn);
+                mark_page_dirty_in_slot(kvm, memslot, fault.gfn);
 
         return ret != -EAGAIN ? ret : 0;
 }
--
2.50.0.rc2.692.g299adb8693-goog