[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20240710234222.2333120-11-jthoughton@google.com>
Date: Wed, 10 Jul 2024 23:42:14 +0000
From: James Houghton <jthoughton@...gle.com>
To: Paolo Bonzini <pbonzini@...hat.com>
Cc: Marc Zyngier <maz@...nel.org>, Oliver Upton <oliver.upton@...ux.dev>,
James Morse <james.morse@....com>, Suzuki K Poulose <suzuki.poulose@....com>,
Zenghui Yu <yuzenghui@...wei.com>, Sean Christopherson <seanjc@...gle.com>, Shuah Khan <shuah@...nel.org>,
Peter Xu <peterx@...hat.com>, Axel Rasmussen <axelrasmussen@...gle.com>,
David Matlack <dmatlack@...gle.com>, James Houghton <jthoughton@...gle.com>, kvm@...r.kernel.org,
linux-doc@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org, kvmarm@...ts.linux.dev
Subject: [RFC PATCH 10/18] KVM: arm64: Add KVM Userfault support
Support comes in three parts:
1. When KVM Userfault is enabled, only install PAGE_SIZE PTEs. This
   prevents a fault on a non-userfault pfn from installing a huge PTE
   that would also map a userfault-enabled pfn.
2. When we get KVM_PFN_ERR_USERFAULT from __gfn_to_pfn_memslot, return a
memory fault to userspace.
3. When KVM Userfault is enabled for a particular kvm_gfn_range, unmap
it, so that we can get faults on it.
Signed-off-by: James Houghton <jthoughton@...gle.com>
---
arch/arm64/kvm/Kconfig | 1 +
arch/arm64/kvm/mmu.c | 36 ++++++++++++++++++++++++++++++++++--
2 files changed, 35 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 58f09370d17e..358153d91d58 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -37,6 +37,7 @@ menuconfig KVM
select HAVE_KVM_VCPU_RUN_PID_CHANGE
select SCHED_INFO
select GUEST_PERF_EVENTS if PERF_EVENTS
+ select KVM_USERFAULT
help
Support hosting virtualized guest machines.
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 8bcab0cc3fe9..ac283e606516 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1434,7 +1434,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* logging_active is guaranteed to never be true for VM_PFNMAP
* memslots.
*/
- if (logging_active) {
+ if (logging_active || kvm->userfault) {
force_pte = true;
vma_shift = PAGE_SHIFT;
} else {
@@ -1494,8 +1494,15 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
kvm_send_hwpoison_signal(hva, vma_shift);
return 0;
}
- if (is_error_noslot_pfn(pfn))
+ if (is_error_noslot_pfn(pfn)) {
+ if (pfn == KVM_PFN_ERR_USERFAULT)
+ kvm_prepare_memory_fault_exit(vcpu, gfn << PAGE_SHIFT,
+ PAGE_SIZE, write_fault,
+ /*exec=*/false,
+ /*private=*/false,
+ /*userfault=*/true);
return -EFAULT;
+ }
if (kvm_is_device_pfn(pfn)) {
/*
@@ -2105,3 +2112,28 @@ void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
}
+
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
+ struct kvm_gfn_range *range)
+{
+ unsigned long attrs = range->arg.attributes;
+
+ /*
+ * We only need to unmap if we're enabling userfault. Disabling it
+ * does not need an unmap. An unmap to get huge mappings will come
+ * later.
+ */
+ if (attrs & KVM_MEMORY_ATTRIBUTE_USERFAULT)
+ kvm_unmap_gfn_range(kvm, range);
+
+ return false;
+}
+
+bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
+ struct kvm_gfn_range *range)
+{
+ /* Nothing to do! */
+ return false;
+}
+#endif
--
2.45.2.993.g49e7a77208-goog
Powered by blists - more mailing lists