Message-ID: <20231230172351.574091-18-michael.roth@amd.com>
Date: Sat, 30 Dec 2023 11:23:33 -0600
From: Michael Roth <michael.roth@....com>
To: <kvm@...r.kernel.org>
CC: <linux-coco@...ts.linux.dev>, <linux-mm@...ck.org>,
<linux-crypto@...r.kernel.org>, <x86@...nel.org>,
<linux-kernel@...r.kernel.org>, <tglx@...utronix.de>, <mingo@...hat.com>,
<jroedel@...e.de>, <thomas.lendacky@....com>, <hpa@...or.com>,
<ardb@...nel.org>, <pbonzini@...hat.com>, <seanjc@...gle.com>,
<vkuznets@...hat.com>, <jmattson@...gle.com>, <luto@...nel.org>,
<dave.hansen@...ux.intel.com>, <slp@...hat.com>, <pgonda@...gle.com>,
<peterz@...radead.org>, <srinivas.pandruvada@...ux.intel.com>,
<rientjes@...gle.com>, <dovmurik@...ux.ibm.com>, <tobin@....com>,
<bp@...en8.de>, <vbabka@...e.cz>, <kirill@...temov.name>,
<ak@...ux.intel.com>, <tony.luck@...el.com>,
<sathyanarayanan.kuppuswamy@...ux.intel.com>, <alpergun@...gle.com>,
<jarkko@...nel.org>, <ashish.kalra@....com>, <nikunj.dadhania@....com>,
<pankaj.gupta@....com>, <liam.merwick@...cle.com>, <zhi.a.wang@...el.com>,
Vishal Annapurve <vannapurve@...gle.com>
Subject: [PATCH v11 17/35] KVM: Add HVA range operator
From: Vishal Annapurve <vannapurve@...gle.com>
Introduce an HVA range operator so that other KVM subsystems can operate on
HVA ranges.
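
As a usage illustration only (the callback and wrapper names below are made
up for this description and are not part of the patch): a subsystem supplies
a kvm_hva_range_op_t callback and passes it together with the HVA bounds, and
the helper invokes the callback once per intersecting GFN range, stopping on
the first non-zero return.

    /* Hypothetical example: count the pages backing [hva_start, hva_end). */
    static int count_gfn_pages(struct kvm *kvm, struct kvm_gfn_range *range,
                               void *data)
    {
            unsigned long *npages = data;

            *npages += range->end - range->start;
            return 0;       /* non-zero stops the walk and is returned */
    }

    static long example_count_pages(struct kvm *kvm, unsigned long hva_start,
                                    unsigned long hva_end)
    {
            unsigned long npages = 0;
            int ret;

            ret = kvm_vm_do_hva_range_op(kvm, hva_start, hva_end,
                                         count_gfn_pages, &npages);
            return ret ? ret : npages;
    }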
Signed-off-by: Vishal Annapurve <vannapurve@...gle.com>
[mdr: minor checkpatch alignment fixups]
Signed-off-by: Michael Roth <michael.roth@....com>
---
include/linux/kvm_host.h | 6 +++++
virt/kvm/kvm_main.c | 49 ++++++++++++++++++++++++++++++++++++++++
2 files changed, 55 insertions(+)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index a2a8331fbb94..bc3a468e97e3 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1429,6 +1429,12 @@ void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end);
void kvm_mmu_invalidate_end(struct kvm *kvm);
bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
+typedef int (*kvm_hva_range_op_t)(struct kvm *kvm,
+ struct kvm_gfn_range *range, void *data);
+
+int kvm_vm_do_hva_range_op(struct kvm *kvm, unsigned long hva_start,
+ unsigned long hva_end, kvm_hva_range_op_t handler, void *data);
+
long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 4fd0fb0044f5..03243a7ece08 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -667,6 +667,55 @@ static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
return r;
}
+int kvm_vm_do_hva_range_op(struct kvm *kvm, unsigned long hva_start,
+ unsigned long hva_end, kvm_hva_range_op_t handler, void *data)
+{
+ int ret = 0;
+ struct kvm_gfn_range gfn_range;
+ struct kvm_memory_slot *slot;
+ struct kvm_memslots *slots;
+ int i, idx;
+
+ if (WARN_ON_ONCE(hva_end <= hva_start))
+ return -EINVAL;
+
+ idx = srcu_read_lock(&kvm->srcu);
+
+ for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
+ struct interval_tree_node *node;
+
+ slots = __kvm_memslots(kvm, i);
+ kvm_for_each_memslot_in_hva_range(node, slots,
+ hva_start, hva_end - 1) {
+ unsigned long start, end;
+
+ slot = container_of(node, struct kvm_memory_slot,
+ hva_node[slots->node_idx]);
+ start = max(hva_start, slot->userspace_addr);
+ end = min(hva_end, slot->userspace_addr +
+ (slot->npages << PAGE_SHIFT));
+
+ /*
+ * {gfn(page) | page intersects with [hva_start, hva_end)} =
+ * {gfn_start, gfn_start+1, ..., gfn_end-1}.
+ */
+ gfn_range.start = hva_to_gfn_memslot(start, slot);
+ gfn_range.end = hva_to_gfn_memslot(end + PAGE_SIZE - 1, slot);
+ gfn_range.slot = slot;
+
+ ret = handler(kvm, &gfn_range, data);
+ if (ret)
+ goto e_ret;
+ }
+ }
+
+e_ret:
+ srcu_read_unlock(&kvm->srcu, idx);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(kvm_vm_do_hva_range_op);
+
static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
unsigned long start,
unsigned long end,
--
2.25.1