Message-ID: <20250311040321.1460-5-yezhenyu2@huawei.com>
Date: Tue, 11 Mar 2025 12:03:20 +0800
From: Zhenyu Ye <yezhenyu2@...wei.com>
To: <maz@...nel.org>, <yuzenghui@...wei.com>, <will@...nel.org>,
<oliver.upton@...ux.dev>, <catalin.marinas@....com>, <joey.gouly@....com>
CC: <linux-kernel@...r.kernel.org>, <yezhenyu2@...wei.com>,
<xiexiangyou@...wei.com>, <zhengchuan@...wei.com>, <wangzhou1@...ilicon.com>,
<linux-arm-kernel@...ts.infradead.org>, <kvm@...r.kernel.org>,
<kvmarm@...ts.linux.dev>
Subject: [PATCH v1 4/5] arm64/kvm: add support for handling the HDBSSF event
From: eillon <yezhenyu2@...wei.com>
Update the dirty bitmap based on the HDBSS buffer. Similar to the
implementation of the x86 PML feature, KVM flushes the buffers on all
VM-Exits, so we only need to kick running vCPUs to force a VM-Exit.
Signed-off-by: eillon <yezhenyu2@...wei.com>
---
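Note (not part of the patch): a minimal sketch of the userspace path
that reaches kvm_arch_sync_dirty_log(); the vm_fd, slot id and bitmap
allocation are assumptions for illustration only.

	#include <linux/kvm.h>
	#include <string.h>
	#include <sys/ioctl.h>

	static int get_dirty_log(int vm_fd, __u32 slot, void *bitmap)
	{
		struct kvm_dirty_log log;

		memset(&log, 0, sizeof(log));
		log.slot = slot;		/* memslot to query */
		log.dirty_bitmap = bitmap;	/* one bit per guest page */

		/*
		 * KVM_GET_DIRTY_LOG ends up in kvm_arch_sync_dirty_log(),
		 * which kicks every running vCPU; each resulting VM-Exit
		 * flushes that vCPU's HDBSS buffer into the dirty bitmap
		 * before the bitmap is copied back to userspace.
		 */
		return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
	}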
arch/arm64/kvm/arm.c | 10 ++++++++
arch/arm64/kvm/handle_exit.c | 47 ++++++++++++++++++++++++++++++++++++
arch/arm64/kvm/mmu.c | 7 ++++++
3 files changed, 64 insertions(+)
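Note (not part of the patch): a worked example of the entry decoding
done by the flush loop below, using the layout the patch relies on
(bit 0 = valid, bits [55:12] = IPA); the entry value is made up:

	u64 entry = 0x123456001ULL;		/* bit 0 set: entry is valid */
	u64 ipa = entry & GENMASK_ULL(55, 12);	/* 0x123456000 */
	u64 gfn = ipa >> PAGE_SHIFT;		/* 0x123456 with 4K pages */
	kvm_vcpu_mark_page_dirty(vcpu, gfn);	/* what the loop does per entry */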
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 825cfef3b1c2..fceceeead011 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1845,7 +1845,17 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
+ /*
+ * Flush all CPUs' dirty log buffers to the dirty_bitmap. Called
+ * before reporting dirty_bitmap to userspace. KVM flushes the buffers
+ * on all VM-Exits, thus we only need to kick running vCPUs to force a
+ * VM-Exit.
+ */
+ struct kvm_vcpu *vcpu;
+ unsigned long i;
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_vcpu_kick(vcpu);
}
static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 512d152233ff..db9d7e1f72bf 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -330,6 +330,50 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
return arm_exit_handlers[esr_ec];
}
+#define HDBSS_ENTRY_VALID_SHIFT 0
+#define HDBSS_ENTRY_VALID_MASK (1UL << HDBSS_ENTRY_VALID_SHIFT)
+#define HDBSS_ENTRY_IPA_SHIFT 12
+#define HDBSS_ENTRY_IPA_MASK GENMASK_ULL(55, HDBSS_ENTRY_IPA_SHIFT)
+
+static void kvm_flush_hdbss_buffer(struct kvm_vcpu *vcpu)
+{
+ int idx, curr_idx;
+ u64 *hdbss_buf;
+
+ if (!vcpu->kvm->enable_hdbss)
+ return;
+
+ dsb(sy);
+ isb();
+ curr_idx = HDBSSPROD_IDX(read_sysreg_s(SYS_HDBSSPROD_EL2));
+
+ /* Do nothing if the HDBSS buffer is empty or br_el2 is unset */
+ if (curr_idx == 0 || vcpu->arch.hdbss.br_el2 == 0)
+ return;
+
+ hdbss_buf = page_address(phys_to_page(HDBSSBR_BADDR(vcpu->arch.hdbss.br_el2)));
+ if (!hdbss_buf) {
+ kvm_err("HDBSS buffer flush requested but the buffer is NULL!\n");
+ return;
+ }
+
+ for (idx = 0; idx < curr_idx; idx++) {
+ u64 gpa;
+
+ gpa = hdbss_buf[idx];
+ if (!(gpa & HDBSS_ENTRY_VALID_MASK))
+ continue;
+
+ gpa = gpa & HDBSS_ENTRY_IPA_MASK;
+ kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
+ }
+
+ /* Reset the HDBSS producer index */
+ write_sysreg_s(0, SYS_HDBSSPROD_EL2);
+ dsb(sy);
+ isb();
+}
+
/*
* We may be single-stepping an emulated instruction. If the emulation
* has been completed in the kernel, we can return to userspace with a
@@ -365,6 +409,9 @@ int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
{
struct kvm_run *run = vcpu->run;
+ if (vcpu->kvm->enable_hdbss)
+ kvm_flush_hdbss_buffer(vcpu);
+
if (ARM_SERROR_PENDING(exception_index)) {
/*
* The SError is handled by handle_exit_early(). If the guest
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 9c11e2292b1e..3e0781ae0ae1 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1790,6 +1790,13 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
ipa = fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
+ /*
+ * The HDBSS buffer was already flushed on entry to
+ * handle_trap_exceptions(), so there is nothing to do here.
+ */
+ if (ESR_ELx_ISS2(esr) & ESR_ELx_HDBSSF)
+ return 1;
+
if (esr_fsc_is_translation_fault(esr)) {
/* Beyond sanitised PARange (which is the IPA limit) */
if (fault_ipa >= BIT_ULL(get_kvm_ipa_limit())) {
--
2.39.3