Message-ID: <20210126124444.27136-6-zhukeqian1@huawei.com>
Date: Tue, 26 Jan 2021 20:44:42 +0800
From: Keqian Zhu <zhukeqian1@...wei.com>
To: <linux-kernel@...r.kernel.org>,
<linux-arm-kernel@...ts.infradead.org>, <kvm@...r.kernel.org>,
<kvmarm@...ts.cs.columbia.edu>, Marc Zyngier <maz@...nel.org>,
Will Deacon <will@...nel.org>,
Catalin Marinas <catalin.marinas@....com>
CC: Alex Williamson <alex.williamson@...hat.com>,
Kirti Wankhede <kwankhede@...dia.com>,
Cornelia Huck <cohuck@...hat.com>,
Mark Rutland <mark.rutland@....com>,
James Morse <james.morse@....com>,
Robin Murphy <robin.murphy@....com>,
Suzuki K Poulose <suzuki.poulose@....com>,
<wanghaibin.wang@...wei.com>, <jiangkunkun@...wei.com>,
<xiexiangyou@...wei.com>, <zhengchuan@...wei.com>,
<yubihong@...wei.com>
Subject: [RFC PATCH 5/7] kvm: arm64: Add some HW_DBM related mmu interfaces

This adds set_dbm, clear_dbm and sync_dirty interfaces to the mmu
layer. They simply wrap the corresponding interfaces of the pgtable
layer, adding reschedule semantics.

Signed-off-by: Keqian Zhu <zhukeqian1@...wei.com>
---
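[Reviewer note, not part of the commit message: the "reschedule
semantics" come from the existing stage2_apply_range_resched() wrapper
in arch/arm64/kvm/mmu.c. For context, a minimal sketch of the 5.11-era
helper it expands to (reproduced from memory of mainline, so treat the
details as approximate): it walks the range in stage-2 PGD-sized
chunks and may briefly drop kvm->mmu_lock between chunks.

static int stage2_apply_range(struct kvm *kvm, phys_addr_t addr,
			      phys_addr_t end,
			      int (*fn)(struct kvm_pgtable *, u64, u64),
			      bool resched)
{
	int ret;
	u64 next;

	do {
		struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;

		if (!pgt)
			return -EINVAL;

		/* Apply fn() to at most one PGD-sized chunk at a time. */
		next = stage2_pgd_addr_end(kvm, addr, end);
		ret = fn(pgt, addr, next - addr);
		if (ret)
			break;

		/* Drop mmu_lock between chunks if a reschedule is due. */
		if (resched && next != end)
			cond_resched_lock(&kvm->mmu_lock);
	} while (addr = next, addr != end);

	return ret;
}

#define stage2_apply_range_resched(kvm, addr, end, fn)	\
	stage2_apply_range(kvm, addr, end, fn, true)

The consequence for this patch is that the new helpers must be entered
with kvm->mmu_lock held and from a context that may reschedule.]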
 arch/arm64/include/asm/kvm_mmu.h |  7 +++++++
 arch/arm64/kvm/mmu.c             | 30 ++++++++++++++++++++++++++++++
 2 files changed, 37 insertions(+)

diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index e52d82aeadca..51dd31ba1b94 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -183,6 +183,13 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
 			     void **haddr);
 void free_hyp_pgds(void);
 
+void kvm_stage2_clear_dbm(struct kvm *kvm, struct kvm_memory_slot *slot,
+			  gfn_t gfn_offset, unsigned long npages);
+void kvm_stage2_set_dbm(struct kvm *kvm, struct kvm_memory_slot *slot,
+			gfn_t gfn_offset, unsigned long npages);
+void kvm_stage2_sync_dirty(struct kvm *kvm, struct kvm_memory_slot *slot,
+			   gfn_t gfn_offset, unsigned long npages);
+
 void stage2_unmap_vm(struct kvm *kvm);
 int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu);
 void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 7d2257cc5438..18717fd12731 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -609,6 +609,36 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 	kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
 }
 
+void kvm_stage2_clear_dbm(struct kvm *kvm, struct kvm_memory_slot *slot,
+			  gfn_t gfn_offset, unsigned long npages)
+{
+	phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
+	phys_addr_t addr = base_gfn << PAGE_SHIFT;
+	phys_addr_t end = (base_gfn + npages) << PAGE_SHIFT;
+
+	stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_clear_dbm);
+}
+
+void kvm_stage2_set_dbm(struct kvm *kvm, struct kvm_memory_slot *slot,
+			gfn_t gfn_offset, unsigned long npages)
+{
+	phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
+	phys_addr_t addr = base_gfn << PAGE_SHIFT;
+	phys_addr_t end = (base_gfn + npages) << PAGE_SHIFT;
+
+	stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_set_dbm);
+}
+
+void kvm_stage2_sync_dirty(struct kvm *kvm, struct kvm_memory_slot *slot,
+			   gfn_t gfn_offset, unsigned long npages)
+{
+	phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
+	phys_addr_t addr = base_gfn << PAGE_SHIFT;
+	phys_addr_t end = (base_gfn + npages) << PAGE_SHIFT;
+
+	stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_sync_dirty);
+}
+
 static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
 {
 	__clean_dcache_guest_page(pfn, size);
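
[Not part of the patch: a hypothetical caller, only to illustrate the
expected locking around the new helpers. kvm_sync_dirty_memslot() is
made up for this example; the real callers arrive later in the series.
Since stage2_apply_range_resched() uses cond_resched_lock() on
kvm->mmu_lock, that lock must be held on entry:

/* Hypothetical example: sync HW dirty state for one whole memslot. */
static void kvm_sync_dirty_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *memslot)
{
	/* stage2_apply_range_resched() may cond_resched_lock() this. */
	spin_lock(&kvm->mmu_lock);
	kvm_stage2_sync_dirty(kvm, memslot, 0, memslot->npages);
	spin_unlock(&kvm->mmu_lock);
}
]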
--
2.19.1