Message-ID: <20230109215347.3119271-3-rananta@google.com>
Date: Mon, 9 Jan 2023 21:53:43 +0000
From: Raghavendra Rao Ananta <rananta@...gle.com>
To: Oliver Upton <oupton@...gle.com>, Marc Zyngier <maz@...nel.org>,
Ricardo Koller <ricarkol@...gle.com>,
Reiji Watanabe <reijiw@...gle.com>,
James Morse <james.morse@....com>,
Alexandru Elisei <alexandru.elisei@....com>,
Suzuki K Poulose <suzuki.poulose@....com>
Cc: Paolo Bonzini <pbonzini@...hat.com>,
Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>,
Jing Zhang <jingzhangos@...gle.com>,
Colton Lewis <coltonlewis@...gle.com>,
Raghavendra Rao Anata <rananta@...gle.com>,
linux-arm-kernel@...ts.infradead.org, kvmarm@...ts.linux.dev,
linux-kernel@...r.kernel.org, kvm@...r.kernel.org
Subject: [RFC PATCH 2/6] KVM: arm64: Add support for FEAT_TLBIRANGE

Define a generic macro, __kvm_tlb_flush_range(), to invalidate the TLBs
over a range of addresses. Use it to implement
__kvm_tlb_flush_range_vmid_ipa() (for both VHE and nVHE), which flushes
a range of stage-2 page-table entries by IPA in one go.

On systems that support FEAT_TLBIRANGE, the following patches will use
this to replace global TLBIs such as vmalls12e1is in the map, unmap,
and dirty-logging paths with the ranged ripas2e1is.

Signed-off-by: Raghavendra Rao Ananta <rananta@...gle.com>
---
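Note: as a rough sketch, this is how a host-side caller in a later patch
of this series might drive the new hypercall from the stage-2 unmap or
dirty-logging path. The wrapper name kvm_stage2_flush_range() and the
call site are illustrative assumptions, not part of this patch; only
__kvm_tlb_flush_range_vmid_ipa() itself is introduced here.

  /*
   * Hypothetical host-side wrapper: invalidate the stage-2 TLB entries
   * covering [start, end) for the given MMU. __kvm_tlb_flush_range()
   * falls back to __kvm_tlb_flush_vmid() for oversized ranges, or when
   * FEAT_TLBIRANGE is unavailable and the range exceeds MAX_TLBI_OPS
   * pages.
   */
  static void kvm_stage2_flush_range(struct kvm_s2_mmu *mmu,
  				   phys_addr_t start, phys_addr_t end)
  {
  	/* Level 0: no TTL hint, as the walked level is not known here. */
  	kvm_call_hyp(__kvm_tlb_flush_range_vmid_ipa, mmu, start, end, 0);
  }
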
arch/arm64/include/asm/kvm_asm.h | 21 +++++++++++++++++++++
arch/arm64/kvm/hyp/nvhe/hyp-main.c | 11 +++++++++++
arch/arm64/kvm/hyp/nvhe/tlb.c | 24 ++++++++++++++++++++++++
arch/arm64/kvm/hyp/vhe/tlb.c | 20 ++++++++++++++++++++
4 files changed, 76 insertions(+)
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 43c3bc0f9544d..bdf94ae0333b0 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -79,6 +79,7 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___pkvm_init_vm,
__KVM_HOST_SMCCC_FUNC___pkvm_init_vcpu,
__KVM_HOST_SMCCC_FUNC___pkvm_teardown_vm,
+ __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_range_vmid_ipa,
};
#define DECLARE_KVM_VHE_SYM(sym) extern char sym[]
@@ -221,10 +222,30 @@ DECLARE_KVM_NVHE_SYM(__per_cpu_end);
DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
+#define __kvm_tlb_flush_range(op, mmu, start, end, tlb_level) do { \
+ unsigned long pages, stride; \
+ \
+ stride = PAGE_SIZE; \
+ start = round_down(start, stride); \
+ end = round_up(end, stride); \
+ pages = (end - start) >> PAGE_SHIFT; \
+ \
+ if ((!system_supports_tlb_range() && \
+ (end - start) >= (MAX_TLBI_OPS * stride)) || \
+ pages >= MAX_TLBI_RANGE_PAGES) { \
+ __kvm_tlb_flush_vmid(mmu); \
+ break; \
+ } \
+ \
+ __flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false); \
+} while (0)
+
extern void __kvm_flush_vm_context(void);
extern void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
int level);
+extern void __kvm_tlb_flush_range_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t start,
+ phys_addr_t end, int level);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
extern void __kvm_timer_set_cntvoff(u64 cntvoff);
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 728e01d4536b0..ac52d0fbb9719 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -116,6 +116,16 @@ static void handle___kvm_flush_vm_context(struct kvm_cpu_context *host_ctxt)
__kvm_flush_vm_context();
}
+static void handle___kvm_tlb_flush_range_vmid_ipa(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
+ DECLARE_REG(phys_addr_t, start, host_ctxt, 2);
+ DECLARE_REG(phys_addr_t, end, host_ctxt, 3);
+ DECLARE_REG(int, level, host_ctxt, 4);
+
+ __kvm_tlb_flush_range_vmid_ipa(kern_hyp_va(mmu), start, end, level);
+}
+
static void handle___kvm_tlb_flush_vmid_ipa(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
@@ -314,6 +324,7 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__kvm_adjust_pc),
HANDLE_FUNC(__kvm_vcpu_run),
HANDLE_FUNC(__kvm_flush_vm_context),
+ HANDLE_FUNC(__kvm_tlb_flush_range_vmid_ipa),
HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
HANDLE_FUNC(__kvm_tlb_flush_vmid),
HANDLE_FUNC(__kvm_flush_cpu_context),
diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c
index d296d617f5896..292f5c4834d08 100644
--- a/arch/arm64/kvm/hyp/nvhe/tlb.c
+++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
@@ -55,6 +55,30 @@ static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
}
}
+void __kvm_tlb_flush_range_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t start,
+ phys_addr_t end, int level)
+{
+ struct tlb_inv_context cxt;
+
+ dsb(ishst);
+
+ /* Switch to requested VMID */
+ __tlb_switch_to_guest(mmu, &cxt);
+
+ __kvm_tlb_flush_range(ipas2e1is, mmu, start, end, level);
+
+ dsb(ish);
+ __tlbi(vmalle1is);
+ dsb(ish);
+ isb();
+
+ /* See the comment below in __kvm_tlb_flush_vmid_ipa() */
+ if (icache_is_vpipt())
+ icache_inval_all_pou();
+
+ __tlb_switch_to_host(&cxt);
+}
+
void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
phys_addr_t ipa, int level)
{
diff --git a/arch/arm64/kvm/hyp/vhe/tlb.c b/arch/arm64/kvm/hyp/vhe/tlb.c
index 24cef9b87f9e9..2631cc09e4184 100644
--- a/arch/arm64/kvm/hyp/vhe/tlb.c
+++ b/arch/arm64/kvm/hyp/vhe/tlb.c
@@ -79,6 +79,26 @@ static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
local_irq_restore(cxt->flags);
}
+void __kvm_tlb_flush_range_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t start,
+ phys_addr_t end, int level)
+{
+ struct tlb_inv_context cxt;
+
+ dsb(ishst);
+
+ /* Switch to requested VMID */
+ __tlb_switch_to_guest(mmu, &cxt);
+
+ __kvm_tlb_flush_range(ipas2e1is, mmu, start, end, level);
+
+ dsb(ish);
+ __tlbi(vmalle1is);
+ dsb(ish);
+ isb();
+
+ __tlb_switch_to_host(&cxt);
+}
+
void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
phys_addr_t ipa, int level)
{
--
2.39.0.314.g84b9a713c41-goog