Message-ID: <CAAhV-H7zF7zDZQ0tHtZndTmWDteaV=nAwXL3Q1P2zcJssVt7tA@mail.gmail.com>
Date: Sun, 23 Jun 2024 15:54:26 +0800
From: Huacai Chen <chenhuacai@...nel.org>
To: Bibo Mao <maobibo@...ngson.cn>
Cc: Tianrui Zhao <zhaotianrui@...ngson.cn>, WANG Xuerui <kernel@...0n.name>,
Sean Christopherson <seanjc@...gle.com>, kvm@...r.kernel.org, loongarch@...ts.linux.dev,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2 1/6] LoongArch: KVM: Delay secondary mmu tlb flush
until guest entry
Hi, Bibo,
On Wed, Jun 19, 2024 at 4:09 PM Bibo Mao <maobibo@...ngson.cn> wrote:
>
> If there is a page fault for the secondary mmu, a tlb flush
What does "secondary mmu" in this context mean? Maybe "guest mmu"?
Huacai
> operation indexed by the faulting gpa address and VMID is needed. The
> VMID is stored in register CSR_GSTAT and is reloaded or recalculated
> during guest entry.
>
> Currently CSR_GSTAT is not saved and restored across vcpu context
> switches; it is recalculated during guest entry. So CSR_GSTAT is only
> valid while the vcpu runs in guest mode. Once the vcpu has exited to
> host mode the register may be stale: it may still hold the VMID of the
> last scheduled vcpu rather than that of the current vcpu.
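To make that concrete, here is a simplified, hypothetical trace of the
problem on one physical cpu (the vcpu names and ordering are
illustrative only, not taken from the patch):

  vcpu A enters guest                  -> CSR_GSTAT.GID = VMID of A
  vcpu A exits to host and sleeps
  vcpu B is scheduled, enters guest    -> CSR_GSTAT.GID = VMID of B
  vcpu A is scheduled back, handles a
  stage-2 fault, kvm_flush_tlb_gpa()   -> invtlb runs with VMID of B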
>
> Function kvm_flush_tlb_gpa() should be called with the vcpu's real
> VMID, so move the call to the guest entry path. Also add the arch
> specific request id KVM_REQ_TLB_FLUSH_GPA to request the tlb flush;
> the flush can be skipped when the VMID is updated, since a VMID update
> already invalidates all guest tlb entries.
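The request-based flow can also be modelled outside the kernel. Below
is a minimal, self-contained user-space sketch of the pattern only; the
struct and helper names (handle_stage2_fault(), enter_guest(), and the
VMID handling) are made up for illustration and are not the actual KVM
code. The fault path records the GPA and raises a request bit, and the
flush runs at guest entry after the VMID has been refreshed, where a
VMID update lets the single-GPA flush be dropped.

  /*
   * Hypothetical model of the deferred-flush pattern: the fault handler
   * only records the GPA and sets a request bit; the flush itself runs
   * at guest entry, after the VMID has been refreshed, so it always
   * sees the current VMID.
   */
  #include <stdbool.h>
  #include <stdio.h>

  #define REQ_TLB_FLUSH_GPA  (1u << 0)
  #define INVALID_GPA        (~0ul)

  struct vcpu {
          unsigned int requests;    /* pending request bits */
          unsigned long flush_gpa;  /* GPA recorded by the fault handler */
          unsigned long vmid;       /* VMID currently programmed in hw */
  };

  /* Fault path: defer the flush instead of issuing it with a possibly
   * stale VMID. */
  static void handle_stage2_fault(struct vcpu *v, unsigned long gpa)
  {
          v->flush_gpa = gpa;
          v->requests |= REQ_TLB_FLUSH_GPA;
  }

  /* Stand-in for the invtlb instruction: flush one GPA under one VMID. */
  static void flush_tlb_gpa(unsigned long vmid, unsigned long gpa)
  {
          printf("invtlb: vmid=%lu gpa=0x%lx\n", vmid, gpa);
  }

  /* Guest entry: refresh the VMID first, then service the deferred
   * flush. */
  static void enter_guest(struct vcpu *v, bool vmid_was_recycled)
  {
          if (vmid_was_recycled) {
                  v->vmid++;                          /* new VMID */
                  v->requests &= ~REQ_TLB_FLUSH_GPA;  /* flush not needed */
          }

          if (v->requests & REQ_TLB_FLUSH_GPA) {
                  v->requests &= ~REQ_TLB_FLUSH_GPA;
                  if (v->flush_gpa != INVALID_GPA) {
                          flush_tlb_gpa(v->vmid, v->flush_gpa);
                          v->flush_gpa = INVALID_GPA;
                  }
          }
          /* ... actually enter the guest here ... */
  }

  int main(void)
  {
          struct vcpu v = { .flush_gpa = INVALID_GPA, .vmid = 1 };

          handle_stage2_fault(&v, 0x4000);
          enter_guest(&v, false);  /* flush runs here, with the live VMID */

          handle_stage2_fault(&v, 0x8000);
          enter_guest(&v, true);   /* VMID recycled: flush skipped */
          return 0;
  }

Compiled and run, the model issues one flush with the live VMID for the
first entry and skips it on the second, where the VMID was recycled.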
>
> Signed-off-by: Bibo Mao <maobibo@...ngson.cn>
> ---
>  arch/loongarch/include/asm/kvm_host.h |  2 ++
>  arch/loongarch/kvm/main.c             |  1 +
>  arch/loongarch/kvm/mmu.c              |  4 ++--
>  arch/loongarch/kvm/tlb.c              |  5 +----
>  arch/loongarch/kvm/vcpu.c             | 18 ++++++++++++++++++
>  5 files changed, 24 insertions(+), 6 deletions(-)
>
> diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
> index c87b6ea0ec47..32c4948f534f 100644
> --- a/arch/loongarch/include/asm/kvm_host.h
> +++ b/arch/loongarch/include/asm/kvm_host.h
> @@ -30,6 +30,7 @@
>  #define KVM_PRIVATE_MEM_SLOTS	0
>
>  #define KVM_HALT_POLL_NS_DEFAULT	500000
> +#define KVM_REQ_TLB_FLUSH_GPA	KVM_ARCH_REQ(0)
>
>  #define KVM_GUESTDBG_SW_BP_MASK	\
>  	(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)
> @@ -190,6 +191,7 @@ struct kvm_vcpu_arch {
>
>  	/* vcpu's vpid */
>  	u64 vpid;
> +	gpa_t flush_gpa;
>
>  	/* Frequency of stable timer in Hz */
>  	u64 timer_mhz;
> diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c
> index 86a2f2d0cb27..844736b99d38 100644
> --- a/arch/loongarch/kvm/main.c
> +++ b/arch/loongarch/kvm/main.c
> @@ -242,6 +242,7 @@ void kvm_check_vpid(struct kvm_vcpu *vcpu)
>  		kvm_update_vpid(vcpu, cpu);
>  		trace_kvm_vpid_change(vcpu, vcpu->arch.vpid);
>  		vcpu->cpu = cpu;
> +		kvm_clear_request(KVM_REQ_TLB_FLUSH_GPA, vcpu);
>  	}
>
>  	/* Restore GSTAT(0x50).vpid */
> diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c
> index 98883aa23ab8..9e39d28fec35 100644
> --- a/arch/loongarch/kvm/mmu.c
> +++ b/arch/loongarch/kvm/mmu.c
> @@ -908,8 +908,8 @@ int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
>  		return ret;
>
>  	/* Invalidate this entry in the TLB */
> -	kvm_flush_tlb_gpa(vcpu, gpa);
> -
> +	vcpu->arch.flush_gpa = gpa;
> +	kvm_make_request(KVM_REQ_TLB_FLUSH_GPA, vcpu);
>  	return 0;
>  }
>
> diff --git a/arch/loongarch/kvm/tlb.c b/arch/loongarch/kvm/tlb.c
> index 02535df6b51f..ebdbe9264e9c 100644
> --- a/arch/loongarch/kvm/tlb.c
> +++ b/arch/loongarch/kvm/tlb.c
> @@ -23,10 +23,7 @@ void kvm_flush_tlb_all(void)
>
>  void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa)
>  {
> -	unsigned long flags;
> -
> -	local_irq_save(flags);
> +	lockdep_assert_irqs_disabled();
>  	gpa &= (PAGE_MASK << 1);
>  	invtlb(INVTLB_GID_ADDR, read_csr_gstat() & CSR_GSTAT_GID, gpa);
> -	local_irq_restore(flags);
>  }
> diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
> index 9e8030d45129..b747bd8bc037 100644
> --- a/arch/loongarch/kvm/vcpu.c
> +++ b/arch/loongarch/kvm/vcpu.c
> @@ -51,6 +51,16 @@ static int kvm_check_requests(struct kvm_vcpu *vcpu)
>  	return RESUME_GUEST;
>  }
>
> +static void kvm_late_check_requests(struct kvm_vcpu *vcpu)
> +{
> +	lockdep_assert_irqs_disabled();
> +	if (kvm_check_request(KVM_REQ_TLB_FLUSH_GPA, vcpu))
> +		if (vcpu->arch.flush_gpa != INVALID_GPA) {
> +			kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa);
> +			vcpu->arch.flush_gpa = INVALID_GPA;
> +		}
> +}
> +
>  /*
>   * Check and handle pending signal and vCPU requests etc
>   * Run with irq enabled and preempt enabled
> @@ -101,6 +111,13 @@ static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
>  	/* Make sure the vcpu mode has been written */
>  	smp_store_mb(vcpu->mode, IN_GUEST_MODE);
>  	kvm_check_vpid(vcpu);
> +
> +	/*
> +	 * Called after function kvm_check_vpid()
> +	 * Since it updates csr_gstat used by kvm_flush_tlb_gpa(),
> +	 * also it may clear KVM_REQ_TLB_FLUSH_GPA pending bit
> +	 */
> +	kvm_late_check_requests(vcpu);
>  	vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
>  	/* Clear KVM_LARCH_SWCSR_LATEST as CSR will change when enter guest */
>  	vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
> @@ -994,6 +1011,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
>  	struct loongarch_csrs *csr;
>
>  	vcpu->arch.vpid = 0;
> +	vcpu->arch.flush_gpa = INVALID_GPA;
>
>  	hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
>  	vcpu->arch.swtimer.function = kvm_swtimer_wakeup;
> --
> 2.39.3
>