Message-ID: <20190527094710.GU2623@hirez.programming.kicks-ass.net>
Date: Mon, 27 May 2019 11:47:10 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: Juergen Gross <jgross@...e.com>
Cc: Nadav Amit <namit@...are.com>, Ingo Molnar <mingo@...hat.com>,
Andy Lutomirski <luto@...nel.org>,
Borislav Petkov <bp@...en8.de>, linux-kernel@...r.kernel.org,
"K. Y. Srinivasan" <kys@...rosoft.com>,
Haiyang Zhang <haiyangz@...rosoft.com>,
Stephen Hemminger <sthemmin@...rosoft.com>,
Sasha Levin <sashal@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>, x86@...nel.org,
Paolo Bonzini <pbonzini@...hat.com>,
Dave Hansen <dave.hansen@...ux.intel.com>,
Boris Ostrovsky <boris.ostrovsky@...cle.com>,
linux-hyperv@...r.kernel.org,
virtualization@...ts.linux-foundation.org, kvm@...r.kernel.org,
xen-devel@...ts.xenproject.org
Subject: Re: [RFC PATCH 5/6] x86/mm/tlb: Flush remote and local TLBs
concurrently
On Sat, May 25, 2019 at 10:54:50AM +0200, Juergen Gross wrote:
> On 25/05/2019 10:22, Nadav Amit wrote:
> > diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
> > index 946f8f1f1efc..3a156e63c57d 100644
> > --- a/arch/x86/include/asm/paravirt_types.h
> > +++ b/arch/x86/include/asm/paravirt_types.h
> > @@ -211,6 +211,12 @@ struct pv_mmu_ops {
> >  	void (*flush_tlb_user)(void);
> >  	void (*flush_tlb_kernel)(void);
> >  	void (*flush_tlb_one_user)(unsigned long addr);
> > +	/*
> > +	 * flush_tlb_multi() is the preferred interface. When it is used,
> > +	 * flush_tlb_others() should return false.
>
> This comment does not make sense. flush_tlb_others() return type is
> void.
I suspect that is an artifact from before the static_key; an attempt to
make the pv interface less awkward.
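(For illustration, a sketch of the kind of static-key dispatch being referred to; flush_tlb_multi_enabled and the two pv hooks come from the RFC patch, while the wrapper name flush_tlb_remote() is invented for this sketch.)

/*
 * Sketch only: prefer the combined local+remote interface when the
 * backend provides flush_tlb_multi(), otherwise fall back to the
 * remote-only flush_tlb_others() path.
 */
DEFINE_STATIC_KEY_TRUE(flush_tlb_multi_enabled);

static inline void flush_tlb_remote(const struct cpumask *mask,
				    const struct flush_tlb_info *info)
{
	if (static_branch_likely(&flush_tlb_multi_enabled))
		flush_tlb_multi(mask, info);	/* mask may include this CPU */
	else
		flush_tlb_others(mask, info);	/* remote CPUs only */
}
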
Something like the below would work for KVM, I suspect; the others (Hyper-V and Xen) are more 'interesting'.
---
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -580,7 +580,7 @@ static void __init kvm_apf_trap_init(voi
 
 static DEFINE_PER_CPU(cpumask_var_t, __pv_tlb_mask);
 
-static void kvm_flush_tlb_others(const struct cpumask *cpumask,
+static void kvm_flush_tlb_multi(const struct cpumask *cpumask,
 			const struct flush_tlb_info *info)
 {
 	u8 state;
@@ -594,6 +594,9 @@ static void kvm_flush_tlb_others(const s
 	 * queue flush_on_enter for pre-empted vCPUs
 	 */
 	for_each_cpu(cpu, flushmask) {
+		if (cpu == smp_processor_id())
+			continue;
+
 		src = &per_cpu(steal_time, cpu);
 		state = READ_ONCE(src->preempted);
 		if ((state & KVM_VCPU_PREEMPTED)) {
@@ -603,7 +606,7 @@ static void kvm_flush_tlb_others(const s
 		}
 	}
 
-	native_flush_tlb_others(flushmask, info);
+	native_flush_tlb_multi(flushmask, info);
 }
 
 static void __init kvm_guest_init(void)
@@ -628,9 +631,8 @@ static void __init kvm_guest_init(void)
 	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
 	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
 	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
-		pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
+		pv_ops.mmu.flush_tlb_multi = kvm_flush_tlb_multi;
 		pv_ops.mmu.tlb_remove_table = tlb_remove_table;
-		static_key_disable(&flush_tlb_multi_enabled.key);
 	}
 
 	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
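
(For reference, a sketch of how kvm_flush_tlb_multi() would read with the hunks above applied; the unchanged lines are reconstructed from kvm.c of that time, so treat this as illustrative rather than authoritative.)

static void kvm_flush_tlb_multi(const struct cpumask *cpumask,
			const struct flush_tlb_info *info)
{
	u8 state;
	int cpu;
	struct kvm_steal_time *src;
	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_tlb_mask);

	cpumask_copy(flushmask, cpumask);
	/*
	 * We have to call flush only on online vCPUs. And
	 * queue flush_on_enter for pre-empted vCPUs
	 */
	for_each_cpu(cpu, flushmask) {
		/*
		 * Sketch note: with flush_tlb_multi() the mask may include
		 * the local CPU.  It cannot be preempted while we are
		 * running on it, so skip the steal-time check and leave it
		 * in the mask; native_flush_tlb_multi() does the local
		 * flush itself.
		 */
		if (cpu == smp_processor_id())
			continue;

		src = &per_cpu(steal_time, cpu);
		state = READ_ONCE(src->preempted);
		if ((state & KVM_VCPU_PREEMPTED)) {
			if (try_cmpxchg(&src->preempted, &state,
					state | KVM_VCPU_FLUSH_TLB))
				__cpumask_clear_cpu(cpu, flushmask);
		}
	}

	native_flush_tlb_multi(flushmask, info);
}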