diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index d400b6d9d246..44ba73601f50 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -720,7 +720,7 @@ void initialize_tlbstate_and_flush(void)
  * because all x86 flush operations are serializing and the
  * atomic64_read operation won't be reordered by the compiler.
  */
-static void flush_tlb_func(void *info)
+static void flush_tlb_func(void *info)
 {
 	/*
 	 * We have three different tlb_gen values in here.  They are:
@@ -738,6 +738,7 @@ static void flush_tlb_func(void *info)
 	u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
 	bool local = smp_processor_id() == f->initiating_cpu;
 	unsigned long nr_invalidate = 0;
+	enum tlb_flush_reason flush_reason;
 
 	/* This code cannot presently handle being reentered. */
 	VM_WARN_ON(!irqs_disabled());
@@ -747,12 +748,21 @@
 		count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
 
 		/* Can only happen on remote CPUs */
-		if (f->mm && f->mm != loaded_mm)
-			return;
+		if (f->mm && f->mm != loaded_mm) {
+			flush_reason = TLB_FLUSH_SKIPPED;
+			goto done;
+		}
+		flush_reason = TLB_REMOTE_SHOOTDOWN;
+	} else if (f->mm == NULL) {
+		flush_reason = TLB_LOCAL_SHOOTDOWN;
+	} else {
+		flush_reason = TLB_LOCAL_MM_SHOOTDOWN;
 	}
 
-	if (unlikely(loaded_mm == &init_mm))
-		return;
+	if (unlikely(loaded_mm == &init_mm)) {
+		flush_reason = TLB_FLUSH_SKIPPED;
+		goto done;
+	}
 
 	VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
 		   loaded_mm->context.ctx_id);
@@ -768,7 +778,8 @@
 		 * IPIs to lazy TLB mode CPUs.
 		 */
 		switch_mm_irqs_off(NULL, &init_mm, NULL);
-		return;
+		flush_reason = TLB_FLUSH_SKIPPED;
+		goto done;
 	}
 
 	if (unlikely(local_tlb_gen == mm_tlb_gen)) {
@@ -778,6 +789,7 @@
 		 * be handled can catch us all the way up, leaving no work for
 		 * the second flush.
 		 */
+		flush_reason = TLB_FLUSH_SKIPPED;
 		goto done;
 	}
 
@@ -849,10 +861,7 @@
 
 	/* Tracing is done in a unified manner to reduce the code size */
 done:
-	trace_tlb_flush(!local ? TLB_REMOTE_SHOOTDOWN :
-			(f->mm == NULL) ? TLB_LOCAL_SHOOTDOWN :
-				TLB_LOCAL_MM_SHOOTDOWN,
-			nr_invalidate);
+	trace_tlb_flush(flush_reason, nr_invalidate);
 }
 
 static bool tlb_is_not_lazy(int cpu, void *data)
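
Note on prerequisites: trace_tlb_flush() takes an enum tlb_flush_reason value
(defined in include/linux/mm_types.h), and this patch uses TLB_FLUSH_SKIPPED,
which is not among the existing reasons. Below is a minimal sketch of the
companion enum change this diff appears to assume; the TLB_FLUSH_SKIPPED name
and its placement are inferred from this patch's usage, not taken from an
upstream header, and the tracepoint string table (the EM()/EMe() list in
include/trace/events/tlb.h) would also need a matching entry for the new
reason to print sensibly.

	/* include/linux/mm_types.h (sketch; existing members as in current kernels) */
	enum tlb_flush_reason {
		TLB_FLUSH_ON_TASK_SWITCH,
		TLB_REMOTE_SHOOTDOWN,
		TLB_LOCAL_SHOOTDOWN,
		TLB_LOCAL_MM_SHOOTDOWN,
		TLB_REMOTE_SEND_IPI,
		TLB_FLUSH_SKIPPED,	/* assumed: new reason introduced by this patch */
		NR_TLB_FLUSH_REASONS,
	};

Funneling every early exit through the done: label with a precomputed
flush_reason means the paths that previously returned before the tracepoint
(wrong mm on a remote CPU, init_mm, lazy-TLB switch, already up-to-date
tlb_gen) are now traced as skipped flushes instead of being invisible.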