Message-ID: <20170804111210.z72abnok3jen5f4v@hirez.programming.kicks-ass.net>
Date: Fri, 4 Aug 2017 13:12:10 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: Andy Lutomirski <luto@...nel.org>
Cc: Vince Weaver <vincent.weaver@...ne.edu>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Stephane Eranian <eranian@...il.com>
Subject: Re: perf: bug in rdpmc/mmap accounting after exec
On Fri, Aug 04, 2017 at 11:17:02AM +0200, Peter Zijlstra wrote:
> Yeah, I'll make it pretty, write a changelog and things like that :-)
Slightly different from the earlier one in that I now pass @mm along
instead of @vma. Same thing otherwise.
---
Subject: perf,x86: Fix RDPMC vs mm_struct tracking
From: Peter Zijlstra <peterz@...radead.org>
Date: Wed, 2 Aug 2017 19:39:30 +0200
Vince reported:
> Failing test case:
>
> fd=perf_event_open();
> addr=mmap(fd);
> exec() // without closing or unmapping the event
> fd=perf_event_open();
> addr=mmap(fd);
> rdpmc() // GPFs due to rdpmc being disabled
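Fleshed out, that sequence looks roughly like the below. This is a
minimal sketch, not Vince's actual test case: it assumes x86, GCC's
__builtin_ia32_rdpmc() builtin and a plain hardware instruction
counter, and omits all error handling.

  #define _GNU_SOURCE
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>
  #include <sys/mman.h>
  #include <sys/syscall.h>
  #include <linux/perf_event.h>

  static int open_counter(void)
  {
          struct perf_event_attr attr;

          memset(&attr, 0, sizeof(attr));
          attr.size = sizeof(attr);
          attr.type = PERF_TYPE_HARDWARE;
          attr.config = PERF_COUNT_HW_INSTRUCTIONS;

          /* pid == 0, cpu == -1: count this task on any CPU */
          return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
  }

  int main(int argc, char **argv)
  {
          struct perf_event_mmap_page *pc;
          int fd;

          fd = open_counter();
          pc = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED, fd, 0);

          if (argc == 1)  /* first generation: re-exec, no munmap()/close() */
                  execl("/proc/self/exe", argv[0], "child", (char *)NULL);

          /* second generation: fd/pc above are the new event and mapping */
          if (pc->cap_user_rdpmc && pc->index)
                  /* GPFs when CR4.PCE was never set for the post-exec mm */
                  printf("pmc: %llu\n", __builtin_ia32_rdpmc(pc->index - 1));

          return 0;
  }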
The problem is of course that exec() plays tricks with current->mm:
the new mm is installed first and the old mappings are only destroyed
afterwards. The teardown of the old perf buffer therefore sees the new
mm; ->event_unmapped() decrements the new mm's
context.perf_rdpmc_allowed to -1, the subsequent mmap() only brings it
back to 0, the == 1 check never fires and CR4.PCE is never set, hence
the rdpmc() GPF.

Fix this confusion by passing along vma->vm_mm instead of relying on
current->mm.
Cc: Arnaldo Carvalho de Melo <acme@...nel.org>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: Stephane Eranian <eranian@...il.com>
Cc: stable@...r.kernel.org
Reported-by: Vince Weaver <vincent.weaver@...ne.edu>
Tested-by: Vince Weaver <vincent.weaver@...ne.edu>
Reviewed-by: Andy Lutomirski <luto@...nel.org>
Fixes: 1e0fb9ec679c ("perf: Add pmu callbacks to track event mapping and unmapping")
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Link: http://lkml.kernel.org/r/20170802173930.cstykcqefmqt7jau@hirez.programming.kicks-ass.net
---
arch/x86/events/core.c | 16 +++++++---------
include/linux/perf_event.h | 4 ++--
kernel/events/core.c | 6 +++---
3 files changed, 12 insertions(+), 14 deletions(-)
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2114,7 +2114,7 @@ static void refresh_pce(void *ignored)
load_mm_cr4(this_cpu_read(cpu_tlbstate.loaded_mm));
}
-static void x86_pmu_event_mapped(struct perf_event *event)
+static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
{
if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
return;
@@ -2129,22 +2129,20 @@ static void x86_pmu_event_mapped(struct
* For now, this can't happen because all callers hold mmap_sem
* for write. If this changes, we'll need a different solution.
*/
- lockdep_assert_held_exclusive(&current->mm->mmap_sem);
+ lockdep_assert_held_exclusive(&mm->mmap_sem);
- if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
- on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
+ if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1)
+ on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
}
-static void x86_pmu_event_unmapped(struct perf_event *event)
+static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
{
- if (!current->mm)
- return;
if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
return;
- if (atomic_dec_and_test(&current->mm->context.perf_rdpmc_allowed))
- on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
+ if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed))
+ on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
}
static int x86_pmu_event_idx(struct perf_event *event)
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -310,8 +310,8 @@ struct pmu {
* Notification that the event was mapped or unmapped. Called
* in the context of the mapping task.
*/
- void (*event_mapped) (struct perf_event *event); /*optional*/
- void (*event_unmapped) (struct perf_event *event); /*optional*/
+ void (*event_mapped) (struct perf_event *event, struct mm_struct *mm); /*optional*/
+ void (*event_unmapped) (struct perf_event *event, struct mm_struct *mm); /*optional*/
/*
* Flags for ->add()/->del()/ ->start()/->stop(). There are
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5090,7 +5090,7 @@ static void perf_mmap_open(struct vm_are
atomic_inc(&event->rb->aux_mmap_count);
if (event->pmu->event_mapped)
- event->pmu->event_mapped(event);
+ event->pmu->event_mapped(event, vma->vm_mm);
}
static void perf_pmu_output_stop(struct perf_event *event);
@@ -5113,7 +5113,7 @@ static void perf_mmap_close(struct vm_ar
unsigned long size = perf_data_size(rb);
if (event->pmu->event_unmapped)
- event->pmu->event_unmapped(event);
+ event->pmu->event_unmapped(event, vma->vm_mm);
/*
* rb->aux_mmap_count will always drop before rb->mmap_count and
@@ -5411,7 +5411,7 @@ static int perf_mmap(struct file *file,
vma->vm_ops = &perf_mmap_vmops;
if (event->pmu->event_mapped)
- event->pmu->event_mapped(event);
+ event->pmu->event_mapped(event, vma->vm_mm);
return ret;
}
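For reference, the userspace side that trips over this is the
self-monitoring read sequence sketched in the perf_event_mmap_page
comments in include/uapi/linux/perf_event.h. Roughly, with pc pointing
at the mapped control page (the userspace types and the rdpmc()/
barrier() wrappers spelled out here are mine, not the header's):

  #include <stdint.h>
  #include <linux/perf_event.h>

  #define rdpmc(idx)  __builtin_ia32_rdpmc(idx)  /* raw RDPMC instruction */
  #define barrier()   asm volatile("" ::: "memory")

  static uint64_t read_count(volatile struct perf_event_mmap_page *pc)
  {
          uint32_t seq, idx, width;
          uint64_t count;
          int64_t pmc;

          do {
                  seq = pc->lock;
                  barrier();

                  idx = pc->index;
                  count = pc->offset;
                  if (pc->cap_user_rdpmc && idx) {
                          width = pc->pmc_width;
                          pmc = rdpmc(idx - 1);  /* GPFs if CR4.PCE is clear */
                          pmc <<= 64 - width;
                          pmc >>= 64 - width;    /* sign-extend to 64 bit */
                          count += pmc;
                  }

                  barrier();
          } while (pc->lock != seq);

          return count;
  }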