Message-ID: <20160729150901.GA26514@kernel.org>
Date: Fri, 29 Jul 2016 12:09:01 -0300
From: Arnaldo Carvalho de Melo <acme@...nel.org>
To: Bryton Lee <brytonlee01@...il.com>
Cc: Ingo Molnar <mingo@...nel.org>, linux-kernel@...r.kernel.org,
Namhyung Kim <namhyung@...nel.org>,
Adrian Hunter <adrian.hunter@...el.com>,
Andi Kleen <andi@...stfloor.org>,
David Ahern <dsahern@...il.com>,
Frederic Weisbecker <fweisbec@...il.com>,
Jiri Olsa <jolsa@...hat.com>,
Peter Zijlstra <a.p.zijlstra@...llo.nl>,
Stephane Eranian <eranian@...gle.com>,
Arnaldo Carvalho de Melo <acme@...hat.com>
Subject: Re: [PATCH 13/18] perf symbols: Protect dso cache tree using
dso->lock
On Fri, Jul 29, 2016 at 08:56:32PM +0800, Bryton Lee wrote:
> Sorry to disturb you! Maybe my question isn't a good one, but I
> do want to know what situation makes the dwarf callchain unwind
> run concurrently. Can you give an example to elaborate on that?
> Thank you very much!
IIRC that was for things like 'perf top --call-graph dwarf', where there
are two threads: one traversing a set of histograms to present them on
the screen, and another creating a new set of histograms from events it
reads from the perf mmap, Jiri?
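
Something like this minimal, standalone sketch of that thread layout
(all of it hypothetical stand-in code, not the actual perf sources: the
shared counter plays the role of the hists/dso cache state that both
threads end up touching):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int nr_entries;	/* stand-in for the shared hists/dso cache */

/* Reads samples from the perf mmap and unwinds their callchains,
 * adding an entry to the shared state for each sample. */
static void *reader_fn(void *arg)
{
	int i;

	(void)arg;
	for (i = 0; i < 1000; i++) {
		pthread_mutex_lock(&lock);
		nr_entries++;
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

/* Periodically traverses the shared state to refresh the screen. */
static void *display_fn(void *arg)
{
	int i;

	(void)arg;
	for (i = 0; i < 10; i++) {
		pthread_mutex_lock(&lock);
		printf("entries so far: %d\n", nr_entries);
		pthread_mutex_unlock(&lock);
		usleep(1000);
	}
	return NULL;
}

int main(void)
{
	pthread_t reader, display;

	pthread_create(&reader, NULL, reader_fn, NULL);
	pthread_create(&display, NULL, display_fn, NULL);
	pthread_join(reader, NULL);
	pthread_join(display, NULL);
	return 0;
}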
- Arnaldo
> On Mon, May 18, 2015 at 11:50 PM, Arnaldo Carvalho de Melo
> <acme@...nel.org> wrote:
> > From: Namhyung Kim <namhyung@...nel.org>
> >
> > The dso cache is accessed during dwarf callchain unwind and it might be
> > processed concurrently. Protect it under dso->lock.
> >
> > Note that it doesn't protect dso_cache__find(). I think it's safe to
> > access the cache tree without the lock since we don't delete nodes.
> >
> > If it missed an existing node due to a concurrent rotation, it'll
> > find it during dso_cache__insert() anyway.
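
To illustrate the scheme the description above is talking about, here is
a minimal sketch: a plain linked list stands in for the rb-tree, and the
names (fake_dso, cache_find, cache_insert, cache_read) are hypothetical,
not the real perf ones. The lockless fast path is only safe under the
assumptions stated in the commit message (nodes are never deleted while
the dso is live, so a reader can at worst miss a brand-new node and
retry via the locked insert):

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

struct cache_node {
	struct cache_node *next;
	uint64_t offset;
	/* cached data would follow */
};

struct fake_dso {
	struct cache_node *cache;	/* head of the cache list */
	pthread_mutex_t lock;
};

/* Lookup without taking dso->lock: illustrative only, a real
 * implementation needs the no-deletion guarantee to make this safe. */
static struct cache_node *cache_find(struct fake_dso *dso, uint64_t offset)
{
	struct cache_node *n;

	for (n = dso->cache; n; n = n->next)
		if (n->offset == offset)
			return n;
	return NULL;
}

/* Insert under dso->lock; if another thread won the race and already
 * inserted this offset, return the existing node instead. */
static struct cache_node *cache_insert(struct fake_dso *dso,
				       struct cache_node *new)
{
	struct cache_node *old;

	pthread_mutex_lock(&dso->lock);
	old = cache_find(dso, new->offset);
	if (!old) {
		new->next = dso->cache;
		dso->cache = new;
	}
	pthread_mutex_unlock(&dso->lock);
	return old;	/* NULL means "new" was inserted */
}

static struct cache_node *cache_read(struct fake_dso *dso, uint64_t offset)
{
	struct cache_node *node, *old;

	node = cache_find(dso, offset);	/* lockless fast path */
	if (node)
		return node;

	node = calloc(1, sizeof(*node));
	if (!node)
		return NULL;
	node->offset = offset;

	old = cache_insert(dso, node);
	if (old) {			/* we lost the race */
		free(node);
		node = old;
	}
	return node;
}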
> >
> > Signed-off-by: Namhyung Kim <namhyung@...nel.org>
> > Cc: Adrian Hunter <adrian.hunter@...el.com>
> > Cc: Andi Kleen <andi@...stfloor.org>
> > Cc: David Ahern <dsahern@...il.com>
> > Cc: Frederic Weisbecker <fweisbec@...il.com>
> > Cc: Jiri Olsa <jolsa@...hat.com>
> > Cc: Peter Zijlstra <a.p.zijlstra@...llo.nl>
> > Cc: Stephane Eranian <eranian@...gle.com>
> > Link: http://lkml.kernel.org/r/1431909055-21442-27-git-send-email-namhyung@kernel.org
> > Signed-off-by: Arnaldo Carvalho de Melo <acme@...hat.com>
> > ---
> > tools/perf/util/dso.c | 34 +++++++++++++++++++++++++++-------
> > 1 file changed, 27 insertions(+), 7 deletions(-)
> >
> > diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
> > index 482d6024ef13..666e1db44541 100644
> > --- a/tools/perf/util/dso.c
> > +++ b/tools/perf/util/dso.c
> > @@ -495,10 +495,12 @@ bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
> > }
> >
> > static void
> > -dso_cache__free(struct rb_root *root)
> > +dso_cache__free(struct dso *dso)
> > {
> > + struct rb_root *root = &dso->data.cache;
> > struct rb_node *next = rb_first(root);
> >
> > + pthread_mutex_lock(&dso->lock);
> > while (next) {
> > struct dso_cache *cache;
> >
> > @@ -507,10 +509,12 @@ dso_cache__free(struct rb_root *root)
> > rb_erase(&cache->rb_node, root);
> > free(cache);
> > }
> > + pthread_mutex_unlock(&dso->lock);
> > }
> >
> > -static struct dso_cache *dso_cache__find(const struct rb_root *root, u64 offset)
> > +static struct dso_cache *dso_cache__find(struct dso *dso, u64 offset)
> > {
> > + const struct rb_root *root = &dso->data.cache;
> > struct rb_node * const *p = &root->rb_node;
> > const struct rb_node *parent = NULL;
> > struct dso_cache *cache;
> > @@ -529,17 +533,20 @@ static struct dso_cache *dso_cache__find(const struct rb_root *root, u64 offset)
> > else
> > return cache;
> > }
> > +
> > return NULL;
> > }
> >
> > -static void
> > -dso_cache__insert(struct rb_root *root, struct dso_cache *new)
> > +static struct dso_cache *
> > +dso_cache__insert(struct dso *dso, struct dso_cache *new)
> > {
> > + struct rb_root *root = &dso->data.cache;
> > struct rb_node **p = &root->rb_node;
> > struct rb_node *parent = NULL;
> > struct dso_cache *cache;
> > u64 offset = new->offset;
> >
> > + pthread_mutex_lock(&dso->lock);
> > while (*p != NULL) {
> > u64 end;
> >
> > @@ -551,10 +558,17 @@ dso_cache__insert(struct rb_root *root, struct dso_cache *new)
> > p = &(*p)->rb_left;
> > else if (offset >= end)
> > p = &(*p)->rb_right;
> > + else
> > + goto out;
> > }
> >
> > rb_link_node(&new->rb_node, parent, p);
> > rb_insert_color(&new->rb_node, root);
> > +
> > + cache = NULL;
> > +out:
> > + pthread_mutex_unlock(&dso->lock);
> > + return cache;
> > }
> >
> > static ssize_t
> > @@ -572,6 +586,7 @@ static ssize_t
> > dso_cache__read(struct dso *dso, u64 offset, u8 *data, ssize_t size)
> > {
> > struct dso_cache *cache;
> > + struct dso_cache *old;
> > ssize_t ret;
> >
> > do {
> > @@ -591,7 +606,12 @@ dso_cache__read(struct dso *dso, u64 offset, u8 *data, ssize_t size)
> >
> > cache->offset = cache_offset;
> > cache->size = ret;
> > - dso_cache__insert(&dso->data.cache, cache);
> > + old = dso_cache__insert(dso, cache);
> > + if (old) {
> > + /* we lose the race */
> > + free(cache);
> > + cache = old;
> > + }
> >
> > ret = dso_cache__memcpy(cache, offset, data, size);
> >
> > @@ -608,7 +628,7 @@ static ssize_t dso_cache_read(struct dso *dso, u64 offset,
> > {
> > struct dso_cache *cache;
> >
> > - cache = dso_cache__find(&dso->data.cache, offset);
> > + cache = dso_cache__find(dso, offset);
> > if (cache)
> > return dso_cache__memcpy(cache, offset, data, size);
> > else
> > @@ -964,7 +984,7 @@ void dso__delete(struct dso *dso)
> >
> > dso__data_close(dso);
> > auxtrace_cache__free(dso->auxtrace_cache);
> > - dso_cache__free(&dso->data.cache);
> > + dso_cache__free(dso);
> > dso__free_a2l(dso);
> > zfree(&dso->symsrc_filename);
> > pthread_mutex_destroy(&dso->lock);
> > --
> > 2.1.0
> >
>
>
>
> --
> Best Regards
>
> Bryton.Lee