Message-ID: <37D7C6CF3E00A74B8858931C1DB2F077537FF516@SHSMSX103.ccr.corp.intel.com>
Date: Wed, 10 Jan 2018 19:31:26 +0000
From: "Liang, Kan" <kan.liang@...el.com>
To: Jiri Olsa <jolsa@...hat.com>
CC: "acme@...nel.org" <acme@...nel.org>,
"peterz@...radead.org" <peterz@...radead.org>,
"mingo@...hat.com" <mingo@...hat.com>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"wangnan0@...wei.com" <wangnan0@...wei.com>,
"jolsa@...nel.org" <jolsa@...nel.org>,
"namhyung@...nel.org" <namhyung@...nel.org>,
"ak@...ux.intel.com" <ak@...ux.intel.com>,
"yao.jin@...ux.intel.com" <yao.jin@...ux.intel.com>
Subject: RE: [PATCH V3 02/12] perf mmap: factor out function to find
ringbuffer position
> On Thu, Dec 21, 2017 at 10:08:44AM -0800, kan.liang@...el.com wrote:
> > From: Kan Liang <kan.liang@...el.com>
> >
> > perf record has dedicated code to calculate the ring buffer
> > position for both the overwrite and non-overwrite modes.
> > perf top will support both modes later, so it is useful to
> > make this code generic.
> >
> > Introduce a new interface, perf_mmap__read_init(), to find the
> > ring buffer position.
> > Add a check for map->refcnt in perf_mmap__read_init().
> > 'size' is needed by both perf_mmap__read_init() and
> > perf_mmap__push(), so it has to be calculated in each function.
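To make the intended use concrete, here is a minimal sketch of a read
loop built on the new interface. Illustrative only, not part of the
patch; read_available() is a made-up caller, and the consume step
mirrors the perf_mmap__consume() call already used in this file:

	/*
	 * Usage sketch: drive perf_mmap__read_init() from a
	 * caller's read loop (non-overwrite case shown, where
	 * 'end' equals the current head).
	 */
	static int read_available(struct perf_mmap *map, bool overwrite)
	{
		u64 start, end;
		int rc;

		rc = perf_mmap__read_init(map, overwrite, &start, &end);
		if (rc == -EAGAIN)
			return 0;	/* ring buffer is empty, not an error */
		if (rc < 0)
			return rc;	/* -EINVAL: unmapped (refcnt 0) or bad range */

		/*
		 * [start, end) now bounds the readable data in the
		 * ring buffer; parse events from it here, then mark
		 * them consumed.
		 */
		map->prev = end;
		perf_mmap__consume(map, overwrite);
		return 0;
	}
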
>
> it's 2 separate changes then, plus 1 not mentioned in the changelog..
> could you please split this into separate patches:
>
> - Introduce a new interface perf_mmap__read_init ...
> - Add a check for map->refcnt in perf_mmap__read_init
> - add new EAGAIN return value logic
>
Thanks for the comments, Jirka.
I will split the patch accordingly in V4.
Thanks,
Kan
> thanks,
> jirka
>
> >
> > Signed-off-by: Kan Liang <kan.liang@...el.com>
> > ---
> >  tools/perf/util/mmap.c | 62 ++++++++++++++++++++++++++++++++++----------------
> >  tools/perf/util/mmap.h |  2 ++
> >  2 files changed, 45 insertions(+), 19 deletions(-)
> >
> > diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
> > index 05076e6..3fd4f3c 100644
> > --- a/tools/perf/util/mmap.c
> > +++ b/tools/perf/util/mmap.c
> > @@ -267,41 +267,65 @@ static int overwrite_rb_find_range(void *buf, int mask, u64 head, u64 *start, u6
> > return -1;
> > }
> >
> > -int perf_mmap__push(struct perf_mmap *md, bool overwrite,
> > - void *to, int push(void *to, void *buf, size_t size))
> > +/*
> > + * Report the start and end of the available data in ringbuffer
> > + */
> > +int perf_mmap__read_init(struct perf_mmap *map, bool overwrite,
> > + u64 *start, u64 *end)
> > {
> > - u64 head = perf_mmap__read_head(md);
> > - u64 old = md->prev;
> > - u64 end = head, start = old;
> > - unsigned char *data = md->base + page_size;
> > + u64 head = perf_mmap__read_head(map);
> > + u64 old = map->prev;
> > + unsigned char *data = map->base + page_size;
> > unsigned long size;
> > - void *buf;
> > - int rc = 0;
> >
> > - start = overwrite ? head : old;
> > - end = overwrite ? old : head;
> > + /*
> > + * Check if event was unmapped due to a POLLHUP/POLLERR.
> > + */
> > + if (!refcount_read(&map->refcnt))
> > + return -EINVAL;
> >
> > - if (start == end)
> > - return 0;
> > + *start = overwrite ? head : old;
> > + *end = overwrite ? old : head;
> >
> > - size = end - start;
> > - if (size > (unsigned long)(md->mask) + 1) {
> > + if (*start == *end)
> > + return -EAGAIN;
> > +
> > + size = *end - *start;
> > + if (size > (unsigned long)(map->mask) + 1) {
> > if (!overwrite) {
> > WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
> >
> > - md->prev = head;
> > - perf_mmap__consume(md, overwrite);
> > - return 0;
> > + map->prev = head;
> > + perf_mmap__consume(map, overwrite);
> > + return -EAGAIN;
> > }
> >
> > /*
> > * Backward ring buffer is full. We still have a chance to read
> > * most of data from it.
> > */
> > -	if (overwrite_rb_find_range(data, md->mask, head, &start, &end))
> > -		return -1;
> > +	if (overwrite_rb_find_range(data, map->mask, head, start, end))
> > +		return -EINVAL;
> > }
> >
> > + return 0;
> > +}
> > +
> > +int perf_mmap__push(struct perf_mmap *md, bool overwrite,
> > +	void *to, int push(void *to, void *buf, size_t size))
> > +{
> > + u64 head = perf_mmap__read_head(md);
> > + u64 end, start;
> > + unsigned char *data = md->base + page_size;
> > + unsigned long size;
> > + void *buf;
> > + int rc;
> > +
> > + rc = perf_mmap__read_init(md, overwrite, &start, &end);
> > + if (rc < 0)
> > + return (rc == -EAGAIN) ? 0 : -1;
> > +
> > + size = end - start;
> > if ((start & md->mask) + size != (end & md->mask)) {
> > buf = &data[start & md->mask];
> > size = md->mask + 1 - (start & md->mask);
> >
> > diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
> > index d640273..abe9b9f 100644
> > --- a/tools/perf/util/mmap.h
> > +++ b/tools/perf/util/mmap.h
> > @@ -94,4 +94,6 @@ int perf_mmap__push(struct perf_mmap *md, bool backward,
> >
> > size_t perf_mmap__mmap_len(struct perf_mmap *map);
> >
> > +int perf_mmap__read_init(struct perf_mmap *map, bool overwrite,
> > + u64 *start, u64 *end);
> > #endif /*__PERF_MMAP_H */
> > --
> > 2.5.5
> >
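
Illustrative addendum, not from the thread: with the reworked
perf_mmap__push() above, an empty ring buffer still yields 0 to callers,
because the internal -EAGAIN from perf_mmap__read_init() is folded back
into the old "no data" return, so only real failures read as -1. A
made-up writer callback (pushfn_example) shows the caller-visible
contract:

	/*
	 * A stand-in writer callback: a real one would write 'size'
	 * bytes from 'buf' to the destination behind 'to' (e.g. a
	 * perf.data file) and return < 0 on failure.
	 */
	static int pushfn_example(void *to, void *buf, size_t size)
	{
		return 0;
	}

	static int drain_one_mmap(struct perf_mmap *md, bool overwrite, void *to)
	{
		/* 0 on success or empty buffer, -1 on real failure. */
		return perf_mmap__push(md, overwrite, to, pushfn_example);
	}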