Message-ID: <7d4d7dd4-3bab-a63b-c666-89f7ccd7ace0@linux.intel.com>
Date: Sat, 7 Oct 2017 00:31:37 +0800
From: "Jin, Yao" <yao.jin@...ux.intel.com>
To: Arnaldo Carvalho de Melo <acme@...nel.org>
Cc: jolsa@...nel.org, peterz@...radead.org, mingo@...hat.com,
alexander.shishkin@...ux.intel.com, Linux-kernel@...r.kernel.org,
ak@...ux.intel.com, kan.liang@...el.com, yao.jin@...el.com
Subject: Re: [PATCH v1 1/4] perf annotate: create a new hists to manage
multiple events samples
On 10/5/2017 9:21 PM, Arnaldo Carvalho de Melo wrote:
> Em Wed, Aug 16, 2017 at 06:18:33PM +0800, Jin Yao escreveu:
>> An issue was found when using perf annotate.
>>
>> perf record -e cycles,branches ...
>> perf annotate main --stdio
>>
>> The result only shows cycles. It should show both cycles and
>> branches on the left side. It works with "--group", but we need
>> this to work even without groups.
>
> Right, for --group we know that we'll be reading all the counters at
> each sample, so it all works and we can use the current design.
>
> When we're not using groups tho, each sample has just one of the events
> and we end up with separate views.
>
> Note tho that since the annotation buckets are kept per 'struct symbol'
> instance, this problem should be present only in the hist_entry based
> views, i.e. 'perf report' and 'perf top', right?
Yes, it seems to be in the hist_entry based views:
'perf annotate', 'perf report' and maybe others.
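If I read the current code right, that is because they all share the
per-symbol buckets, i.e. roughly (just a sketch, not compiled; the
helper name entry_sym_hist() is made up, the rest are the existing
annotate helpers):

    /* for a given hist_entry and the evsel its sample came from ... */
    static struct sym_hist *entry_sym_hist(struct hist_entry *he,
                                           struct perf_evsel *evsel)
    {
            /*
             * hist_entries that resolve to the same function point to
             * the same 'struct symbol', hence to the same 'struct
             * annotation' ...
             */
            struct annotation *notes = symbol__annotation(he->ms.sym);

            /* ... and the per-event bucket is picked by the evsel index */
            return annotation__histogram(notes, evsel->idx);
    }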
>
> I.e. all struct hist_entry->ms.sym instances point to the same struct
> symbol and thus will use the same annotation histogram buckets, i.e.
> symbol__annotation(hist_entry->ms.sym) points to the same 'struct
> annotation' instance, and then it's a matter of passing this pointer to
> annotation__histogram(notes, IDX) where this IDX is perf_evsel->idx.
>
> I wonder if we can't just add a new rb_node in struct hist_entry and
> have it in two rb_trees, i.e. in two 'struct hists' instances, one per
> evsel and one per evlist.
>
Currently we only have per-evsel hists. This idea would add a new per-evlist hists:
struct perf_evlist {
        ......
        struct hists    hists;
};
And the hist_entry would be linked into both the per-evsel hists and the
per-evlist hists.
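On the hist_entry side I guess it would look roughly like this (just a
sketch of my understanding; the field name rb_node_evlist is made up):

    struct hist_entry {
            struct rb_node          rb_node_in;
            struct rb_node          rb_node;
            /* new: link the same entry into the per-evlist hists too */
            struct rb_node          rb_node_evlist;
            ......
    };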
Please correct me if my understanding of this idea is wrong.
Thanks
Jin Yao
> To be continued...
>
>> In the current design, the hists is per-event. So we need a new
>> hists to manage the samples for multiple events, and a new
>> hist_event data structure to save the map/symbol information
>> per event.
>>
>> Signed-off-by: Jin Yao <yao.jin@...ux.intel.com>
>> ---
>>  tools/perf/builtin-annotate.c | 60 +++++++++++++++++++++++++++++++------------
>>  tools/perf/util/annotate.c    |  8 ++++++
>>  tools/perf/util/annotate.h    |  4 +++
>>  tools/perf/util/sort.c        | 21 +++++++++++++++
>>  tools/perf/util/sort.h        | 13 ++++++++++
>>  5 files changed, 89 insertions(+), 17 deletions(-)
>>
>> diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
>> index 658c920..833866c 100644
>> --- a/tools/perf/builtin-annotate.c
>> +++ b/tools/perf/builtin-annotate.c
>> @@ -45,6 +45,7 @@ struct perf_annotate {
>> bool skip_missing;
>> const char *sym_hist_filter;
>> const char *cpu_list;
>> + struct hists events_hists;
>> DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
>> };
>>
>> @@ -153,6 +154,7 @@ static int perf_evsel__add_sample(struct perf_evsel *evsel,
>> struct hists *hists = evsel__hists(evsel);
>> struct hist_entry *he;
>> int ret;
>> + struct hist_event *hevt;
>>
>> if (ann->sym_hist_filter != NULL &&
>> (al->sym == NULL ||
>> @@ -177,12 +179,30 @@ static int perf_evsel__add_sample(struct perf_evsel *evsel,
>> */
>> process_branch_stack(sample->branch_stack, al, sample);
>>
>> - he = hists__add_entry(hists, al, NULL, NULL, NULL, sample, true);
>> - if (he == NULL)
>> - return -ENOMEM;
>> + if (symbol_conf.nr_events == 1) {
>> + he = hists__add_entry(hists, al, NULL, NULL, NULL,
>> + sample, true);
>> + if (he == NULL)
>> + return -ENOMEM;
>> +
>> + ret = hist_entry__inc_addr_samples(he, sample, evsel->idx,
>> + al->addr);
>> + hists__inc_nr_samples(hists, true);
>> + } else {
>> + he = hists__add_entry(&ann->events_hists, al, NULL,
>> + NULL, NULL, sample, true);
>> + if (he == NULL)
>> + return -ENOMEM;
>> +
>> + hevt = hist_entry__event_add(he, evsel);
>> + if (hevt == NULL)
>> + return -ENOMEM;
>> +
>> + ret = hist_event__inc_addr_samples(hevt, sample, hevt->idx,
>> + al->addr);
>> + hists__inc_nr_samples(&ann->events_hists, true);
>> + }
>>
>> - ret = hist_entry__inc_addr_samples(he, sample, evsel->idx, al->addr);
>> - hists__inc_nr_samples(hists, true);
>> return ret;
>> }
>>
>> @@ -304,7 +324,8 @@ static int __cmd_annotate(struct perf_annotate *ann)
>> int ret;
>> struct perf_session *session = ann->session;
>> struct perf_evsel *pos;
>> - u64 total_nr_samples;
>> + u64 total_nr_samples = 0;
>> + struct hists *hists;
>>
>> if (ann->cpu_list) {
>> ret = perf_session__cpu_bitmap(session, ann->cpu_list,
>> @@ -335,23 +356,26 @@ static int __cmd_annotate(struct perf_annotate *ann)
>> if (verbose > 2)
>> perf_session__fprintf_dsos(session, stdout);
>>
>> - total_nr_samples = 0;
>> - evlist__for_each_entry(session->evlist, pos) {
>> - struct hists *hists = evsel__hists(pos);
>> - u32 nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
>> + if (symbol_conf.nr_events > 1) {
>> + hists = &ann->events_hists;
>> + total_nr_samples +=
>> + hists->stats.nr_events[PERF_RECORD_SAMPLE];
>> +
>> + hists__collapse_resort(hists, NULL);
>> + hists__output_resort(hists, NULL);
>> + hists__find_annotations(hists, NULL, ann);
>> + } else {
>> + evlist__for_each_entry(session->evlist, pos) {
>> + hists = evsel__hists(pos);
>> + total_nr_samples +=
>> + hists->stats.nr_events[PERF_RECORD_SAMPLE];
>>
>> - if (nr_samples > 0) {
>> - total_nr_samples += nr_samples;
>> hists__collapse_resort(hists, NULL);
>> /* Don't sort callchain */
>> perf_evsel__reset_sample_bit(pos, CALLCHAIN);
>> perf_evsel__output_resort(pos, NULL);
>> -
>> - if (symbol_conf.event_group &&
>> - !perf_evsel__is_group_leader(pos))
>> - continue;
>> -
>> hists__find_annotations(hists, pos, ann);
>> + break;
>> }
>> }
>>
>> @@ -486,6 +510,8 @@ int cmd_annotate(int argc, const char **argv)
>> if (ret < 0)
>> goto out_delete;
>>
>> + __hists__init(&annotate.events_hists, &perf_hpp_list);
>> +
>> if (setup_sorting(NULL) < 0)
>> usage_with_options(annotate_usage, options);
>>
>> diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
>> index 2dab0e5..16ec881 100644
>> --- a/tools/perf/util/annotate.c
>> +++ b/tools/perf/util/annotate.c
>> @@ -828,6 +828,14 @@ int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *samp
>> return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip, sample);
>> }
>>
>> +int hist_event__inc_addr_samples(struct hist_event *hevt,
>> + struct perf_sample *sample,
>> + int idx, u64 ip)
>> +{
>> + return symbol__inc_addr_samples(hevt->ms.sym, hevt->ms.map,
>> + idx, ip, sample);
>> +}
>> +
>> static void disasm_line__init_ins(struct disasm_line *dl, struct arch *arch, struct map *map)
>> {
>> dl->ins.ops = ins__find(arch, dl->ins.name);
>> diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
>> index 9ce575c..0d44cfe 100644
>> --- a/tools/perf/util/annotate.h
>> +++ b/tools/perf/util/annotate.h
>> @@ -165,6 +165,10 @@ int addr_map_symbol__account_cycles(struct addr_map_symbol *ams,
>> int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *sample,
>> int evidx, u64 addr);
>>
>> +int hist_event__inc_addr_samples(struct hist_event *hevt,
>> + struct perf_sample *sample,
>> + int idx, u64 addr);
>> +
>> int symbol__alloc_hist(struct symbol *sym);
>> void symbol__annotate_zero_histograms(struct symbol *sym);
>>
>> diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
>> index 12359bd..1500a8e 100644
>> --- a/tools/perf/util/sort.c
>> +++ b/tools/perf/util/sort.c
>> @@ -1490,6 +1490,27 @@ struct sort_entry sort_sym_size = {
>> .se_width_idx = HISTC_SYM_SIZE,
>> };
>>
>> +struct hist_event *hist_entry__event_add(struct hist_entry *he,
>> + struct perf_evsel *evsel)
>> +{
>> + int i;
>> +
>> + for (i = 0; i < he->event_nr; i++) {
>> + if (he->events[i].evsel == evsel)
>> + return &he->events[i];
>> + }
>> +
>> + if (i < HIST_ENTRY_EVENTS) {
>> + memset(&he->events[i], 0, sizeof(struct hist_event));
>> + memcpy(&he->events[i].ms, &he->ms, sizeof(struct map_symbol));
>> + he->events[i].evsel = evsel;
>> + he->events[i].idx = i;
>> + he->event_nr++;
>> + return &he->events[i];
>> + }
>> +
>> + return NULL;
>> +}
>>
>> struct sort_dimension {
>> const char *name;
>> diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
>> index b7c7559..446a2a3 100644
>> --- a/tools/perf/util/sort.h
>> +++ b/tools/perf/util/sort.h
>> @@ -79,6 +79,14 @@ struct hist_entry_ops {
>> void (*free)(void *ptr);
>> };
>>
>> +#define HIST_ENTRY_EVENTS 8
>> +
>> +struct hist_event {
>> + struct map_symbol ms;
>> + struct perf_evsel *evsel;
>> + int idx;
>> +};
>> +
>> /**
>> * struct hist_entry - histogram entry
>> *
>> @@ -95,6 +103,8 @@ struct hist_entry {
>> struct he_stat stat;
>> struct he_stat *stat_acc;
>> struct map_symbol ms;
>> + struct hist_event events[HIST_ENTRY_EVENTS];
>> + int event_nr;
>> struct thread *thread;
>> struct comm *comm;
>> struct namespace_id cgroup_id;
>> @@ -291,4 +301,7 @@ sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right);
>> int64_t
>> sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right);
>> char *hist_entry__get_srcline(struct hist_entry *he);
>> +struct hist_event *hist_entry__event_add(struct hist_entry *he,
>> + struct perf_evsel *evsel);
>> +
>> #endif /* __PERF_SORT_H */
>> --
>> 2.7.4