Message-Id: <20190814184051.3125-12-acme@kernel.org>
Date:   Wed, 14 Aug 2019 15:40:34 -0300
From:   Arnaldo Carvalho de Melo <acme@...nel.org>
To:     Ingo Molnar <mingo@...nel.org>,
        Thomas Gleixner <tglx@...utronix.de>
Cc:     Jiri Olsa <jolsa@...nel.org>, Namhyung Kim <namhyung@...nel.org>,
        Clark Williams <williams@...hat.com>,
        linux-kernel@...r.kernel.org, linux-perf-users@...r.kernel.org,
        Arnaldo Carvalho de Melo <acme@...hat.com>,
        Adrian Hunter <adrian.hunter@...el.com>
Subject: [PATCH 11/28] perf top: Collapse and resort all evsels in a group

From: Arnaldo Carvalho de Melo <acme@...hat.com>

And link them, i.e., find the hist entries in the non-leader events and
link them to the corresponding ones in the leader.

This should be the same thing already done for the 'perf report' case,
but now we do it periodically.

With this in place we get percentages in the second overhead column
onwards, not just in the first one (the leader).
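
In essence, each refresh now collapses every evsel's hists and, for the
non-leader group members, pairs their entries with the leader's before the
output resort. A condensed sketch of that extra step, using the names from
the diff below:

	evlist__for_each_entry(evlist, pos) {
		struct hists *hists = evsel__hists(pos);

		hists__collapse_resort(hists, NULL);

		/* Only non-leader group members need pairing with the leader */
		if (symbol_conf.event_group && !perf_evsel__is_group_leader(pos)) {
			struct hists *leader_hists = evsel__hists(pos->leader);

			hists__match(leader_hists, hists); /* pair entries present in both */
			hists__link(leader_hists, hists);  /* link the remaining, unmatched ones */
		}
	}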

Try it using:

  perf top --stdio -e '{cycles,instructions}'

You should see something like:

   PerfTop:   20776 irqs/sec  kernel:68.7%  exact:  0.0% lost: 0/0 drop: 0/0 [cycles],  (all, 8 CPUs)
  ---------------------------------------------------------------------------------------------------

     4.44%   0.44%  [kernel]                 [k] do_syscall_64
     2.27%   0.17%  [kernel]                 [k] entry_SYSCALL_64
     1.73%   0.27%  [kernel]                 [k] syscall_return_via_sysret
     1.60%   0.91%  [kernel]                 [k] _raw_spin_lock_irqsave
     1.45%   3.53%  libglib-2.0.so.0.6000.4  [.] g_string_insert_unichar
     1.39%   0.21%  [kernel]                 [k] copy_user_enhanced_fast_string
     1.26%   1.15%  [kernel]                 [k] psi_task_change
     1.16%   0.14%  libpixman-1.so.0.38.0    [.] 0x000000000006f403
     1.00%   0.32%  [kernel]                 [k] __sched_text_start
     0.97%   2.11%  [kernel]                 [k] n_tty_write
     0.96%   0.04%  [kernel]                 [k] queued_spin_lock_slowpath
     0.93%   0.88%  [kernel]                 [k] menu_select
     0.87%   0.14%  [kernel]                 [k] try_to_wake_up
     0.77%   0.10%  libpixman-1.so.0.38.0    [.] 0x000000000006f40b
     0.73%   0.09%  libpixman-1.so.0.38.0    [.] 0x000000000006f413
     0.69%   0.48%  libc-2.29.so             [.] __memmove_avx_unaligned_erms
     0.68%   0.29%  [kernel]                 [k] _raw_spin_lock_irq
     0.61%   0.04%  libpixman-1.so.0.38.0    [.] 0x000000000006f423
     0.60%   0.37%  [kernel]                 [k] native_sched_clock
     0.57%   0.23%  [kernel]                 [k] do_idle
     0.57%   0.23%  [kernel]                 [k] __fget
     0.56%   0.30%  [kernel]                 [k] __switch_to_asm
     0.56%   0.00%  libc-2.29.so             [.] __memset_avx2_erms
     0.52%   0.32%  [kernel]                 [k] _raw_spin_lock
     0.49%   0.24%  [kernel]                 [k] n_tty_poll
     0.49%   0.54%  libglib-2.0.so.0.6000.4  [.] g_mutex_lock
     0.48%   0.62%  [kernel]                 [k] _raw_spin_unlock_irqrestore
     0.47%   0.27%  [kernel]                 [k] __switch_to
     0.47%   0.25%  [kernel]                 [k] pick_next_task_fair
     0.45%   0.17%  [kernel]                 [k] filldir64
     0.40%   0.16%  [kernel]                 [k] update_rq_clock
     0.39%   0.19%  [kernel]                 [k] enqueue_task_fair
  #

Cc: Adrian Hunter <adrian.hunter@...el.com>
Cc: Jiri Olsa <jolsa@...nel.org>
Cc: Namhyung Kim <namhyung@...nel.org>
Link: https://lkml.kernel.org/n/tip-uw8cjeifxvjpkjp6x2iil0ar@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@...hat.com>
---
 tools/perf/builtin-top.c | 30 ++++++++++++++++++++++++++----
 1 file changed, 26 insertions(+), 4 deletions(-)

diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 94e34853a238..78e7efc597a6 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -264,6 +264,30 @@ static void perf_top__show_details(struct perf_top *top)
 	pthread_mutex_unlock(&notes->lock);
 }
 
+static void evlist__resort_hists(struct evlist *evlist)
+{
+	struct evsel *pos;
+
+	evlist__for_each_entry(evlist, pos) {
+		struct hists *hists = evsel__hists(pos);
+
+		hists__collapse_resort(hists, NULL);
+
+		/* Non-group events are considered as leader */
+		if (symbol_conf.event_group &&
+		    !perf_evsel__is_group_leader(pos)) {
+			struct hists *leader_hists = evsel__hists(pos->leader);
+
+			hists__match(leader_hists, hists);
+			hists__link(leader_hists, hists);
+		}
+	}
+
+	evlist__for_each_entry(evlist, pos) {
+		perf_evsel__output_resort(pos, NULL);
+	}
+}
+
 static void perf_top__print_sym_table(struct perf_top *top)
 {
 	char bf[160];
@@ -304,8 +328,7 @@ static void perf_top__print_sym_table(struct perf_top *top)
 		}
 	}
 
-	hists__collapse_resort(hists, NULL);
-	perf_evsel__output_resort(evsel, NULL);
+	evlist__resort_hists(top->evlist);
 
 	hists__output_recalc_col_len(hists, top->print_entries - printed);
 	putchar('\n');
@@ -570,8 +593,7 @@ static void perf_top__sort_new_samples(void *arg)
 		}
 	}
 
-	hists__collapse_resort(hists, NULL);
-	perf_evsel__output_resort(evsel, NULL);
+	evlist__resort_hists(t->evlist);
 
 	if (t->lost || t->drop)
 		pr_warning("Too slow to read ring buffer (change period (-c/-F) or limit CPUs (-C)\n");
-- 
2.21.0
