Message-Id: <20170110102502.106187-5-davidcc@google.com>
Date: Tue, 10 Jan 2017 02:25:00 -0800
From: David Carrillo-Cisneros <davidcc@...gle.com>
To: linux-kernel@...r.kernel.org
Cc: "x86@...nel.org" <x86@...nel.org>, Ingo Molnar <mingo@...hat.com>,
Thomas Gleixner <tglx@...utronix.de>,
Andi Kleen <ak@...ux.intel.com>,
Kan Liang <kan.liang@...el.com>,
Peter Zijlstra <peterz@...radead.org>,
Borislav Petkov <bp@...e.de>,
Srinivas Pandruvada <srinivas.pandruvada@...ux.intel.com>,
Dave Hansen <dave.hansen@...ux.intel.com>,
Vikas Shivappa <vikas.shivappa@...ux.intel.com>,
Mark Rutland <mark.rutland@....com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Vince Weaver <vince@...ter.net>, Paul Turner <pjt@...gle.com>,
Stephane Eranian <eranian@...gle.com>,
David Carrillo-Cisneros <davidcc@...gle.com>
Subject: [RFC 4/6] perf/core: avoid rb-tree traversal when no inactive events

Avoid unnecessary rb-tree traversals at sched in: keep per-context
counts of inactive pinned and flexible events, updated as events are
inserted into and removed from the context's rb-tree, so that
ctx_sched_in() can skip ctx_pinned_sched_in() and
ctx_flexible_sched_in() entirely when there are no inactive events of
the corresponding class to schedule.
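
To make the bookkeeping concrete, here is a standalone userspace
sketch. It is illustrative only; struct ctx_sketch, add_inactive(),
del_inactive() and sched_in_sketch() are made-up names, not kernel
code. The counters mirror rb-tree membership per event class, so an
empty class costs a single integer test instead of a tree traversal:

/*
 * Illustrative sketch only; not kernel code.  Models the counters
 * added by this patch: nr_inactive_pinned/nr_inactive_flexible track
 * how many inactive events of each class sit in the context rb-tree,
 * so scheduling can skip the tree walk when a class is empty.
 */
#include <stdbool.h>
#include <stdio.h>

struct ctx_sketch {
	int nr_inactive_pinned;
	int nr_inactive_flexible;
};

/* mirrors rbtree_add_inactive(): insert into the tree, bump counter */
static void add_inactive(struct ctx_sketch *ctx, bool pinned)
{
	if (pinned)
		ctx->nr_inactive_pinned++;
	else
		ctx->nr_inactive_flexible++;
}

/* mirrors ctx_sched_groups_del()/_to_active(): erase, drop counter */
static void del_inactive(struct ctx_sketch *ctx, bool pinned)
{
	if (pinned)
		ctx->nr_inactive_pinned--;
	else
		ctx->nr_inactive_flexible--;
}

/* mirrors the ctx_sched_in() change: no inactive events, no walk */
static void sched_in_sketch(struct ctx_sketch *ctx)
{
	if (ctx->nr_inactive_pinned)
		printf("walk pinned events\n");
	if (ctx->nr_inactive_flexible)
		printf("walk flexible events\n");
}

int main(void)
{
	struct ctx_sketch ctx = { 0, 0 };

	add_inactive(&ctx, true);
	sched_in_sketch(&ctx);	/* walks only the pinned class */
	del_inactive(&ctx, true);
	sched_in_sketch(&ctx);	/* both counters zero: no walks */
	return 0;
}
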
Signed-off-by: David Carrillo-Cisneros <davidcc@...gle.com>
---
include/linux/perf_event.h | 2 ++
kernel/events/core.c | 16 ++++++++++++++--
2 files changed, 16 insertions(+), 2 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index fd32ecc37d33..2e0c94a770e2 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -748,6 +748,8 @@ struct perf_event_context {
struct list_head event_list;
int nr_events;
int nr_active;
+ int nr_inactive_pinned;
+ int nr_inactive_flexible;
int is_active;
int nr_stat;
int nr_freq;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 302f13ca75dc..c7715b2627a9 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1531,6 +1531,10 @@ rbtree_add_inactive(struct perf_event *event,
/* Add new node and rebalance tree. */
rb_link_node(&event->rbtree_node, parent, pos);
rb_insert_color(&event->rbtree_node, &ctx->rbtree_root);
+ if (event->attr.pinned)
+ ctx->nr_inactive_pinned++;
+ else
+ ctx->nr_inactive_flexible++;
}

static void
@@ -1754,6 +1758,10 @@ static void ctx_sched_groups_del(struct perf_event *group,
WARN_ON(group->state != PERF_EVENT_STATE_INACTIVE);
rb_erase(&group->rbtree_node, &ctx->rbtree_root);
list_del_init(&group->ctx_active_entry);
+ if (group->attr.pinned)
+ ctx->nr_inactive_pinned--;
+ else
+ ctx->nr_inactive_flexible--;
}

/*
@@ -2142,6 +2151,10 @@ ctx_sched_groups_to_active(struct perf_event *event, struct perf_event_context *
WARN_ON(event->state != PERF_EVENT_STATE_ACTIVE);
rb_erase(&event->rbtree_node, &ctx->rbtree_root);
list_move_tail(&event->ctx_active_entry, h);
+ if (event->attr.pinned)
+ ctx->nr_inactive_pinned--;
+ else
+ ctx->nr_inactive_flexible--;
}

static int
@@ -3371,11 +3384,11 @@ ctx_sched_in(struct perf_event_context *ctx,
* First go through the list and put on any pinned groups
* in order to give them the best chance of going on.
*/
- if (is_active & EVENT_PINNED)
+ if ((is_active & EVENT_PINNED) && ctx->nr_inactive_pinned)
ctx_pinned_sched_in(ctx, cpuctx);

/* Then walk through the lower prio flexible groups */
- if (is_active & EVENT_FLEXIBLE)
+ if ((is_active & EVENT_FLEXIBLE) && ctx->nr_inactive_flexible)
ctx_flexible_sched_in(ctx, cpuctx);
}

--
2.11.0.390.gc69c2f50cf-goog