Message-Id: <20191027105243.34339-3-like.xu@linux.intel.com>
Date: Sun, 27 Oct 2019 18:52:39 +0800
From: Like Xu <like.xu@...ux.intel.com>
To: Peter Zijlstra <peterz@...radead.org>,
Paolo Bonzini <pbonzini@...hat.com>
Cc: Sean Christopherson <sean.j.christopherson@...el.com>,
Jim Mattson <jmattson@...gle.com>,
Wanpeng Li <wanpengli@...cent.com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Borislav Petkov <bp@...en8.de>, Ingo Molnar <mingo@...hat.com>,
Jiri Olsa <jolsa@...hat.com>, Joerg Roedel <joro@...tes.org>,
Namhyung Kim <namhyung@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>,
Vitaly Kuznetsov <vkuznets@...hat.com>, kan.liang@...el.com,
wei.w.wang@...el.com, linux-kernel@...r.kernel.org,
kvm@...r.kernel.org
Subject: [PATCH v4 2/6] perf/core: Provide a kernel-internal interface to pause perf_event

Export perf_event_pause() as an external accessor for kernel users (such
as KVM) that need to disable a perf_event and read its count while taking
perf_event_ctx_lock only once. The counter value can optionally be reset
at the same time.
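
For reference, a minimal usage sketch (not part of this patch) of how an
in-kernel caller such as a vPMU implementation might use the new helper.
The struct and field names below (my_pmc, counter, perf_event) are
illustrative assumptions only and are not taken from this series:

#include <linux/perf_event.h>

/* Hypothetical per-counter state kept by the caller. */
struct my_pmc {
	u64 counter;			/* accumulated count */
	struct perf_event *perf_event;	/* backing event, may be NULL */
};

static void my_pmc_pause(struct my_pmc *pmc)
{
	if (!pmc->perf_event)
		return;

	/*
	 * Disable the event, read its count and reset it to zero under a
	 * single perf_event_ctx_lock section, then fold the value into
	 * the caller's accumulator.
	 */
	pmc->counter += perf_event_pause(pmc->perf_event, true);
}

Doing the disable, read and reset in one locked section avoids taking
perf_event_ctx_lock twice, as separate perf_event_disable() and
perf_event_read_value() calls would.
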
Suggested-by: Peter Zijlstra <peterz@...radead.org>
Signed-off-by: Like Xu <like.xu@...ux.intel.com>
---
 include/linux/perf_event.h |  5 +++++
 kernel/events/core.c       | 18 ++++++++++++++++++
 2 files changed, 23 insertions(+)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index d601df36e671..e9768bfc76f6 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1337,6 +1337,7 @@ extern void perf_event_disable_inatomic(struct perf_event *event);
 extern void perf_event_task_tick(void);
 extern int perf_event_account_interrupt(struct perf_event *event);
 extern int perf_event_period(struct perf_event *event, u64 value);
+extern u64 perf_event_pause(struct perf_event *event, bool reset);
 #else /* !CONFIG_PERF_EVENTS: */
 static inline void *
 perf_aux_output_begin(struct perf_output_handle *handle,
@@ -1420,6 +1421,10 @@ static inline int perf_event_period(struct perf_event *event, u64 value)
 {
 	return -EINVAL;
 }
+static inline u64 perf_event_pause(struct perf_event *event, bool reset)
+{
+	return 0;
+}
 #endif
 
 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 2fbb485ae880..35cc901e0e4f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5029,6 +5029,24 @@ static void _perf_event_reset(struct perf_event *event)
 	perf_event_update_userpage(event);
 }
 
+/* Assume it's not an event with inherit set. */
+u64 perf_event_pause(struct perf_event *event, bool reset)
+{
+	struct perf_event_context *ctx;
+	u64 count;
+
+	ctx = perf_event_ctx_lock(event);
+	WARN_ON_ONCE(event->attr.inherit);
+	_perf_event_disable(event);
+	count = local64_read(&event->count);
+	if (reset)
+		local64_set(&event->count, 0);
+	perf_event_ctx_unlock(event, ctx);
+
+	return count;
+}
+EXPORT_SYMBOL_GPL(perf_event_pause);
+
 /*
  * Holding the top-level event's child_mutex means that any
  * descendant process that has inherited this event will block
--
2.21.0