Message-Id: <1477787923-61185-29-git-send-email-davidcc@google.com>
Date: Sat, 29 Oct 2016 17:38:25 -0700
From: David Carrillo-Cisneros <davidcc@...gle.com>
To: linux-kernel@...r.kernel.org
Cc: "x86@...nel.org" <x86@...nel.org>, Ingo Molnar <mingo@...hat.com>,
Thomas Gleixner <tglx@...utronix.de>,
Andi Kleen <ak@...ux.intel.com>,
Kan Liang <kan.liang@...el.com>,
Peter Zijlstra <peterz@...radead.org>,
Vegard Nossum <vegard.nossum@...il.com>,
Marcelo Tosatti <mtosatti@...hat.com>,
Nilay Vaish <nilayvaish@...il.com>,
Borislav Petkov <bp@...e.de>,
Vikas Shivappa <vikas.shivappa@...ux.intel.com>,
Ravi V Shankar <ravi.v.shankar@...el.com>,
Fenghua Yu <fenghua.yu@...el.com>,
Paul Turner <pjt@...gle.com>,
Stephane Eranian <eranian@...gle.com>,
David Carrillo-Cisneros <davidcc@...gle.com>
Subject: [PATCH v3 28/46] perf,perf/x86,perf/powerpc,perf/arm,perf/*: add int error return to pmu::read

New PMUs, such as the CMT PMU, do not guarantee that a read will succeed
even if pmu::add was successful.

In the generic code, this patch adds an int error return to pmu::read and
completes the error-checking path up to perf_read().

In the CMT PMU, it adds proper error handling of hardware read failures.

In all other PMUs, pmu::read() simply returns 0.
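
For illustration only (not part of the diff below), a minimal sketch of what
a driver's read callback could look like under the new contract. The names
foo_pmu_read() and foo_hw_read_counter() are hypothetical; real drivers keep
their existing update logic and only add the return value:

	static int foo_pmu_read(struct perf_event *event)
	{
		struct hw_perf_event *hwc = &event->hw;
		u64 prev, now;

		/* foo_hw_read_counter() is a hypothetical hardware accessor. */
		if (foo_hw_read_counter(hwc->idx, &now))
			return -EIO;	/* surfaced via __perf_event_read() up to perf_read() */

		prev = local64_xchg(&hwc->prev_count, now);
		local64_add(now - prev, &event->count);
		return 0;
	}

Call sites that cannot act on a failure keep the old behavior by discarding
the return value, as the diff does with (void)event->pmu->read(event).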
Reviewed-by: Stephane Eranian <eranian@...gle.com>
Signed-off-by: David Carrillo-Cisneros <davidcc@...gle.com>
---
arch/alpha/kernel/perf_event.c | 3 +-
arch/arc/kernel/perf_event.c | 3 +-
arch/arm64/include/asm/hw_breakpoint.h | 2 +-
arch/arm64/kernel/hw_breakpoint.c | 3 +-
arch/metag/kernel/perf/perf_event.c | 5 +--
arch/mips/kernel/perf_event_mipsxx.c | 3 +-
arch/powerpc/include/asm/hw_breakpoint.h | 2 +-
arch/powerpc/kernel/hw_breakpoint.c | 3 +-
arch/powerpc/perf/core-book3s.c | 11 +++---
arch/powerpc/perf/core-fsl-emb.c | 5 +--
arch/powerpc/perf/hv-24x7.c | 5 +--
arch/powerpc/perf/hv-gpci.c | 3 +-
arch/s390/kernel/perf_cpum_cf.c | 5 +--
arch/s390/kernel/perf_cpum_sf.c | 3 +-
arch/sh/include/asm/hw_breakpoint.h | 2 +-
arch/sh/kernel/hw_breakpoint.c | 3 +-
arch/sparc/kernel/perf_event.c | 2 +-
arch/tile/kernel/perf_event.c | 3 +-
arch/x86/events/amd/ibs.c | 2 +-
arch/x86/events/amd/iommu.c | 5 +--
arch/x86/events/amd/uncore.c | 3 +-
arch/x86/events/core.c | 3 +-
arch/x86/events/intel/bts.c | 3 +-
arch/x86/events/intel/cmt.c | 4 ++-
arch/x86/events/intel/cstate.c | 3 +-
arch/x86/events/intel/pt.c | 3 +-
arch/x86/events/intel/rapl.c | 3 +-
arch/x86/events/intel/uncore.c | 3 +-
arch/x86/events/intel/uncore.h | 2 +-
arch/x86/events/msr.c | 3 +-
arch/x86/include/asm/hw_breakpoint.h | 2 +-
arch/x86/kernel/hw_breakpoint.c | 3 +-
arch/x86/kvm/pmu.h | 10 +++---
drivers/bus/arm-cci.c | 3 +-
drivers/bus/arm-ccn.c | 3 +-
drivers/perf/arm_pmu.c | 3 +-
include/linux/perf_event.h | 6 ++--
kernel/events/core.c | 60 ++++++++++++++++++++------------
38 files changed, 120 insertions(+), 73 deletions(-)
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index 5c218aa..3bf8a60 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -520,11 +520,12 @@ static void alpha_pmu_del(struct perf_event *event, int flags)
}
-static void alpha_pmu_read(struct perf_event *event)
+static int alpha_pmu_read(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
alpha_perf_event_update(event, hwc, hwc->idx, 0);
+ return 0;
}
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 2ce24e7..efbcc2d 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -116,9 +116,10 @@ static void arc_perf_event_update(struct perf_event *event,
local64_sub(delta, &hwc->period_left);
}
-static void arc_pmu_read(struct perf_event *event)
+static int arc_pmu_read(struct perf_event *event)
{
arc_perf_event_update(event, &event->hw, event->hw.idx);
+ return 0;
}
static int arc_pmu_cache_event(u64 config)
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index 9510ace..f82f21f 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -127,7 +127,7 @@ extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
extern int arch_install_hw_breakpoint(struct perf_event *bp);
extern void arch_uninstall_hw_breakpoint(struct perf_event *bp);
-extern void hw_breakpoint_pmu_read(struct perf_event *bp);
+extern int hw_breakpoint_pmu_read(struct perf_event *bp);
extern int hw_breakpoint_slots(int type);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 948b731..a4fe1c5 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -936,8 +936,9 @@ static int __init arch_hw_breakpoint_init(void)
}
arch_initcall(arch_hw_breakpoint_init);
-void hw_breakpoint_pmu_read(struct perf_event *bp)
+int hw_breakpoint_pmu_read(struct perf_event *bp)
{
+ return 0;
}
/*
diff --git a/arch/metag/kernel/perf/perf_event.c b/arch/metag/kernel/perf/perf_event.c
index 052cba2..128aa0a 100644
--- a/arch/metag/kernel/perf/perf_event.c
+++ b/arch/metag/kernel/perf/perf_event.c
@@ -360,15 +360,16 @@ static void metag_pmu_del(struct perf_event *event, int flags)
perf_event_update_userpage(event);
}
-static void metag_pmu_read(struct perf_event *event)
+static int metag_pmu_read(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
/* Don't read disabled counters! */
if (hwc->idx < 0)
- return;
+ return 0;
metag_pmu_event_update(event, hwc, hwc->idx);
+ return 0;
}
static struct pmu pmu = {
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index d3ba9f4..d64ed3e 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -507,7 +507,7 @@ static void mipspmu_del(struct perf_event *event, int flags)
perf_event_update_userpage(event);
}
-static void mipspmu_read(struct perf_event *event)
+static int mipspmu_read(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
@@ -516,6 +516,7 @@ static void mipspmu_read(struct perf_event *event)
return;
mipspmu_event_update(event, hwc, hwc->idx);
+ return 0;
}
static void mipspmu_enable(struct pmu *pmu)
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
index ac6432d..5218696 100644
--- a/arch/powerpc/include/asm/hw_breakpoint.h
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -66,7 +66,7 @@ extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data);
int arch_install_hw_breakpoint(struct perf_event *bp);
void arch_uninstall_hw_breakpoint(struct perf_event *bp);
-void hw_breakpoint_pmu_read(struct perf_event *bp);
+int hw_breakpoint_pmu_read(struct perf_event *bp);
extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk);
extern struct pmu perf_ops_bp;
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 9781c69..8a016ad 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -364,7 +364,8 @@ void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
t->ptrace_bps[0] = NULL;
}
-void hw_breakpoint_pmu_read(struct perf_event *bp)
+int hw_breakpoint_pmu_read(struct perf_event *bp)
{
/* TODO */
+ return 0;
}
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 72c27b8..1019d1e 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1002,20 +1002,20 @@ static u64 check_and_compute_delta(u64 prev, u64 val)
return delta;
}
-static void power_pmu_read(struct perf_event *event)
+static int power_pmu_read(struct perf_event *event)
{
s64 val, delta, prev;
if (event->hw.state & PERF_HES_STOPPED)
- return;
+ return 0;
if (!event->hw.idx)
- return;
+ return 0;
if (is_ebb_event(event)) {
val = read_pmc(event->hw.idx);
local64_set(&event->hw.prev_count, val);
- return;
+ return 0;
}
/*
@@ -1029,7 +1029,7 @@ static void power_pmu_read(struct perf_event *event)
val = read_pmc(event->hw.idx);
delta = check_and_compute_delta(prev, val);
if (!delta)
- return;
+ return 0;
} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
local64_add(delta, &event->count);
@@ -1049,6 +1049,7 @@ static void power_pmu_read(struct perf_event *event)
if (val < 1)
val = 1;
} while (local64_cmpxchg(&event->hw.period_left, prev, val) != prev);
+ return 0;
}
/*
diff --git a/arch/powerpc/perf/core-fsl-emb.c b/arch/powerpc/perf/core-fsl-emb.c
index 5d747b4..46d982e 100644
--- a/arch/powerpc/perf/core-fsl-emb.c
+++ b/arch/powerpc/perf/core-fsl-emb.c
@@ -176,12 +176,12 @@ static void write_pmlcb(int idx, unsigned long val)
isync();
}
-static void fsl_emb_pmu_read(struct perf_event *event)
+static int fsl_emb_pmu_read(struct perf_event *event)
{
s64 val, delta, prev;
if (event->hw.state & PERF_HES_STOPPED)
- return;
+ return 0;
/*
* Performance monitor interrupts come even when interrupts
@@ -198,6 +198,7 @@ static void fsl_emb_pmu_read(struct perf_event *event)
delta = (val - prev) & 0xfffffffful;
local64_add(delta, &event->count);
local64_sub(delta, &event->hw.period_left);
+ return 0;
}
/*
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index 7b2ca16..41cd973 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -1268,7 +1268,7 @@ static void update_event_count(struct perf_event *event, u64 now)
local64_add(now - prev, &event->count);
}
-static void h_24x7_event_read(struct perf_event *event)
+static int h_24x7_event_read(struct perf_event *event)
{
u64 now;
struct hv_24x7_request_buffer *request_buffer;
@@ -1289,7 +1289,7 @@ static void h_24x7_event_read(struct perf_event *event)
int ret;
if (__this_cpu_read(hv_24x7_txn_err))
- return;
+ return 0;
request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
@@ -1323,6 +1323,7 @@ static void h_24x7_event_read(struct perf_event *event)
now = h_24x7_get_value(event);
update_event_count(event, now);
}
+ return 0;
}
static void h_24x7_event_start(struct perf_event *event, int flags)
diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c
index 43fabb3..66c1ce7 100644
--- a/arch/powerpc/perf/hv-gpci.c
+++ b/arch/powerpc/perf/hv-gpci.c
@@ -191,12 +191,13 @@ static u64 h_gpci_get_value(struct perf_event *event)
return count;
}
-static void h_gpci_event_update(struct perf_event *event)
+static int h_gpci_event_update(struct perf_event *event)
{
s64 prev;
u64 now = h_gpci_get_value(event);
prev = local64_xchg(&event->hw.prev_count, now);
local64_add(now - prev, &event->count);
+ return 0;
}
static void h_gpci_event_start(struct perf_event *event, int flags)
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 037c2a2..37fa78c 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -471,12 +471,13 @@ static int hw_perf_event_update(struct perf_event *event)
return err;
}
-static void cpumf_pmu_read(struct perf_event *event)
+static int cpumf_pmu_read(struct perf_event *event)
{
if (event->hw.state & PERF_HES_STOPPED)
- return;
+ return 0;
hw_perf_event_update(event);
+ return 0;
}
static void cpumf_pmu_start(struct perf_event *event, int flags)
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index fcc634c..87fd04a 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1296,9 +1296,10 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
sampl_overflow, event_overflow);
}
-static void cpumsf_pmu_read(struct perf_event *event)
+static int cpumsf_pmu_read(struct perf_event *event)
{
/* Nothing to do ... updates are interrupt-driven */
+ return 0;
}
/* Activate sampling control.
diff --git a/arch/sh/include/asm/hw_breakpoint.h b/arch/sh/include/asm/hw_breakpoint.h
index ec9ad59..d3ad1bf 100644
--- a/arch/sh/include/asm/hw_breakpoint.h
+++ b/arch/sh/include/asm/hw_breakpoint.h
@@ -60,7 +60,7 @@ extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
int arch_install_hw_breakpoint(struct perf_event *bp);
void arch_uninstall_hw_breakpoint(struct perf_event *bp);
-void hw_breakpoint_pmu_read(struct perf_event *bp);
+int hw_breakpoint_pmu_read(struct perf_event *bp);
extern void arch_fill_perf_breakpoint(struct perf_event *bp);
extern int register_sh_ubc(struct sh_ubc *);
diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c
index 2197fc5..3a2e719 100644
--- a/arch/sh/kernel/hw_breakpoint.c
+++ b/arch/sh/kernel/hw_breakpoint.c
@@ -401,9 +401,10 @@ int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused,
return hw_breakpoint_handler(data);
}
-void hw_breakpoint_pmu_read(struct perf_event *bp)
+int hw_breakpoint_pmu_read(struct perf_event *bp)
{
/* TODO */
+ return 0;
}
int register_sh_ubc(struct sh_ubc *ubc)
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 710f327..ab118e9 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1131,7 +1131,7 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
local_irq_restore(flags);
}
-static void sparc_pmu_read(struct perf_event *event)
+static int sparc_pmu_read(struct perf_event *event)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int idx = active_event_index(cpuc, event);
diff --git a/arch/tile/kernel/perf_event.c b/arch/tile/kernel/perf_event.c
index 6394c1c..e18cdf2 100644
--- a/arch/tile/kernel/perf_event.c
+++ b/arch/tile/kernel/perf_event.c
@@ -734,9 +734,10 @@ static void tile_pmu_del(struct perf_event *event, int flags)
/*
* Propagate event elapsed time into the event.
*/
-static inline void tile_pmu_read(struct perf_event *event)
+static inline int tile_pmu_read(struct perf_event *event)
{
tile_perf_event_update(event);
+ return 0;
}
/*
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index b26ee32..3de2ada 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -511,7 +511,7 @@ static void perf_ibs_del(struct perf_event *event, int flags)
perf_event_update_userpage(event);
}
-static void perf_ibs_read(struct perf_event *event) { }
+static int perf_ibs_read(struct perf_event *event) { return 0; }
PMU_FORMAT_ATTR(rand_en, "config:57");
PMU_FORMAT_ATTR(cnt_ctl, "config:19");
diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c
index b28200d..2bfcaaa 100644
--- a/arch/x86/events/amd/iommu.c
+++ b/arch/x86/events/amd/iommu.c
@@ -317,7 +317,7 @@ static void perf_iommu_start(struct perf_event *event, int flags)
}
-static void perf_iommu_read(struct perf_event *event)
+static int perf_iommu_read(struct perf_event *event)
{
u64 count = 0ULL;
u64 prev_raw_count = 0ULL;
@@ -335,13 +335,14 @@ static void perf_iommu_read(struct perf_event *event)
prev_raw_count = local64_read(&hwc->prev_count);
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
count) != prev_raw_count)
- return;
+ return 0;
/* Handling 48-bit counter overflowing */
delta = (count << COUNTER_SHIFT) - (prev_raw_count << COUNTER_SHIFT);
delta >>= COUNTER_SHIFT;
local64_add(delta, &event->count);
+ return 0;
}
static void perf_iommu_stop(struct perf_event *event, int flags)
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index 65577f0..c84415d 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -73,7 +73,7 @@ static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
return NULL;
}
-static void amd_uncore_read(struct perf_event *event)
+static int amd_uncore_read(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
u64 prev, new;
@@ -90,6 +90,7 @@ static void amd_uncore_read(struct perf_event *event)
delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
delta >>= COUNTER_SHIFT;
local64_add(delta, &event->count);
+ return 0;
}
static void amd_uncore_start(struct perf_event *event, int flags)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index d31735f..9e52a7b 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1848,9 +1848,10 @@ static int __init init_hw_perf_events(void)
}
early_initcall(init_hw_perf_events);
-static inline void x86_pmu_read(struct perf_event *event)
+static inline int x86_pmu_read(struct perf_event *event)
{
x86_perf_event_update(event);
+ return 0;
}
/*
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index 982c9e3..6832d59 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -575,8 +575,9 @@ static int bts_event_init(struct perf_event *event)
return 0;
}
-static void bts_event_read(struct perf_event *event)
+static int bts_event_read(struct perf_event *event)
{
+ return 0;
}
static __init int bts_init(void)
diff --git a/arch/x86/events/intel/cmt.c b/arch/x86/events/intel/cmt.c
index bd903ae..ef1000f 100644
--- a/arch/x86/events/intel/cmt.c
+++ b/arch/x86/events/intel/cmt.c
@@ -1318,8 +1318,10 @@ static struct monr *monr_next_descendant_post(struct monr *pos,
return pos->parent;
}
-static void intel_cmt_event_read(struct perf_event *event)
+static int intel_cmt_event_read(struct perf_event *event)
{
+ /* To add support in next patches in series */
+ return -ENOTSUPP;
}
static inline void __intel_cmt_event_start(struct perf_event *event,
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 3ca87b5..b96fed8 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -323,7 +323,7 @@ static inline u64 cstate_pmu_read_counter(struct perf_event *event)
return val;
}
-static void cstate_pmu_event_update(struct perf_event *event)
+static int cstate_pmu_event_update(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
u64 prev_raw_count, new_raw_count;
@@ -337,6 +337,7 @@ static void cstate_pmu_event_update(struct perf_event *event)
goto again;
local64_add(new_raw_count - prev_raw_count, &event->count);
+ return 0;
}
static void cstate_pmu_event_start(struct perf_event *event, int mode)
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index c5047b8..c46f946 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -1356,8 +1356,9 @@ static int pt_event_add(struct perf_event *event, int mode)
return ret;
}
-static void pt_event_read(struct perf_event *event)
+static int pt_event_read(struct perf_event *event)
{
+ return 0;
}
static void pt_event_destroy(struct perf_event *event)
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 0a535ce..37c3dff 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -411,9 +411,10 @@ static int rapl_pmu_event_init(struct perf_event *event)
return ret;
}
-static void rapl_pmu_event_read(struct perf_event *event)
+static int rapl_pmu_event_read(struct perf_event *event)
{
rapl_event_update(event);
+ return 0;
}
static ssize_t rapl_get_attr_cpumask(struct device *dev,
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index efca268..ad7d035 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -580,10 +580,11 @@ static void uncore_pmu_event_del(struct perf_event *event, int flags)
event->hw.last_tag = ~0ULL;
}
-void uncore_pmu_event_read(struct perf_event *event)
+int uncore_pmu_event_read(struct perf_event *event)
{
struct intel_uncore_box *box = uncore_event_to_box(event);
uncore_perf_event_update(box, event);
+ return 0;
}
/*
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
index ad986c1..818b0ef 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -345,7 +345,7 @@ struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
-void uncore_pmu_event_read(struct perf_event *event);
+int uncore_pmu_event_read(struct perf_event *event);
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event);
diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
index 4bb3ec6..add5caf 100644
--- a/arch/x86/events/msr.c
+++ b/arch/x86/events/msr.c
@@ -170,7 +170,7 @@ static inline u64 msr_read_counter(struct perf_event *event)
return now;
}
-static void msr_event_update(struct perf_event *event)
+static int msr_event_update(struct perf_event *event)
{
u64 prev, now;
s64 delta;
@@ -188,6 +188,7 @@ static void msr_event_update(struct perf_event *event)
delta = sign_extend64(delta, 31);
local64_add(delta, &event->count);
+ return 0;
}
static void msr_event_start(struct perf_event *event, int flags)
diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h
index 6c98be8..a1c4ce00 100644
--- a/arch/x86/include/asm/hw_breakpoint.h
+++ b/arch/x86/include/asm/hw_breakpoint.h
@@ -59,7 +59,7 @@ extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
int arch_install_hw_breakpoint(struct perf_event *bp);
void arch_uninstall_hw_breakpoint(struct perf_event *bp);
-void hw_breakpoint_pmu_read(struct perf_event *bp);
+int hw_breakpoint_pmu_read(struct perf_event *bp);
void hw_breakpoint_pmu_unthrottle(struct perf_event *bp);
extern void
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 8771766..e72ce6e 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -540,7 +540,8 @@ int hw_breakpoint_exceptions_notify(
return hw_breakpoint_handler(data);
}
-void hw_breakpoint_pmu_read(struct perf_event *bp)
+int hw_breakpoint_pmu_read(struct perf_event *bp)
{
/* TODO */
+ return 0;
}
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index f96e1f9..46fd299 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -39,12 +39,14 @@ static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
- u64 counter, enabled, running;
+ u64 counter, counter_tmp, enabled, running;
counter = pmc->counter;
- if (pmc->perf_event)
- counter += perf_event_read_value(pmc->perf_event,
- &enabled, &running);
+ if (pmc->perf_event) {
+ if (!perf_event_read_value(pmc->perf_event, &counter_tmp,
+ &enabled, &running))
+ counter += counter_tmp;
+ }
/* FIXME: Scaling needed? */
return counter & pmc_bitmask(pmc);
}
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 8900823..2d27c70 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -1033,9 +1033,10 @@ static u64 pmu_event_update(struct perf_event *event)
return new_raw_count;
}
-static void pmu_read(struct perf_event *event)
+static int pmu_read(struct perf_event *event)
{
pmu_event_update(event);
+ return 0;
}
static void pmu_event_set_period(struct perf_event *event)
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
index d1074d9..846ed8b 100644
--- a/drivers/bus/arm-ccn.c
+++ b/drivers/bus/arm-ccn.c
@@ -1145,9 +1145,10 @@ static void arm_ccn_pmu_event_del(struct perf_event *event, int flags)
hrtimer_cancel(&ccn->dt.hrtimer);
}
-static void arm_ccn_pmu_event_read(struct perf_event *event)
+static int arm_ccn_pmu_event_read(struct perf_event *event)
{
arm_ccn_pmu_event_update(event);
+ return 0;
}
static void arm_ccn_pmu_enable(struct pmu *pmu)
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index b37b572..aee7dff 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -163,10 +163,11 @@ u64 armpmu_event_update(struct perf_event *event)
return new_raw_count;
}
-static void
+static int
armpmu_read(struct perf_event *event)
{
armpmu_event_update(event);
+ return 0;
}
static void
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 9f388d4..9120640 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -366,7 +366,7 @@ struct pmu {
* For sampling capable PMUs this will also update the software period
* hw_perf_event::period_left field.
*/
- void (*read) (struct perf_event *event);
+ int (*read) (struct perf_event *event);
/*
* Group events scheduling is treated as a transaction, add
@@ -886,8 +886,8 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr,
extern void perf_pmu_migrate_context(struct pmu *pmu,
int src_cpu, int dst_cpu);
extern u64 perf_event_read_local(struct perf_event *event);
-extern u64 perf_event_read_value(struct perf_event *event,
- u64 *enabled, u64 *running);
+extern int perf_event_read_value(struct perf_event *event,
+ u64 *total, u64 *enabled, u64 *running);
struct perf_sample_data {
diff --git a/kernel/events/core.c b/kernel/events/core.c
index e11a16a..059e5bb 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2721,7 +2721,7 @@ static void __perf_event_sync_stat(struct perf_event *event,
*/
switch (event->state) {
case PERF_EVENT_STATE_ACTIVE:
- event->pmu->read(event);
+ (void)event->pmu->read(event);
/* fall-through */
case PERF_EVENT_STATE_INACTIVE:
@@ -3473,6 +3473,7 @@ static void __perf_event_read(void *info)
return;
raw_spin_lock(&ctx->lock);
+
if (ctx->is_active) {
update_context_time(ctx);
update_cgrp_time_from_event(event);
@@ -3483,14 +3484,15 @@ static void __perf_event_read(void *info)
goto unlock;
if (!data->group) {
- pmu->read(event);
- data->ret = 0;
+ data->ret = pmu->read(event);
goto unlock;
}
pmu->start_txn(pmu, PERF_PMU_TXN_READ);
- pmu->read(event);
+ data->ret = pmu->read(event);
+ if (data->ret)
+ goto unlock;
list_for_each_entry(sub, &event->sibling_list, group_entry) {
update_event_times(sub);
@@ -3499,7 +3501,9 @@ static void __perf_event_read(void *info)
* Use sibling's PMU rather than @event's since
* sibling could be on different (eg: software) PMU.
*/
- sub->pmu->read(sub);
+ data->ret = sub->pmu->read(sub);
+ if (data->ret)
+ goto unlock;
}
}
@@ -3520,6 +3524,7 @@ static inline u64 perf_event_count(struct perf_event *event)
* - either for the current task, or for this CPU
* - does not have inherit set, for inherited task events
* will not be local and we cannot read them atomically
+ * - pmu::read cannot fail
*/
u64 perf_event_read_local(struct perf_event *event)
{
@@ -3552,7 +3557,7 @@ u64 perf_event_read_local(struct perf_event *event)
* oncpu == -1).
*/
if (event->oncpu == smp_processor_id())
- event->pmu->read(event);
+ (void) event->pmu->read(event);
val = local64_read(&event->count);
local_irq_restore(flags);
@@ -3611,7 +3616,6 @@ static int perf_event_read(struct perf_event *event, bool group)
update_event_times(event);
raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
-
return ret;
}
@@ -4219,18 +4223,22 @@ static int perf_release(struct inode *inode, struct file *file)
return 0;
}
-u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
+int perf_event_read_value(struct perf_event *event,
+ u64 *total, u64 *enabled, u64 *running)
{
struct perf_event *child;
- u64 total = 0;
+ int ret;
+ *total = 0;
*enabled = 0;
*running = 0;
mutex_lock(&event->child_mutex);
- (void)perf_event_read(event, false);
- total += perf_event_count(event);
+ ret = perf_event_read(event, false);
+ if (ret)
+ goto exit;
+ *total += perf_event_count(event);
*enabled += event->total_time_enabled +
atomic64_read(&event->child_total_time_enabled);
@@ -4238,14 +4246,17 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
atomic64_read(&event->child_total_time_running);
list_for_each_entry(child, &event->child_list, child_list) {
- (void)perf_event_read(child, false);
- total += perf_event_count(child);
+ ret = perf_event_read(child, false);
+ if (ret)
+ goto exit;
+ *total += perf_event_count(child);
*enabled += child->total_time_enabled;
*running += child->total_time_running;
}
+exit:
mutex_unlock(&event->child_mutex);
- return total;
+ return ret;
}
EXPORT_SYMBOL_GPL(perf_event_read_value);
@@ -4342,9 +4353,11 @@ static int perf_read_one(struct perf_event *event,
{
u64 enabled, running;
u64 values[4];
- int n = 0;
+ int n = 0, ret;
- values[n++] = perf_event_read_value(event, &enabled, &running);
+ ret = perf_event_read_value(event, &values[n++], &enabled, &running);
+ if (ret)
+ return ret;
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
values[n++] = enabled;
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
@@ -5625,7 +5638,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
values[n++] = running;
if (leader != event)
- leader->pmu->read(leader);
+ (void)leader->pmu->read(leader);
values[n++] = perf_event_count(leader);
if (read_format & PERF_FORMAT_ID)
@@ -5638,7 +5651,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
if ((sub != event) &&
(sub->state == PERF_EVENT_STATE_ACTIVE))
- sub->pmu->read(sub);
+ (void)sub->pmu->read(sub);
values[n++] = perf_event_count(sub);
if (read_format & PERF_FORMAT_ID)
@@ -7349,8 +7362,9 @@ void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
preempt_enable_notrace();
}
-static void perf_swevent_read(struct perf_event *event)
+static int perf_swevent_read(struct perf_event *event)
{
+ return 0;
}
static int perf_swevent_add(struct perf_event *event, int flags)
@@ -8263,7 +8277,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
if (event->state != PERF_EVENT_STATE_ACTIVE)
return HRTIMER_NORESTART;
- event->pmu->read(event);
+ (void)event->pmu->read(event);
perf_sample_data_init(&data, 0, event->hw.last_period);
regs = get_irq_regs();
@@ -8378,9 +8392,10 @@ static void cpu_clock_event_del(struct perf_event *event, int flags)
cpu_clock_event_stop(event, flags);
}
-static void cpu_clock_event_read(struct perf_event *event)
+static int cpu_clock_event_read(struct perf_event *event)
{
cpu_clock_event_update(event);
+ return 0;
}
static int cpu_clock_event_init(struct perf_event *event)
@@ -8455,13 +8470,14 @@ static void task_clock_event_del(struct perf_event *event, int flags)
task_clock_event_stop(event, PERF_EF_UPDATE);
}
-static void task_clock_event_read(struct perf_event *event)
+static int task_clock_event_read(struct perf_event *event)
{
u64 now = perf_clock();
u64 delta = now - event->ctx->timestamp;
u64 time = event->ctx->time + delta;
task_clock_event_update(event, time);
+ return 0;
}
static int task_clock_event_init(struct perf_event *event)
--
2.8.0.rc3.226.g39d4020