[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <1265796732.11509.260.camel@laptop>
Date: Wed, 10 Feb 2010 11:12:12 +0100
From: Peter Zijlstra <peterz@...radead.org>
To: Cyrill Gorcunov <gorcunov@...il.com>
Cc: Ingo Molnar <mingo@...e.hu>, Stephane Eranian <eranian@...gle.com>,
Frederic Weisbecker <fweisbec@...il.com>,
Don Zickus <dzickus@...hat.com>,
LKML <linux-kernel@...r.kernel.org>
Subject: Re: [RFC perf,x86] P4 PMU early draft
On Wed, 2010-02-10 at 01:39 +0300, Cyrill Gorcunov wrote:
> Index: linux-2.6.git/arch/x86/kernel/cpu/perf_event.c
> =====================================================================
> --- linux-2.6.git.orig/arch/x86/kernel/cpu/perf_event.c
> +++ linux-2.6.git/arch/x86/kernel/cpu/perf_event.c
> @@ -26,6 +26,7 @@
> #include <linux/bitops.h>
>
> #include <asm/apic.h>
> +#include <asm/perf_p4.h>
> #include <asm/stacktrace.h>
> #include <asm/nmi.h>
>
> @@ -140,6 +141,7 @@ struct x86_pmu {
> u64 max_period;
> u64 intel_ctrl;
> int (*hw_config)(struct perf_event_attr *attr, struct hw_perf_event *hwc);
> + int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign, int cpu);
> void (*enable_bts)(u64 config);
> void (*disable_bts)(void);
>
> +/*
> + * This is the most important routine of the Netburst PMU, actually,
> + * and it needs a huge speedup!
> + */
> +static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign, int cpu)
> +{
> +}
> -static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
> +/* we don't use cpu argument here at all */
> +static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign, int cpu)
> {
> struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
> unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
> @@ -1796,7 +2305,7 @@ static int x86_pmu_enable(struct perf_ev
> if (n < 0)
> return n;
>
> - ret = x86_schedule_events(cpuc, n, assign);
> + ret = x86_pmu.schedule_events(cpuc, n, assign, 0);
> if (ret)
> return ret;
> /*
This looks like a bug; surely we can run on !cpu0.
> @@ -2313,7 +2822,7 @@ int hw_perf_group_sched_in(struct perf_e
> if (n0 < 0)
> return n0;
>
> - ret = x86_schedule_events(cpuc, n0, assign);
> + ret = x86_pmu.schedule_events(cpuc, n0, assign, cpu);
> if (ret)
> return ret;
>
I'd try BUG_ON(cpu != smp_processor_id()) and scrap passing that cpu
thing around.
> @@ -2700,6 +3232,7 @@ static int validate_group(struct perf_ev
> {
> struct perf_event *leader = event->group_leader;
> struct cpu_hw_events *fake_cpuc;
> + int cpu = smp_processor_id();
> int ret, n;
>
> ret = -ENOMEM;
> @@ -2725,7 +3258,7 @@ static int validate_group(struct perf_ev
>
> fake_cpuc->n_events = n;
>
> - ret = x86_schedule_events(fake_cpuc, n, NULL);
> + ret = x86_pmu.schedule_events(fake_cpuc, n, NULL, cpu);
>
> out_free:
> kfree(fake_cpuc);
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
Powered by blists - more mailing lists