[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <37D7C6CF3E00A74B8858931C1DB2F0770588AF38@shsmsx102.ccr.corp.intel.com>
Date: Wed, 17 Feb 2016 15:49:37 +0000
From: "Liang, Kan" <kan.liang@...el.com>
To: Thomas Gleixner <tglx@...utronix.de>,
LKML <linux-kernel@...r.kernel.org>
CC: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...nel.org>, Borislav Petkov <bp@...en8.de>,
Stephane Eranian <eranian@...gle.com>,
"Chegondi, Harish" <harish.chegondi@...el.com>,
"Kleen, Andi" <andi.kleen@...el.com>
Subject: RE: [patch 04/11] x86/perf/intel_uncore: Cleanup hardware on exit
>
> When tearing down the boxes nothing undoes the hardware state which
> was setup by box->init_box(). Add a box->exit_box() callback and
> implement it for the uncores which have an init_box() callback.
I don't think we need exit_box, because disable_box already freezes the box.
Also, writing 0 does not clear the hardware state; it unfreezes the box,
and the counters will start counting.
>
> This misses the cleanup in the error exit pathes, but I cannot be bothered
> to implement it before cleaning up the rest of the driver, which makes that
> task way simpler.
>
> Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
> ---
> arch/x86/kernel/cpu/perf_event_intel_uncore.c | 6 +-
> arch/x86/kernel/cpu/perf_event_intel_uncore.h | 9 +++
> arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c | 6 ++
> arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c | 13 ++++
> arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c | 57
> +++++++++++++++++++-
> 5 files changed, 88 insertions(+), 3 deletions(-)
>
> --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
> +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
> @@ -927,6 +927,7 @@ static int uncore_pci_probe(struct pci_d
> raw_spin_lock(&uncore_box_lock);
> list_del(&box->list);
> raw_spin_unlock(&uncore_box_lock);
> + uncore_box_exit(box);
> kfree(box);
> }
> return ret;
> @@ -972,6 +973,7 @@ static void uncore_pci_remove(struct pci
> }
>
> WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
> + uncore_box_exit(box);
> kfree(box);
>
> if (last_box)
> @@ -1079,8 +1081,10 @@ static void uncore_cpu_dying(int cpu)
> pmu = &type->pmus[j];
> box = *per_cpu_ptr(pmu->box, cpu);
> *per_cpu_ptr(pmu->box, cpu) = NULL;
> - if (box && atomic_dec_and_test(&box->refcnt))
> + if (box && atomic_dec_and_test(&box->refcnt)) {
> list_add(&box->list, &boxes_to_free);
> + uncore_box_exit(box);
> + }
> }
> }
> }
> --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
> +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
> @@ -61,6 +61,7 @@ struct intel_uncore_type {
>
> struct intel_uncore_ops {
> void (*init_box)(struct intel_uncore_box *);
> + void (*exit_box)(struct intel_uncore_box *);
> void (*disable_box)(struct intel_uncore_box *);
> void (*enable_box)(struct intel_uncore_box *);
> void (*disable_event)(struct intel_uncore_box *, struct
> perf_event *); @@ -306,6 +307,14 @@ static inline void
> uncore_box_init(struc
> }
> }
>
> +static inline void uncore_box_exit(struct intel_uncore_box *box) {
> + if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box-
> >flags)) {
> + if (box->pmu->type->ops->exit_box)
> + box->pmu->type->ops->exit_box(box);
> + }
> +}
> +
> static inline bool uncore_box_is_fake(struct intel_uncore_box *box) {
> return (box->phys_id < 0);
> --- a/arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c
> +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c
> @@ -201,6 +201,11 @@ static void nhmex_uncore_msr_init_box(st
> wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL,
> NHMEX_U_PMON_GLOBAL_EN_ALL); }
>
> +static void nhmex_uncore_msr_exit_box(struct intel_uncore_box *box)
> {
> + wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, 0); }
> +
> static void nhmex_uncore_msr_disable_box(struct intel_uncore_box
> *box) {
> unsigned msr = uncore_msr_box_ctl(box); @@ -250,6 +255,7 @@
> static void nhmex_uncore_msr_enable_even
>
> #define NHMEX_UNCORE_OPS_COMMON_INIT()
> \
> .init_box = nhmex_uncore_msr_init_box, \
> + .exit_box = nhmex_uncore_msr_exit_box, \
> .disable_box = nhmex_uncore_msr_disable_box, \
> .enable_box = nhmex_uncore_msr_enable_box, \
> .disable_event = nhmex_uncore_msr_disable_event, \
> --- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c
> +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c
> @@ -95,6 +95,12 @@ static void snb_uncore_msr_init_box(stru
> }
> }
>
> +static void snb_uncore_msr_exit_box(struct intel_uncore_box *box) {
> + if (box->pmu->pmu_idx == 0)
> + wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
> +}
> +
> static struct uncore_event_desc snb_uncore_events[] = {
> INTEL_UNCORE_EVENT_DESC(clockticks,
> "event=0xff,umask=0x00"),
> { /* end: all zeroes */ },
> @@ -116,6 +122,7 @@ static struct attribute_group snb_uncore
>
> static struct intel_uncore_ops snb_uncore_msr_ops = {
> .init_box = snb_uncore_msr_init_box,
> + .exit_box = snb_uncore_msr_exit_box,
> .disable_event = snb_uncore_msr_disable_event,
> .enable_event = snb_uncore_msr_enable_event,
> .read_counter = uncore_msr_read_counter,
> @@ -231,6 +238,11 @@ static void snb_uncore_imc_init_box(stru
> box->hrtimer_duration =
> UNCORE_SNB_IMC_HRTIMER_INTERVAL; }
>
> +static void snb_uncore_imc_exit_box(struct intel_uncore_box *box) {
> + iounmap(box->io_addr);
> +}
> +
> static void snb_uncore_imc_enable_box(struct intel_uncore_box *box) {}
>
> @@ -458,6 +470,7 @@ static struct pmu snb_uncore_imc_pmu = {
>
> static struct intel_uncore_ops snb_uncore_imc_ops = {
> .init_box = snb_uncore_imc_init_box,
> + .exit_box = snb_uncore_imc_exit_box,
> .enable_box = snb_uncore_imc_enable_box,
> .disable_box = snb_uncore_imc_disable_box,
> .disable_event = snb_uncore_imc_disable_event,
> --- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
> +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
> @@ -387,6 +387,14 @@ static void snbep_uncore_pci_init_box(st
> pci_write_config_dword(pdev, box_ctl,
> SNBEP_PMON_BOX_CTL_INT); }
>
> +static void snbep_uncore_pci_exit_box(struct intel_uncore_box *box) {
> + struct pci_dev *pdev = box->pci_dev;
> + int box_ctl = uncore_pci_box_ctl(box);
> +
> + pci_write_config_dword(pdev, box_ctl, 0); }
> +
> static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
> {
> u64 config;
> @@ -440,6 +448,14 @@ static void snbep_uncore_msr_init_box(st
> wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
> }
>
> +static void snbep_uncore_msr_exit_box(struct intel_uncore_box *box) {
> + unsigned msr = uncore_msr_box_ctl(box);
> +
> + if (msr)
> + wrmsrl(msr, 0);
> +}
> +
> static struct attribute *snbep_uncore_formats_attr[] = {
> &format_attr_event.attr,
> &format_attr_umask.attr,
> @@ -567,7 +583,8 @@ static struct attribute_group snbep_unco
>
> #define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()
> \
> __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
> \
> - .init_box = snbep_uncore_msr_init_box \
> + .init_box = snbep_uncore_msr_init_box, \
> + .exit_box = snbep_uncore_msr_exit_box \
>
> static struct intel_uncore_ops snbep_uncore_msr_ops = {
> SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
> @@ -575,6 +592,7 @@ static struct intel_uncore_ops snbep_unc
>
> #define SNBEP_UNCORE_PCI_OPS_COMMON_INIT() \
> .init_box = snbep_uncore_pci_init_box, \
> + .exit_box = snbep_uncore_pci_exit_box, \
> .disable_box = snbep_uncore_pci_disable_box, \
> .enable_box = snbep_uncore_pci_enable_box, \
> .disable_event = snbep_uncore_pci_disable_event, \
> @@ -1236,10 +1254,19 @@ int snbep_uncore_pci_init(void) static void
> ivbep_uncore_msr_init_box(struct intel_uncore_box *box) {
> unsigned msr = uncore_msr_box_ctl(box);
> +
> if (msr)
> wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
> }
>
> +static void ivbep_uncore_msr_exit_box(struct intel_uncore_box *box) {
> + unsigned msr = uncore_msr_box_ctl(box);
> +
> + if (msr)
> + wrmsrl(msr, 0);
> +}
> +
> static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box) {
> struct pci_dev *pdev = box->pci_dev;
> @@ -1247,8 +1274,16 @@ static void ivbep_uncore_pci_init_box(st
> pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL,
> IVBEP_PMON_BOX_CTL_INT); }
>
> +static void ivbep_uncore_pci_exit_box(struct intel_uncore_box *box) {
> + struct pci_dev *pdev = box->pci_dev;
> +
> + pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, 0); }
> +
> #define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()
> \
> .init_box = ivbep_uncore_msr_init_box, \
> + .exit_box = ivbep_uncore_msr_exit_box, \
> .disable_box = snbep_uncore_msr_disable_box, \
> .enable_box = snbep_uncore_msr_enable_box, \
> .disable_event = snbep_uncore_msr_disable_event, \
> @@ -1261,6 +1296,7 @@ static struct intel_uncore_ops ivbep_unc
>
> static struct intel_uncore_ops ivbep_uncore_pci_ops = {
> .init_box = ivbep_uncore_pci_init_box,
> + .exit_box = ivbep_uncore_pci_exit_box,
> .disable_box = snbep_uncore_pci_disable_box,
> .enable_box = snbep_uncore_pci_enable_box,
> .disable_event = snbep_uncore_pci_disable_event,
> @@ -1497,6 +1533,7 @@ static void ivbep_cbox_enable_event(stru
>
> static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
> .init_box = ivbep_uncore_msr_init_box,
> + .exit_box = ivbep_uncore_msr_exit_box,
> .disable_box = snbep_uncore_msr_disable_box,
> .enable_box = snbep_uncore_msr_enable_box,
> .disable_event = snbep_uncore_msr_disable_event,
> @@ -1613,6 +1650,7 @@ static u64 ivbep_uncore_irp_read_counter
>
> static struct intel_uncore_ops ivbep_uncore_irp_ops = {
> .init_box = ivbep_uncore_pci_init_box,
> + .exit_box = ivbep_uncore_pci_exit_box,
> .disable_box = snbep_uncore_pci_disable_box,
> .enable_box = snbep_uncore_pci_enable_box,
> .disable_event = ivbep_uncore_irp_disable_event,
> @@ -1633,6 +1671,7 @@ static struct intel_uncore_type ivbep_un
>
> static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
> .init_box = ivbep_uncore_pci_init_box,
> + .exit_box = ivbep_uncore_pci_exit_box,
> .disable_box = snbep_uncore_pci_disable_box,
> .enable_box = snbep_uncore_pci_enable_box,
> .disable_event = snbep_uncore_pci_disable_event,
> @@ -1914,6 +1953,7 @@ static void hswep_cbox_enable_event(stru
>
> static struct intel_uncore_ops knl_uncore_cha_ops = {
> .init_box = snbep_uncore_msr_init_box,
> + .exit_box = snbep_uncore_msr_exit_box,
> .disable_box = snbep_uncore_msr_disable_box,
> .enable_box = snbep_uncore_msr_enable_box,
> .disable_event = snbep_uncore_msr_disable_event,
> @@ -2008,6 +2048,7 @@ static void knl_uncore_imc_enable_event(
>
> static struct intel_uncore_ops knl_uncore_imc_ops = {
> .init_box = snbep_uncore_pci_init_box,
> + .exit_box = snbep_uncore_pci_exit_box,
> .disable_box = snbep_uncore_pci_disable_box,
> .enable_box = knl_uncore_imc_enable_box,
> .read_counter = snbep_uncore_pci_read_counter,
> @@ -2397,6 +2438,7 @@ static void hswep_cbox_enable_event(stru
>
> static struct intel_uncore_ops hswep_uncore_cbox_ops = {
> .init_box = snbep_uncore_msr_init_box,
> + .exit_box = snbep_uncore_msr_exit_box,
> .disable_box = snbep_uncore_msr_disable_box,
> .enable_box = snbep_uncore_msr_enable_box,
> .disable_event = snbep_uncore_msr_disable_event,
> @@ -2442,9 +2484,19 @@ static void hswep_uncore_sbox_msr_init_b
> }
> }
>
> +static void hswep_uncore_sbox_msr_exit_box(struct intel_uncore_box
> +*box) {
> + unsigned msr = uncore_msr_box_ctl(box);
> +
> + /* CHECKME: Does this need the bit dance like init() ? */
> + if (msr)
> + wrmsrl(msr, 0);
> +}
> +
> static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
> __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
> - .init_box = hswep_uncore_sbox_msr_init_box
> + .init_box = hswep_uncore_sbox_msr_init_box,
> + .exit_box = hswep_uncore_sbox_msr_exit_box
> };
>
> static struct attribute *hswep_uncore_sbox_formats_attr[] = { @@ -
> 2584,6 +2636,7 @@ static u64 hswep_uncore_irp_read_counter
>
> static struct intel_uncore_ops hswep_uncore_irp_ops = {
> .init_box = snbep_uncore_pci_init_box,
> + .exit_box = snbep_uncore_pci_exit_box,
> .disable_box = snbep_uncore_pci_disable_box,
> .enable_box = snbep_uncore_pci_enable_box,
> .disable_event = ivbep_uncore_irp_disable_event,
>
Powered by blists - more mailing lists