Message-Id: <1433260778-26497-4-git-send-email-maddy@linux.vnet.ibm.com>
Date: Tue, 2 Jun 2015 21:29:32 +0530
From: Madhavan Srinivasan <maddy@...ux.vnet.ibm.com>
To: linux-kernel@...r.kernel.org, linuxppc-dev@...ts.ozlabs.org
Cc: Madhavan Srinivasan <maddy@...ux.vnet.ibm.com>,
Michael Ellerman <mpe@...erman.id.au>,
Benjamin Herrenschmidt <benh@...nel.crashing.org>,
Paul Mackerras <paulus@...ba.org>,
Sukadev Bhattiprolu <sukadev@...ux.vnet.ibm.com>,
Anshuman Khandual <khandual@...ux.vnet.ibm.com>,
Stephane Eranian <eranian@...gle.com>,
Preeti U Murthy <preeti@...ux.vnet.ibm.com>,
Ingo Molnar <mingo@...nel.org>,
Peter Zijlstra <peterz@...radead.org>
Subject: [PATCH v1 3/9] powerpc/powernv: Add cpu hotplug support
Add cpu hotplug support for the Nest PMU. The first online cpu of each
chip is designated to read the Nest PMU counter data. When that cpu is
taken offline, the next online cpu on the same chip is picked as the new
designated reader and the perf events are migrated to it.
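
For context, the take-over follows the same idea as other uncore-style
PMUs: clear the outgoing cpu from the owner mask, pick a replacement, and
move the perf context over with perf_pmu_migrate_context(). A minimal
sketch of that step is below. It is not part of this patch; my_nest_pmu,
nest_owner_mask and nest_reader_offline() are made-up names, and the
sketch simplifies by accepting any online cpu on the same node instead of
requiring a matching topology_physical_package_id() as the patch does.

#include <linux/cpumask.h>
#include <linux/perf_event.h>
#include <linux/topology.h>

/* Illustrative names only -- not the symbols this patch defines. */
static struct pmu my_nest_pmu;
static cpumask_t nest_owner_mask;	/* cpus designated to read Nest counters */

/* Hand the designated-reader role to a sibling before @cpu goes offline. */
static void nest_reader_offline(unsigned int cpu)
{
	cpumask_t candidates;
	unsigned int target;

	/* Nothing to do unless @cpu was the designated reader. */
	if (!cpumask_test_and_clear_cpu(cpu, &nest_owner_mask))
		return;

	/* Candidate replacements: other online cpus on the same node. */
	cpumask_and(&candidates, cpumask_of_node(cpu_to_node(cpu)),
		    cpu_online_mask);
	target = cpumask_any_but(&candidates, cpu);
	if (target >= nr_cpu_ids)
		return;		/* last cpu on the node: counters go unread */

	cpumask_set_cpu(target, &nest_owner_mask);
	/* Move the already-created perf events from @cpu to @target. */
	perf_pmu_migrate_context(&my_nest_pmu, cpu, target);
}

(A stack cpumask_t is fine for a sketch; real code built with a large
NR_CPUS would use alloc_cpumask_var() instead.)
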
Cc: Michael Ellerman <mpe@...erman.id.au>
Cc: Benjamin Herrenschmidt <benh@...nel.crashing.org>
Cc: Paul Mackerras <paulus@...ba.org>
Cc: Sukadev Bhattiprolu <sukadev@...ux.vnet.ibm.com>
Cc: Anshuman Khandual <khandual@...ux.vnet.ibm.com>
Cc: Stephane Eranian <eranian@...gle.com>
Cc: Preeti U Murthy <preeti@...ux.vnet.ibm.com>
Cc: Ingo Molnar <mingo@...nel.org>
Cc: Peter Zijlstra <peterz@...radead.org>
Signed-off-by: Madhavan Srinivasan <maddy@...ux.vnet.ibm.com>
---
arch/powerpc/perf/nest-pmu.c | 84 ++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 84 insertions(+)
diff --git a/arch/powerpc/perf/nest-pmu.c b/arch/powerpc/perf/nest-pmu.c
index d4413bb..3e7010e 100644
--- a/arch/powerpc/perf/nest-pmu.c
+++ b/arch/powerpc/perf/nest-pmu.c
@@ -30,6 +30,86 @@ static struct attribute_group cpumask_nest_pmu_attr_group = {
 	.attrs = cpumask_nest_pmu_attrs,
 };
+static void nest_init(void *dummy)
+{
+	opal_nest_ima_control(P8_NEST_ENGINE_START);
+}
+
+static void nest_change_cpu_context(int old_cpu, int new_cpu)
+{
+	int i;
+
+	for (i = 0; per_nestpmu_arr[i] != NULL; i++)
+		perf_pmu_migrate_context(&per_nestpmu_arr[i]->pmu,
+					 old_cpu, new_cpu);
+}
+
+static void nest_exit_cpu(int cpu)
+{
+	int i, nid, target = -1;
+	const struct cpumask *l_cpumask;
+	int src_chipid;
+
+	if (!cpumask_test_and_clear_cpu(cpu, &cpu_mask_nest_pmu))
+		return;
+
+	nid = cpu_to_node(cpu);
+	src_chipid = topology_physical_package_id(cpu);
+	l_cpumask = cpumask_of_node(nid);
+	for_each_cpu(i, l_cpumask) {
+		if (i == cpu)
+			continue;
+		if (src_chipid == topology_physical_package_id(i)) {
+			target = i;
+			break;
+		}
+	}
+
+	if (target == -1)
+		return;
+
+	cpumask_set_cpu(target, &cpu_mask_nest_pmu);
+	nest_change_cpu_context(cpu, target);
+}
+
+static void nest_init_cpu(int cpu)
+{
+	int i, src_chipid;
+
+	src_chipid = topology_physical_package_id(cpu);
+	for_each_cpu(i, &cpu_mask_nest_pmu)
+		if (src_chipid == topology_physical_package_id(i))
+			return;
+
+	/* First designated cpu for this chip; nothing to migrate yet. */
+	cpumask_set_cpu(cpu, &cpu_mask_nest_pmu);
+}
+
+static int nest_cpu_notifier(struct notifier_block *self,
+			     unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (long)hcpu;
+
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_DOWN_FAILED:
+	case CPU_STARTING:
+		nest_init_cpu(cpu);
+		break;
+	case CPU_DOWN_PREPARE:
+		nest_exit_cpu(cpu);
+		break;
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block nest_cpu_nb = {
+	.notifier_call = nest_cpu_notifier,
+	.priority = CPU_PRI_PERF + 1,
+};
+
 void cpumask_chip(void)
 {
 	const struct cpumask *l_cpumask;
@@ -47,6 +127,10 @@ void cpumask_chip(void)
 		cpumask_set_cpu(cpu, &cpu_mask_nest_pmu);
 	}
+	on_each_cpu_mask(&cpu_mask_nest_pmu, nest_init, NULL, 1);
+
+	__register_cpu_notifier(&nest_cpu_nb);
+
 	cpu_notifier_register_done();
 }
--
1.9.1