Message-ID: <115421572.nniJfEyVGO@rjwysocki.net>
Date: Fri, 08 Nov 2024 17:46:13 +0100
From: "Rafael J. Wysocki" <rjw@...ysocki.net>
To: Linux PM <linux-pm@...r.kernel.org>
Cc: LKML <linux-kernel@...r.kernel.org>, Lukasz Luba <lukasz.luba@....com>,
 Peter Zijlstra <peterz@...radead.org>,
 Srinivas Pandruvada <srinivas.pandruvada@...ux.intel.com>,
 Len Brown <len.brown@...el.com>, Dietmar Eggemann <dietmar.eggemann@....com>,
 Morten Rasmussen <morten.rasmussen@....com>,
 Vincent Guittot <vincent.guittot@...aro.org>,
 Ricardo Neri <ricardo.neri-calderon@...ux.intel.com>
Subject: [RFC][PATCH v0.1 6/6] cpufreq: intel_pstate: Add basic EAS support on hybrid platforms

From: Rafael J. Wysocki <rafael.j.wysocki@...el.com>

Modify intel_pstate to register stub EM perf domains for CPUs on
hybrid platforms via em_dev_register_perf_domain() and to use the
previously introduced em_dev_expand_perf_domain() to add new CPUs to
existing EM perf domains when those CPUs go online for the first time
after driver initialization.

This change targets platforms (for example, Lunar Lake) where the
"small" CPUs (E-cores) are always more energy-efficient than the "big"
or "performance" CPUs (P-cores) when run at the same HWP performance
level, so it is sufficient to tell EAS that E-cores are always
preferred (so long as one of them has enough spare capacity to run
the given task).

Accordingly, the perf domains are registered per CPU type (that is,
all P-cores belong to one perf domain and all E-cores belong to another
perf domain), and they are registered only if asymmetric CPU capacity is
enabled.  Each perf domain has a one-element states table and that
element contains only the relative cost value (the other fields in it
are not initialized, so they are all equal to zero); the cost value
for the E-core perf domain is lower.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@...el.com>
---
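[Not part of the patch.] As an aid for reviewers, here is a minimal
userspace model of the placement rule described in the changelog.  It is
not kernel code and all names in it are made up for illustration; it only
mimics the effect of one-state EM tables in which nothing but the relative
cost differs: the per-domain estimate is proportional to cost times task
utilization, so the lower-cost (E-core) domain wins whenever it still has
enough spare capacity for the task.

/* Toy model only -- not kernel code.  A domain is eligible if the task
 * fits into its spare capacity, and among eligible domains the one with
 * the lowest cost * task_util estimate is picked, mirroring the EM's
 * cost-based energy comparison with the two cost values from the patch.
 */
#include <stdio.h>

struct toy_domain {
	const char *name;
	unsigned long cost;	/* relative cost; units don't matter */
	unsigned long spare;	/* spare capacity on the best CPU */
};

static const struct toy_domain *toy_pick(const struct toy_domain *d,
					 int n, unsigned long task_util)
{
	const struct toy_domain *best = NULL;
	int i;

	for (i = 0; i < n; i++) {
		if (d[i].spare < task_util)
			continue;	/* task does not fit here */
		if (!best || d[i].cost * task_util < best->cost * task_util)
			best = &d[i];
	}
	return best;
}

int main(void)
{
	struct toy_domain d[] = {
		{ "P-cores", 2, 300 },	/* higher cost, more capacity */
		{ "E-cores", 1, 150 },	/* lower cost, less capacity */
	};

	/* A small task fits on an E-core, so the cheaper domain wins. */
	printf("util=100 -> %s\n", toy_pick(d, 2, 100)->name);
	/* A large task only fits on a P-core. */
	printf("util=200 -> %s\n", toy_pick(d, 2, 200)->name);
	return 0;
}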
 drivers/cpufreq/intel_pstate.c |  110 ++++++++++++++++++++++++++++++++++++++---
 1 file changed, 104 insertions(+), 6 deletions(-)

Index: linux-pm/drivers/cpufreq/intel_pstate.c
===================================================================
--- linux-pm.orig/drivers/cpufreq/intel_pstate.c
+++ linux-pm/drivers/cpufreq/intel_pstate.c
@@ -8,6 +8,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/energy_model.h>
 #include <linux/kernel.h>
 #include <linux/kernel_stat.h>
 #include <linux/module.h>
@@ -938,6 +939,12 @@ static struct freq_attr *hwp_cpufreq_att
 	NULL,
 };
 
+enum hybrid_cpu_type {
+	HYBRID_PCORE = 0,
+	HYBRID_ECORE,
+	HYBRID_NR_TYPES
+};
+
 static struct cpudata *hybrid_max_perf_cpu __read_mostly;
 /*
  * Protects hybrid_max_perf_cpu, the capacity_perf fields in struct cpudata,
@@ -945,6 +952,86 @@ static struct cpudata *hybrid_max_perf_c
  */
 static DEFINE_MUTEX(hybrid_capacity_lock);
 
+#ifdef CONFIG_ENERGY_MODEL
+struct hybrid_em_perf_domain {
+	cpumask_t cpumask;
+	struct device *dev;
+	struct em_data_callback cb;
+};
+
+static int hybrid_pcore_cost(struct device *dev, unsigned long freq,
+			     unsigned long *cost)
+{
+	/*
+	 * The number used here needs to be higher than the analogous
+	 * one in hybrid_ecore_cost() below.  The units and the actual
+	 * values don't matter.
+	 */
+	*cost = 2;
+	return 0;
+}
+
+static int hybrid_ecore_cost(struct device *dev, unsigned long freq,
+			     unsigned long *cost)
+{
+	*cost = 1;
+	return 0;
+}
+
+static struct hybrid_em_perf_domain perf_domains[HYBRID_NR_TYPES] = {
+	[HYBRID_PCORE] = { .cb.get_cost = hybrid_pcore_cost, },
+	[HYBRID_ECORE] = { .cb.get_cost = hybrid_ecore_cost, }
+};
+
+static bool hybrid_register_perf_domain(struct hybrid_em_perf_domain *pd)
+{
+	/*
+	 * Registering EM perf domains without asymmetric CPU capacity
+	 * support enabled is wasteful, so don't do that.
+	 */
+	if (!hybrid_max_perf_cpu)
+		return false;
+
+	pd->dev = get_cpu_device(cpumask_first(&pd->cpumask));
+	if (!pd->dev)
+		return false;
+
+	if (em_dev_register_perf_domain(pd->dev, 1, &pd->cb, &pd->cpumask, false)) {
+		pd->dev = NULL;
+		return false;
+	}
+
+	return true;
+}
+
+static void hybrid_register_all_perf_domains(void)
+{
+	enum hybrid_cpu_type type;
+
+	for (type = HYBRID_PCORE; type < HYBRID_NR_TYPES; type++)
+		hybrid_register_perf_domain(&perf_domains[type]);
+}
+
+static void hybrid_add_to_perf_domain(int cpu, enum hybrid_cpu_type type)
+{
+	struct hybrid_em_perf_domain *pd = &perf_domains[type];
+
+	guard(mutex)(&hybrid_capacity_lock);
+
+	if (cpumask_test_cpu(cpu, &pd->cpumask))
+		return;
+
+	cpumask_set_cpu(cpu, &pd->cpumask);
+	if (pd->dev)
+		em_dev_expand_perf_domain(pd->dev, cpu);
+	else if (hybrid_register_perf_domain(pd))
+		em_rebuild_perf_domains();
+}
+#else /* CONFIG_ENERGY_MODEL */
+static inline void hybrid_register_all_perf_domains(void) {}
+static inline void hybrid_add_to_perf_domain(int cpu, enum hybrid_cpu_type type) {}
+#endif /* !CONFIG_ENERGY_MODEL */
+
 static void hybrid_set_cpu_capacity(struct cpudata *cpu)
 {
 	arch_set_cpu_capacity(cpu->cpu, cpu->capacity_perf,
@@ -1034,11 +1121,14 @@ static void __hybrid_refresh_cpu_capacit
 	hybrid_update_cpu_capacity_scaling();
 }
 
-static void hybrid_refresh_cpu_capacity_scaling(void)
+static void hybrid_refresh_cpu_capacity_scaling(bool register_perf_domains)
 {
 	guard(mutex)(&hybrid_capacity_lock);
 
 	__hybrid_refresh_cpu_capacity_scaling();
+
+	if (register_perf_domains)
+		hybrid_register_all_perf_domains();
 }
 
 static void hybrid_init_cpu_capacity_scaling(bool refresh)
@@ -1049,7 +1139,7 @@ static void hybrid_init_cpu_capacity_sca
 	 * operation mode.
 	 */
 	if (refresh) {
-		hybrid_refresh_cpu_capacity_scaling();
+		hybrid_refresh_cpu_capacity_scaling(false);
 		return;
 	}
 
@@ -1059,10 +1149,14 @@ static void hybrid_init_cpu_capacity_sca
 	 * do not do that when SMT is in use.
 	 */
 	if (hwp_is_hybrid && !sched_smt_active() && arch_enable_hybrid_capacity_scale()) {
-		hybrid_refresh_cpu_capacity_scaling();
+		/*
+		 * Perf domains are not registered before setting hybrid_max_perf_cpu,
+		 * so register them all after setting up CPU capacity scaling.
+		 */
+		hybrid_refresh_cpu_capacity_scaling(true);
 		/*
 		 * Disabling ITMT causes sched domains to be rebuilt to disable asym
-		 * packing and enable asym capacity.
+		 * packing and enable asym capacity and EAS.
 		 */
 		sched_clear_itmt_support();
 	}
@@ -2215,12 +2309,16 @@ static int hwp_get_cpu_scaling(int cpu)
 
 	smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1);
 	/* P-cores have a smaller perf level-to-frequency scaling factor. */
-	if (cpu_type == 0x40)
+	if (cpu_type == 0x40) {
+		hybrid_add_to_perf_domain(cpu, HYBRID_PCORE);
 		return hybrid_scaling_factor;
+	}
 
 	/* Use default core scaling for E-cores */
-	if (cpu_type == 0x20)
+	if (cpu_type == 0x20) {
+		hybrid_add_to_perf_domain(cpu, HYBRID_ECORE);
 		return core_get_scaling();
+	}
 
 	/*
 	 * If reached here, this system is either non-hybrid (like Tiger

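[Not part of the patch.] Similarly, a toy model of the late-online flow
implemented by hybrid_add_to_perf_domain() above (again, illustrative
names only, not kernel code): the first CPU of a given type to show up
registers the perf domain for that type, and every subsequent CPU of the
same type is folded into the already-registered domain.

/* Toy model only -- not kernel code.  The first CPU of a type
 * "registers" its perf domain; later CPUs of that type "expand" it,
 * mirroring the em_dev_register_perf_domain() /
 * em_dev_expand_perf_domain() split in the patch.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_pd {
	unsigned long cpumask;	/* one bit per CPU */
	bool registered;
};

static void toy_add_cpu(struct toy_pd *pd, int cpu)
{
	if (pd->cpumask & (1UL << cpu))
		return;		/* CPU already tracked: nothing to do */

	pd->cpumask |= 1UL << cpu;
	if (pd->registered) {
		/* analogue of em_dev_expand_perf_domain() */
		printf("expand domain with CPU%d\n", cpu);
	} else {
		/* analogue of em_dev_register_perf_domain() */
		pd->registered = true;
		printf("register domain for CPU%d\n", cpu);
	}
}

int main(void)
{
	struct toy_pd ecores = { 0, false };

	toy_add_cpu(&ecores, 4);	/* first E-core: register */
	toy_add_cpu(&ecores, 5);	/* second E-core: expand */
	toy_add_cpu(&ecores, 5);	/* repeat online: no-op */
	return 0;
}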