Message-ID: <20240229162520.970986-2-vanshikonda@os.amperecomputing.com>
Date: Thu, 29 Feb 2024 08:25:13 -0800
From: Vanshidhar Konda <vanshikonda@...amperecomputing.com>
To: Huisong Li <lihuisong@...wei.com>,
Beata Michalska <beata.michalska@....com>
Cc: Vanshidhar Konda <vanshikonda@...amperecomputing.com>,
Ionela Voinescu <ionela.voinescu@....com>,
linux-kernel@...r.kernel.org,
linux-pm@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org,
rafael@...nel.org,
sumitg@...dia.com,
zengheng4@...wei.com,
yang@...amperecomputing.com,
will@...nel.org,
sudeep.holla@....com,
liuyonglong@...wei.com,
zhanjie9@...ilicon.com,
linux-acpi@...r.kernel.org
Subject: [PATCH v1 1/3] arm64: topology: Add arch_freq_get_on_cpu() support
AMU counters are used by the Frequency Invariance Engine (FIE) to keep the
scheduler's CPU utilization estimates frequency invariant on each tick. The
delta of the AMU counters between two ticks can also be used to estimate the
average frequency of each core over that tick period. Sample the AMU counters
on each tick, compute the deltas and store them. When the frequency of a core
is queried, derive it from the stored deltas.
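For illustration only (this snippet is not part of the patch), the arithmetic
reduces to scaling the ratio of the two deltas by the constant counter's rate.
The 100 MHz reference rate, the 4 ms window and the variable names below are
made-up example values:

/*
 * Illustrative sketch: average frequency over one sample window from
 * AMU-style counter deltas. The constant counter is assumed to tick at
 * the system counter rate, so the window length is delta_const / rate
 * seconds and the average core frequency is delta_core * rate / delta_const.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t const_rate_hz = 100000000;	/* assumed 100 MHz system counter */
	const uint64_t delta_const = 400000;		/* 4 ms window at that rate */
	const uint64_t delta_core = 8000000;		/* core cycles seen in the window */

	/* 8e6 cycles in 4 ms -> 2 GHz, reported in kHz as cpufreq expects */
	uint64_t freq_khz = delta_core * (const_rate_hz / 1000) / delta_const;

	printf("%llu kHz\n", (unsigned long long)freq_khz);
	return 0;
}

Reporting the value in kHz matches what cpufreq_quick_get() and the cpufreq
sysfs interface use.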
arch_freq_get_on_cpu() is used on x86 systems to estimate the current
frequency of each CPU. Wire it up on arm64 to provide the same functionality
from the stored AMU counter deltas.
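The hand-off between the tick (writer) and a remote caller of
arch_freq_get_on_cpu() (reader) relies on a seqcount rather than a lock. Below
is a minimal sketch of that pattern with simplified, illustrative names
(struct freq_sample, publish_sample() and read_sample() are not taken from the
patch):

#include <linux/percpu.h>
#include <linux/seqlock.h>

/* Illustrative per-CPU sample; the patch's struct amu_counters carries more fields. */
struct freq_sample {
	seqcount_t seq;
	u64 delta_core;
	u64 delta_const;
};

static DEFINE_PER_CPU(struct freq_sample, freq_sample) = {
	.seq = SEQCNT_ZERO(freq_sample.seq),
};

/* Tick path on the local CPU: publish a consistent pair of deltas. */
static void publish_sample(u64 delta_core, u64 delta_const)
{
	struct freq_sample *s = this_cpu_ptr(&freq_sample);

	raw_write_seqcount_begin(&s->seq);
	s->delta_core = delta_core;
	s->delta_const = delta_const;
	raw_write_seqcount_end(&s->seq);
}

/* Remote reader: retry until both deltas come from the same update. */
static void read_sample(int cpu, u64 *delta_core, u64 *delta_const)
{
	struct freq_sample *s = per_cpu_ptr(&freq_sample, cpu);
	unsigned int start;

	do {
		start = raw_read_seqcount_begin(&s->seq);
		*delta_core = s->delta_core;
		*delta_const = s->delta_const;
	} while (read_seqcount_retry(&s->seq, start));
}

The raw_ seqcount variants are usable here because the writer side runs from
the scheduler tick, where the update cannot be preempted.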
Signed-off-by: Vanshidhar Konda <vanshikonda@...amperecomputing.com>
---
arch/arm64/kernel/topology.c | 114 +++++++++++++++++++++++++++++------
1 file changed, 96 insertions(+), 18 deletions(-)
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 1a2c72f3e7f8..db8d14525cf4 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -17,6 +17,8 @@
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/percpu.h>
+#include <linux/sched/isolation.h>
+#include <linux/seqlock.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
@@ -82,20 +84,54 @@ int __init parse_acpi_topology(void)
#undef pr_fmt
#define pr_fmt(fmt) "AMU: " fmt
+struct amu_counters {
+ seqcount_t seq;
+ unsigned long last_update;
+ u64 core_cnt;
+ u64 const_cnt;
+ u64 delta_core_cnt;
+ u64 delta_const_cnt;
+};
+
/*
* Ensure that amu_scale_freq_tick() will return SCHED_CAPACITY_SCALE until
* the CPU capacity and its associated frequency have been correctly
* initialized.
*/
-static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale) = 1UL << (2 * SCHED_CAPACITY_SHIFT);
-static DEFINE_PER_CPU(u64, arch_const_cycles_prev);
-static DEFINE_PER_CPU(u64, arch_core_cycles_prev);
+static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale) =
+ 1UL << (2 * SCHED_CAPACITY_SHIFT);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct amu_counters, cpu_samples) = {
+ .seq = SEQCNT_ZERO(cpu_samples.seq)
+};
static cpumask_var_t amu_fie_cpus;
void update_freq_counters_refs(void)
{
- this_cpu_write(arch_core_cycles_prev, read_corecnt());
- this_cpu_write(arch_const_cycles_prev, read_constcnt());
+ struct amu_counters *cpu_sample = this_cpu_ptr(&cpu_samples);
+ u64 core_cnt, const_cnt, delta_core_cnt, delta_const_cnt;
+
+ const_cnt = read_constcnt();
+ core_cnt = read_corecnt();
+
+ if (unlikely(core_cnt < cpu_sample->core_cnt) ||
+ unlikely(const_cnt < cpu_sample->const_cnt)) {
+		WARN_ONCE(1, "AMU counter values should be monotonic.\n");
+ cpu_sample->delta_const_cnt = 0;
+ cpu_sample->delta_core_cnt = 0;
+ return;
+ }
+
+ delta_core_cnt = core_cnt - cpu_sample->core_cnt;
+ delta_const_cnt = const_cnt - cpu_sample->const_cnt;
+
+ cpu_sample->core_cnt = core_cnt;
+ cpu_sample->const_cnt = const_cnt;
+
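+	/* Publish the fields that remote readers consume as one consistent update. */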
+ raw_write_seqcount_begin(&cpu_sample->seq);
+ cpu_sample->last_update = jiffies;
+ cpu_sample->delta_const_cnt = delta_const_cnt;
+ cpu_sample->delta_core_cnt = delta_core_cnt;
+ raw_write_seqcount_end(&cpu_sample->seq);
}
static inline bool freq_counters_valid(int cpu)
@@ -108,8 +144,7 @@ static inline bool freq_counters_valid(int cpu)
return false;
}
- if (unlikely(!per_cpu(arch_const_cycles_prev, cpu) ||
- !per_cpu(arch_core_cycles_prev, cpu))) {
+	if (unlikely(!per_cpu_ptr(&cpu_samples, cpu)->const_cnt ||
+		     !per_cpu_ptr(&cpu_samples, cpu)->core_cnt)) {
pr_debug("CPU%d: cycle counters are not enabled.\n", cpu);
return false;
}
@@ -152,19 +187,15 @@ void freq_inv_set_max_ratio(int cpu, u64 max_rate)
static void amu_scale_freq_tick(void)
{
- u64 prev_core_cnt, prev_const_cnt;
- u64 core_cnt, const_cnt, scale;
-
- prev_const_cnt = this_cpu_read(arch_const_cycles_prev);
- prev_core_cnt = this_cpu_read(arch_core_cycles_prev);
+ struct amu_counters *cpu_sample = this_cpu_ptr(&cpu_samples);
+ u64 delta_core_cnt, delta_const_cnt, scale;
update_freq_counters_refs();
- const_cnt = this_cpu_read(arch_const_cycles_prev);
- core_cnt = this_cpu_read(arch_core_cycles_prev);
+ delta_const_cnt = cpu_sample->delta_const_cnt;
+ delta_core_cnt = cpu_sample->delta_core_cnt;
- if (unlikely(core_cnt <= prev_core_cnt ||
- const_cnt <= prev_const_cnt))
+ if ((delta_const_cnt == 0) || (delta_core_cnt == 0))
return;
/*
@@ -175,15 +206,62 @@ static void amu_scale_freq_tick(void)
* See validate_cpu_freq_invariance_counters() for details on
* arch_max_freq_scale and the use of SCHED_CAPACITY_SHIFT.
*/
- scale = core_cnt - prev_core_cnt;
+ scale = delta_core_cnt;
scale *= this_cpu_read(arch_max_freq_scale);
scale = div64_u64(scale >> SCHED_CAPACITY_SHIFT,
- const_cnt - prev_const_cnt);
+ delta_const_cnt);
scale = min_t(unsigned long, scale, SCHED_CAPACITY_SCALE);
this_cpu_write(arch_freq_scale, (unsigned long)scale);
}
+/*
+ * Discard samples older than the defined maximum sample age of 20ms. If
+ * the scheduler tick has not run on a CPU for that long, the CPU is
+ * either idle or isolated and the stale deltas must not be reported as
+ * its current frequency.
+ */
+#define MAX_SAMPLE_AGE ((unsigned long)HZ / 50)
+
+unsigned int arch_freq_get_on_cpu(int cpu)
+{
+ struct amu_counters *cpu_sample = per_cpu_ptr(&cpu_samples, cpu);
+ u64 delta_const_cnt, delta_core_cnt;
+ unsigned int seq, freq;
+ unsigned long last;
+
+ if (!freq_counters_valid(cpu))
+ goto fallback;
+
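+	/* Snapshot the deltas published by the tick without tearing. */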
+ do {
+ seq = raw_read_seqcount_begin(&cpu_sample->seq);
+ last = cpu_sample->last_update;
+ delta_core_cnt = cpu_sample->delta_core_cnt;
+ delta_const_cnt = cpu_sample->delta_const_cnt;
+ } while (read_seqcount_retry(&cpu_sample->seq, seq));
+
+	/*
+	 * Bail on an invalid count or when the last update was too long ago,
+	 * which covers idle and nohz_full CPUs. An idle housekeeping CPU is
+	 * the exception: the deltas from its last tick are still used.
+	 */
+ if (!delta_const_cnt || ((jiffies - last) > MAX_SAMPLE_AGE)) {
+ if (!(housekeeping_cpu(cpu, HK_TYPE_TICK) && idle_cpu(cpu)))
+ goto fallback;
+ }
+
+	/*
+	 * CPU frequency (kHz) = (reference rate / 1000) *
+	 *			 (delta delivered) / (delta reference)
+	 * The AMU reference performance counter increments at the same rate
+	 * as the System counter, CNTPCT_EL0, so arch_timer_get_rate() gives
+	 * the reference rate in Hz. cpufreq expects the result in kHz, hence
+	 * the division by 1000.
+	 */
+	return div64_u64(delta_core_cnt * (arch_timer_get_rate() / 1000),
+			 delta_const_cnt);
+
+fallback:
+ freq = cpufreq_quick_get(cpu);
+ return freq ? freq : cpufreq_get_hw_max_freq(cpu);
+}
+
static struct scale_freq_data amu_sfd = {
.source = SCALE_FREQ_SOURCE_ARCH,
.set_freq_scale = amu_scale_freq_tick,
--
2.43.1