Message-ID: <20250207161939.46139-18-ajones@ventanamicro.com>
Date: Fri, 7 Feb 2025 17:19:47 +0100
From: Andrew Jones <ajones@...tanamicro.com>
To: linux-riscv@...ts.infradead.org,
linux-kernel@...r.kernel.org
Cc: paul.walmsley@...ive.com,
palmer@...belt.com,
charlie@...osinc.com,
jesse@...osinc.com,
Anup Patel <apatel@...tanamicro.com>
Subject: [PATCH 7/9] riscv: Prepare for unaligned access type table lookups

Probing unaligned accesses at boot is time-consuming. Provide a
function which will be used to look up the access type in a table
keyed by the ID registers. Vendors which provide table entries can
then skip the probing.

Signed-off-by: Andrew Jones <ajones@...tanamicro.com>
---
arch/riscv/kernel/unaligned_access_speed.c | 114 ++++++++++++---------
1 file changed, 66 insertions(+), 48 deletions(-)
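
(Not part of the patch, but to sketch where this is heading: a table
lookup keyed by the ID registers could look roughly like the below.
The struct layout, the table name, and keying everything off CPU 0's
cached mvendorid/marchid/mimpid are illustrative assumptions on my
part; the real interface is defined by later patches in the series.)

/* Hypothetical table entry; fields are illustrative only. */
struct unaligned_access_entry {
	unsigned long vendorid;		/* mvendorid CSR value */
	unsigned long archid;		/* marchid CSR value */
	unsigned long impid;		/* mimpid CSR value */
	long access_type;		/* a RISCV_HWPROBE_MISALIGNED_SCALAR_* value */
};

static const struct unaligned_access_entry access_table[] = {
	/* vendor-provided entries would go here */
};

static bool check_unaligned_access_table(void)
{
	int cpu, i;

	for (i = 0; i < ARRAY_SIZE(access_table); i++) {
		const struct unaligned_access_entry *e = &access_table[i];

		/* Simplification: assume homogeneous IDs, match against CPU 0. */
		if (e->vendorid != riscv_cached_mvendorid(0) ||
		    e->archid != riscv_cached_marchid(0) ||
		    e->impid != riscv_cached_mimpid(0))
			continue;

		/* Known implementation: record the type everywhere, skip probing. */
		for_each_online_cpu(cpu)
			per_cpu(misaligned_access_speed, cpu) = e->access_type;
		return true;
	}

	return false;
}
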
diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c
index d9d4ca1fadc7..f8497097e79d 100644
--- a/arch/riscv/kernel/unaligned_access_speed.c
+++ b/arch/riscv/kernel/unaligned_access_speed.c
@@ -130,6 +130,50 @@ static void __init check_unaligned_access_nonboot_cpu(void *param)
check_unaligned_access(pages[cpu]);
}

+/* Measure unaligned access speed on all CPUs present at boot in parallel. */
+static void __init check_unaligned_access_speed_all_cpus(void)
+{
+ unsigned int cpu;
+ unsigned int cpu_count = num_possible_cpus();
+ struct page **bufs = kcalloc(cpu_count, sizeof(*bufs), GFP_KERNEL);
+
+ if (!bufs) {
+ pr_warn("Allocation failure, not measuring misaligned performance\n");
+ return;
+ }
+
+ /*
+ * Allocate separate buffers for each CPU so there's no fighting over
+ * cache lines.
+ */
+ for_each_cpu(cpu, cpu_online_mask) {
+ bufs[cpu] = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
+ if (!bufs[cpu]) {
+ pr_warn("Allocation failure, not measuring misaligned performance\n");
+ goto out;
+ }
+ }
+
+ /* Check everybody except 0, who stays behind to tend jiffies. */
+ on_each_cpu(check_unaligned_access_nonboot_cpu, bufs, 1);
+
+ /* Check core 0. */
+ smp_call_on_cpu(0, check_unaligned_access, bufs[0], true);
+
+out:
+ for_each_cpu(cpu, cpu_online_mask) {
+ if (bufs[cpu])
+ __free_pages(bufs[cpu], MISALIGNED_BUFFER_ORDER);
+ }
+
+ kfree(bufs);
+}
+#else /* CONFIG_RISCV_PROBE_UNALIGNED_ACCESS */
+static void __init check_unaligned_access_speed_all_cpus(void)
+{
+}
+#endif
+
DEFINE_STATIC_KEY_FALSE(fast_unaligned_access_speed_key);

static void modify_unaligned_access_branches(cpumask_t *mask, int weight)
@@ -186,8 +230,17 @@ static int __init lock_and_set_unaligned_access_static_branch(void)

arch_initcall_sync(lock_and_set_unaligned_access_static_branch);

+static bool check_unaligned_access_table(void)
+{
+ return false;
+}
+
static int riscv_online_cpu(unsigned int cpu)
{
+ if (check_unaligned_access_table())
+ goto exit;
+
+#ifdef CONFIG_RISCV_PROBE_UNALIGNED_ACCESS
static struct page *buf;

/* We are already set since the last check */
@@ -203,6 +256,7 @@ static int riscv_online_cpu(unsigned int cpu)

check_unaligned_access(buf);
__free_pages(buf, MISALIGNED_BUFFER_ORDER);
+#endif

exit:
set_unaligned_access_static_branches();
@@ -217,50 +271,6 @@ static int riscv_offline_cpu(unsigned int cpu)
return 0;
}

-/* Measure unaligned access speed on all CPUs present at boot in parallel. */
-static void __init check_unaligned_access_speed_all_cpus(void)
-{
- unsigned int cpu;
- unsigned int cpu_count = num_possible_cpus();
- struct page **bufs = kcalloc(cpu_count, sizeof(*bufs), GFP_KERNEL);
-
- if (!bufs) {
- pr_warn("Allocation failure, not measuring misaligned performance\n");
- return;
- }
-
- /*
- * Allocate separate buffers for each CPU so there's no fighting over
- * cache lines.
- */
- for_each_cpu(cpu, cpu_online_mask) {
- bufs[cpu] = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
- if (!bufs[cpu]) {
- pr_warn("Allocation failure, not measuring misaligned performance\n");
- goto out;
- }
- }
-
- /* Check everybody except 0, who stays behind to tend jiffies. */
- on_each_cpu(check_unaligned_access_nonboot_cpu, bufs, 1);
-
- /* Check core 0. */
- smp_call_on_cpu(0, check_unaligned_access, bufs[0], true);
-
-out:
- for_each_cpu(cpu, cpu_online_mask) {
- if (bufs[cpu])
- __free_pages(bufs[cpu], MISALIGNED_BUFFER_ORDER);
- }
-
- kfree(bufs);
-}
-#else /* CONFIG_RISCV_PROBE_UNALIGNED_ACCESS */
-static void __init check_unaligned_access_speed_all_cpus(void)
-{
-}
-#endif
-
#ifdef CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS
static void check_vector_unaligned_access(struct work_struct *work __always_unused)
{
@@ -370,6 +380,11 @@ static int __init vec_check_unaligned_access_speed_all_cpus(void *unused __alway
}
#endif

+static bool check_vector_unaligned_access_table(void)
+{
+ return false;
+}
+
static int riscv_online_cpu_vec(unsigned int cpu)
{
if (!has_vector()) {
@@ -377,6 +392,9 @@ static int riscv_online_cpu_vec(unsigned int cpu)
return 0;
}

+ if (check_vector_unaligned_access_table())
+ return 0;
+
#ifdef CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS
if (per_cpu(vector_misaligned_access, cpu) != RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN)
return 0;
@@ -392,13 +410,15 @@ static int __init check_unaligned_access_all_cpus(void)
{
int cpu;

- if (!check_unaligned_access_emulated_all_cpus())
+ if (!check_unaligned_access_table() &&
+ !check_unaligned_access_emulated_all_cpus())
check_unaligned_access_speed_all_cpus();

if (!has_vector()) {
for_each_online_cpu(cpu)
per_cpu(vector_misaligned_access, cpu) = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED;
- } else if (!check_vector_unaligned_access_emulated_all_cpus() &&
+ } else if (!check_vector_unaligned_access_table() &&
+ !check_vector_unaligned_access_emulated_all_cpus() &&
IS_ENABLED(CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS)) {
kthread_run(vec_check_unaligned_access_speed_all_cpus,
NULL, "vec_check_unaligned_access_speed_all_cpus");
@@ -408,10 +428,8 @@ static int __init check_unaligned_access_all_cpus(void)
* Setup hotplug callbacks for any new CPUs that come online or go
* offline.
*/
-#ifdef CONFIG_RISCV_PROBE_UNALIGNED_ACCESS
cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online",
riscv_online_cpu, riscv_offline_cpu);
-#endif
cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online",
riscv_online_cpu_vec, NULL);

--
2.48.1