Message-Id: <20210312004919.669614-14-samitolvanen@google.com>
Date: Thu, 11 Mar 2021 16:49:15 -0800
From: Sami Tolvanen <samitolvanen@...gle.com>
To: Kees Cook <keescook@...omium.org>
Cc: Nathan Chancellor <nathan@...nel.org>,
Nick Desaulniers <ndesaulniers@...gle.com>,
Masahiro Yamada <masahiroy@...nel.org>,
Will Deacon <will@...nel.org>, Jessica Yu <jeyu@...nel.org>,
Arnd Bergmann <arnd@...db.de>, Tejun Heo <tj@...nel.org>,
bpf@...r.kernel.org, linux-hardening@...r.kernel.org,
linux-arch@...r.kernel.org, linux-arm-kernel@...ts.infradead.org,
linux-kbuild@...r.kernel.org, linux-pci@...r.kernel.org,
linux-kernel@...r.kernel.org,
Sami Tolvanen <samitolvanen@...gle.com>
Subject: [PATCH 13/17] arm64: use __pa_function

With CONFIG_CFI_CLANG, the compiler replaces function address
references with the address of the function's CFI jump table
entry. This means that __pa_symbol(function) returns the physical
address of the jump table entry, which can lead to address space
confusion as the jump table points to the function's virtual
address. Therefore, use the __pa_function() macro to ensure we are
always taking the address of the actual function instead.
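
For illustration only: the __pa_function() macro itself is introduced
earlier in this series, so its definition is not part of this patch. The
sketch below shows one possible shape for such a helper on arm64, where an
assembly-level reference to the symbol keeps the compiler from substituting
the CFI jump table entry; it is illustrative rather than the series'
actual code.

  /*
   * Illustrative sketch only, not the definition added by this series:
   * resolve the symbol's address in inline assembly, where the compiler
   * cannot rewrite the reference into the CFI jump table entry, and only
   * then convert the result to a physical address.
   */
  #include <linux/stringify.h>

  #ifdef CONFIG_CFI_CLANG
  #define __pa_function(x) ({                                           \
          void *__addr;                                                 \
          /* adrp/add yields the real symbol address, not the CFI    */ \
          /* jump table entry the C-level reference would produce.   */ \
          asm("adrp %0, " __stringify(x) "\n\t"                         \
              "add  %0, %0, :lo12:" __stringify(x)                      \
              : "=r" (__addr));                                         \
          __pa_symbol(__addr);                                          \
  })
  #else
  #define __pa_function(x)        __pa_symbol(x)
  #endif

Callers only need to replace __pa_symbol(func) with __pa_function(func), as
the hunks below do; data symbols such as swapper_pg_dir keep using
__pa_symbol(), since only function address references are rewritten under
CFI.
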
Signed-off-by: Sami Tolvanen <samitolvanen@...gle.com>
---
 arch/arm64/include/asm/mmu_context.h      | 2 +-
 arch/arm64/kernel/acpi_parking_protocol.c | 2 +-
 arch/arm64/kernel/cpu-reset.h             | 2 +-
 arch/arm64/kernel/cpufeature.c            | 2 +-
 arch/arm64/kernel/psci.c                  | 3 ++-
 arch/arm64/kernel/smp_spin_table.c        | 2 +-
 6 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 70ce8c1d2b07..519d535532be 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -157,7 +157,7 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp)
                 ttbr1 |= TTBR_CNP_BIT;
         }
 
-        replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
+        replace_phys = (void *)__pa_function(idmap_cpu_replace_ttbr1);
 
         cpu_install_idmap();
         replace_phys(ttbr1);
diff --git a/arch/arm64/kernel/acpi_parking_protocol.c b/arch/arm64/kernel/acpi_parking_protocol.c
index e7c941d8340d..e7f3af6043c5 100644
--- a/arch/arm64/kernel/acpi_parking_protocol.c
+++ b/arch/arm64/kernel/acpi_parking_protocol.c
@@ -99,7 +99,7 @@ static int acpi_parking_protocol_cpu_boot(unsigned int cpu)
          * that read this address need to convert this address to the
          * Boot-Loader's endianness before jumping.
          */
-        writeq_relaxed(__pa_symbol(secondary_entry), &mailbox->entry_point);
+        writeq_relaxed(__pa_function(secondary_entry), &mailbox->entry_point);
         writel_relaxed(cpu_entry->gic_cpu_id, &mailbox->cpu_id);
 
         arch_send_wakeup_ipi_mask(cpumask_of(cpu));
diff --git a/arch/arm64/kernel/cpu-reset.h b/arch/arm64/kernel/cpu-reset.h
index ed50e9587ad8..dfba8cf921e5 100644
--- a/arch/arm64/kernel/cpu-reset.h
+++ b/arch/arm64/kernel/cpu-reset.h
@@ -22,7 +22,7 @@ static inline void __noreturn cpu_soft_restart(unsigned long entry,
 
         unsigned long el2_switch = !is_kernel_in_hyp_mode() &&
                 is_hyp_mode_available();
-        restart = (void *)__pa_symbol(__cpu_soft_restart);
+        restart = (void *)__pa_function(__cpu_soft_restart);
 
         cpu_install_idmap();
         restart(el2_switch, entry, arg0, arg1, arg2);
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 066030717a4c..7ec1c2ccdc0b 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1460,7 +1460,7 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
         if (arm64_use_ng_mappings)
                 return;
 
-        remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
+        remap_fn = (void *)__pa_function(idmap_kpti_install_ng_mappings);
 
         cpu_install_idmap();
         remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 62d2bda7adb8..bfb1a6f8282d 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -38,7 +38,8 @@ static int __init cpu_psci_cpu_prepare(unsigned int cpu)
 
 static int cpu_psci_cpu_boot(unsigned int cpu)
 {
-        int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa_symbol(secondary_entry));
+        int err = psci_ops.cpu_on(cpu_logical_map(cpu),
+                                  __pa_function(secondary_entry));
         if (err)
                 pr_err("failed to boot CPU%d (%d)\n", cpu, err);
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 056772c26098..a80ff9092e86 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -88,7 +88,7 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
          * boot-loader's endianness before jumping. This is mandated by
          * the boot protocol.
          */
-        writeq_relaxed(__pa_symbol(secondary_holding_pen), release_addr);
+        writeq_relaxed(__pa_function(secondary_holding_pen), release_addr);
         __flush_dcache_area((__force void *)release_addr,
                             sizeof(*release_addr));
 
--
2.31.0.rc2.261.g7f71774620-goog