Message-Id: <20200326032420.27220-15-pasha.tatashin@soleen.com>
Date: Wed, 25 Mar 2020 23:24:16 -0400
From: Pavel Tatashin <pasha.tatashin@...een.com>
To: pasha.tatashin@...een.com, jmorris@...ei.org, sashal@...nel.org,
ebiederm@...ssion.com, kexec@...ts.infradead.org,
linux-kernel@...r.kernel.org, corbet@....net,
catalin.marinas@....com, will@...nel.org,
linux-arm-kernel@...ts.infradead.org, maz@...nel.org,
james.morse@....com, vladimir.murzin@....com,
matthias.bgg@...il.com, bhsharma@...hat.com, linux-mm@...ck.org,
mark.rutland@....com, steve.capper@....com, rfontana@...hat.com,
tglx@...utronix.de, selindag@...il.com
Subject: [PATCH v9 14/18] arm64: kexec: offset for relocation function
Soon, the relocation function will share the same page with the EL2
vectors. Export the offset of arm64_relocate_new_kernel within this
page, and also the total size of the relocation code, which will
include both the function and the EL2 vectors.
Signed-off-by: Pavel Tatashin <pasha.tatashin@...een.com>
---
arch/arm64/include/asm/kexec.h | 7 +++++++
arch/arm64/kernel/machine_kexec.c | 13 ++++---------
arch/arm64/kernel/relocate_kernel.S | 16 +++++++++++-----
3 files changed, 22 insertions(+), 14 deletions(-)
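
For reference, a minimal userspace sketch (not kernel code) of how
machine_kexec_post_load() is expected to consume the three new symbols:
copy the whole relocation blob into the control page, then form the
entry point by adding kexec_kern_reloc_offset to the physical address
of the copy. All sizes, offsets and addresses below are made-up
placeholders, not values taken from the build.

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-ins for the assembler-exported symbols; values are placeholders. */
static const unsigned char kexec_relocate_code_start[0x200]; /* code + future EL2 vectors */
static const unsigned long kexec_relocate_code_size = sizeof(kexec_relocate_code_start);
static const unsigned long kexec_kern_reloc_offset  = 0x40;  /* arm64_relocate_new_kernel - start */

int main(void)
{
	/* Pretend control page; in the kernel this comes from kimage allocation. */
	static unsigned char reloc_code[4096];
	uint64_t reloc_code_pa = 0x80000000;	/* made-up physical address of the copy */

	/* Copy the whole blob (function + EL2 vectors), as the patch now does. */
	memcpy(reloc_code, kexec_relocate_code_start, kexec_relocate_code_size);

	/* The entry point is no longer the start of the copy but start + offset. */
	uint64_t kern_reloc = reloc_code_pa + kexec_kern_reloc_offset;

	assert(kexec_kern_reloc_offset < kexec_relocate_code_size);
	printf("kern_reloc = 0x%" PRIx64 "\n", kern_reloc);
	return 0;
}
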
diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h
index 990185744148..d944c2e289b2 100644
--- a/arch/arm64/include/asm/kexec.h
+++ b/arch/arm64/include/asm/kexec.h
@@ -90,6 +90,13 @@ static inline void crash_prepare_suspend(void) {}
static inline void crash_post_resume(void) {}
#endif
+#if defined(CONFIG_KEXEC_CORE)
+/* The beginning and size of relocation code to stage 2 kernel */
+extern const unsigned long kexec_relocate_code_size;
+extern const unsigned char kexec_relocate_code_start[];
+extern const unsigned long kexec_kern_reloc_offset;
+#endif
+
/*
* kern_reloc_arg is passed to kernel relocation function as an argument.
* head kimage->head, allows to traverse through relocation segments.
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index b1122eea627e..ab571fca9bd1 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -23,10 +23,6 @@
#include "cpu-reset.h"
-/* Global variables for the arm64_relocate_new_kernel routine. */
-extern const unsigned char arm64_relocate_new_kernel[];
-extern const unsigned long arm64_relocate_new_kernel_size;
-
/**
* kexec_image_info - For debugging output.
*/
@@ -82,9 +78,8 @@ int machine_kexec_post_load(struct kimage *kimage)
if (!kern_reloc_arg)
return -ENOMEM;
- memcpy(reloc_code, arm64_relocate_new_kernel,
- arm64_relocate_new_kernel_size);
- kimage->arch.kern_reloc = __pa(reloc_code);
+ memcpy(reloc_code, kexec_relocate_code_start, kexec_relocate_code_size);
+ kimage->arch.kern_reloc = __pa(reloc_code) + kexec_kern_reloc_offset;
kimage->arch.kern_reloc_arg = __pa(kern_reloc_arg);
kern_reloc_arg->head = kimage->head;
kern_reloc_arg->entry_addr = kimage->start;
@@ -189,7 +184,7 @@ void machine_kexec(struct kimage *kimage)
"Some CPUs may be stale, kdump will be unreliable.\n");
/* Flush the reboot_code_buffer in preparation for its execution. */
- __flush_dcache_area(reboot_code_buffer, arm64_relocate_new_kernel_size);
+ __flush_dcache_area(reboot_code_buffer, kexec_relocate_code_size);
/*
* Although we've killed off the secondary CPUs, we don't update
@@ -198,7 +193,7 @@ void machine_kexec(struct kimage *kimage)
* the offline CPUs. Therefore, we must use the __* variant here.
*/
__flush_icache_range((uintptr_t)reboot_code_buffer,
- arm64_relocate_new_kernel_size);
+ kexec_relocate_code_size);
/* Flush the kimage list and its buffers. */
kexec_list_flush(kimage);
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
index 22ccdcb106d3..aa9f2b2cd77c 100644
--- a/arch/arm64/kernel/relocate_kernel.S
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -14,6 +14,9 @@
#include <asm/page.h>
#include <asm/sysreg.h>
+.globl kexec_relocate_code_start
+kexec_relocate_code_start:
+
/*
* arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it.
*
@@ -86,13 +89,16 @@ ENTRY(arm64_relocate_new_kernel)
.ltorg
END(arm64_relocate_new_kernel)
-.Lcopy_end:
+.Lkexec_relocate_code_end:
.org KEXEC_CONTROL_PAGE_SIZE
.align 3 /* To keep the 64-bit values below naturally aligned. */
/*
- * arm64_relocate_new_kernel_size - Number of bytes to copy to the
+ * kexec_relocate_code_size - Number of bytes to copy to the
* control_code_page.
*/
-.globl arm64_relocate_new_kernel_size
-arm64_relocate_new_kernel_size:
- .quad .Lcopy_end - arm64_relocate_new_kernel
+.globl kexec_relocate_code_size
+kexec_relocate_code_size:
+ .quad .Lkexec_relocate_code_end - kexec_relocate_code_start
+.globl kexec_kern_reloc_offset
+kexec_kern_reloc_offset:
+ .quad arm64_relocate_new_kernel - kexec_relocate_code_start
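
For readers unfamiliar with this pattern: the .quad values exported
above are read from C simply as extern const unsigned long (see the
kexec.h hunk). Below is a toy userspace demonstration of that
technique, with placeholder symbol names and GCC/Clang file-scope asm
standing in for the .S file; it is an illustration of the mechanism,
not code from the patch.

#include <stdio.h>

/* Stand-in for the assembler side (normally a separate .S file). */
__asm__(
	".globl demo_code_start\n"
	"demo_code_start:\n"
	"	.skip 64\n"			/* pretend this is relocation code */
	"	.balign 8\n"			/* keep the 64-bit value naturally aligned */
	".globl demo_code_size\n"
	"demo_code_size:\n"
	"	.quad . - demo_code_start\n"	/* number of bytes before this label */
);

/* Stand-in for the C declarations (normally in a header such as kexec.h). */
extern const unsigned char demo_code_start[];
extern const unsigned long demo_code_size;

int main(void)
{
	printf("demo_code_start = %p\n", (const void *)demo_code_start);
	printf("demo_code_size  = %lu bytes\n", demo_code_size);
	return 0;
}
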
--
2.17.1