Message-ID: <4007f3ef167f93ffd9fcae841e144a0fc89f0117.camel@xry111.site>
Date: Thu, 28 Jul 2022 00:31:07 +0800
From: Xi Ruoyao <xry111@...111.site>
To: loongarch@...ts.linux.dev
Cc: linux-kernel@...r.kernel.org, WANG Xuerui <kernel@...0n.name>,
Huacai Chen <chenhuacai@...nel.org>
Subject: [PATCH 5/5] LoongArch: Support modules with new relocation types
If GAS 2.40 and/or GCC 13 is used to build the kernel, the modules will
contain R_LARCH_B26, R_LARCH_PCALA_HI20, R_LARCH_PCALA_LO12,
R_LARCH_GOT_PC_HI20, and R_LARCH_GOT_PC_LO12 relocations. Support them
in the module loader so that a kernel built with the latest toolchain
can load such modules.
Signed-off-by: Xi Ruoyao <xry111@...111.site>
---
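Note: the PCALA_HI20/PCALA_LO12 pair corresponds to a pcalau12i +
addi.d (or ld.d) sequence, and the low 12-bit immediate is sign-extended
by the hardware; that is why apply_r_larch_pcala_hi20() below rounds the
target with +0x800 before masking.  A small stand-alone sketch of that
arithmetic, with made-up addresses, just to illustrate (not part of the
patch itself):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pc     = 0x9000000001234000ULL;	/* hypothetical pcalau12i address */
	uint64_t target = 0x9000000005678abcULL;	/* hypothetical symbol address    */

	/* What the two relocations store in the instruction words: */
	int64_t hi20 = (int64_t)(((target + 0x800) & ~0xfffULL) - (pc & ~0xfffULL)) >> 12;
	int64_t lo12 = (int64_t)((target & 0xfff) ^ 0x800) - 0x800;	/* sign-extend 12 bits */

	/* What the pcalau12i + addi.d pair computes at run time: */
	uint64_t reconstructed = (pc & ~0xfffULL) + ((uint64_t)hi20 << 12) + (uint64_t)lo12;

	printf("target        = 0x%016llx\n", (unsigned long long)target);
	printf("reconstructed = 0x%016llx\n", (unsigned long long)reconstructed);
	return reconstructed == target ? 0 : 1;
}

Without the +0x800 rounding, any target with bit 11 set would come out
0x1000 too low once the sign-extended low part is added.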
arch/loongarch/include/asm/elf.h | 37 +++++++++++
arch/loongarch/kernel/module-sections.c | 12 +++-
arch/loongarch/kernel/module.c | 83 +++++++++++++++++++++++++
3 files changed, 130 insertions(+), 2 deletions(-)
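For R_LARCH_B26, the handler packs a 28-bit, 4-byte-aligned PC-relative
offset into the two immediate fields of b/bl: offset[17:2] goes into
instruction bits [25:10] and offset[27:18] into bits [9:0].  A round-trip
sketch of that packing (the offset and the bl opcode word are only
example values, not taken from a real module):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t offset = -0x123458;	/* example PC-relative byte offset, multiple of 4 */
	uint32_t insn  = 0x54000000;	/* bl with both immediate fields zeroed           */

	/* Encode, mirroring apply_r_larch_b26() */
	insn &= ~(uint32_t)0x3ffffff;
	insn |= (uint32_t)((offset >> 18) & 0x3ff);
	insn |= (uint32_t)(((offset >> 2) & 0xffff) << 10);

	/* Decode: reassemble the 26-bit word offset, sign-extend, scale by 4 */
	uint32_t hi10 = insn & 0x3ff;
	uint32_t lo16 = (insn >> 10) & 0xffff;
	int64_t words = (int64_t)(((hi10 << 16) | lo16) ^ 0x2000000) - 0x2000000;
	int64_t decoded = words * 4;

	printf("offset  = %lld\ndecoded = %lld\n", (long long)offset, (long long)decoded);
	return decoded == offset ? 0 : 1;
}

The 28-bit range is what signed_imm_check(offset, 28) and the alignment
check enforce; targets outside that range go through a PLT entry instead.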
diff --git a/arch/loongarch/include/asm/elf.h b/arch/loongarch/include/asm/elf.h
index f3960b18a90e..aeca7a9924ea 100644
--- a/arch/loongarch/include/asm/elf.h
+++ b/arch/loongarch/include/asm/elf.h
@@ -74,6 +74,43 @@
#define R_LARCH_SUB64 56
#define R_LARCH_GNU_VTINHERIT 57
#define R_LARCH_GNU_VTENTRY 58
+#define R_LARCH_B16 64
+#define R_LARCH_B21 65
+#define R_LARCH_B26 66
+#define R_LARCH_ABS_HI20 67
+#define R_LARCH_ABS_LO12 68
+#define R_LARCH_ABS64_LO20 69
+#define R_LARCH_ABS64_HI12 70
+#define R_LARCH_PCALA_HI20 71
+#define R_LARCH_PCALA_LO12 72
+#define R_LARCH_PCALA64_LO20 73
+#define R_LARCH_PCALA64_HI12 74
+#define R_LARCH_GOT_PC_HI20 75
+#define R_LARCH_GOT_PC_LO12 76
+#define R_LARCH_GOT64_PC_LO20 77
+#define R_LARCH_GOT64_PC_HI12 78
+#define R_LARCH_GOT_HI20 79
+#define R_LARCH_GOT_LO12 80
+#define R_LARCH_GOT64_LO20 81
+#define R_LARCH_GOT64_HI12 82
+#define R_LARCH_TLS_LE_HI20 83
+#define R_LARCH_TLS_LE_LO12 84
+#define R_LARCH_TLS_LE64_LO20 85
+#define R_LARCH_TLS_LE64_HI12 86
+#define R_LARCH_TLS_IE_PC_HI20 87
+#define R_LARCH_TLS_IE_PC_LO12 88
+#define R_LARCH_TLS_IE64_PC_LO20 89
+#define R_LARCH_TLS_IE64_PC_HI12 90
+#define R_LARCH_TLS_IE_HI20 91
+#define R_LARCH_TLS_IE_LO12 92
+#define R_LARCH_TLS_IE64_LO20 93
+#define R_LARCH_TLS_IE64_HI12 94
+#define R_LARCH_TLS_LD_PC_HI20 95
+#define R_LARCH_TLS_LD_HI20 96
+#define R_LARCH_TLS_GD_PC_HI20 97
+#define R_LARCH_TLS_GD_HI20 98
+#define R_LARCH_32_PCREL 99
+#define R_LARCH_RELAX 100
#ifndef ELF_ARCH
diff --git a/arch/loongarch/kernel/module-sections.c b/arch/loongarch/kernel/module-sections.c
index 73976addbf60..9baf119388b9 100644
--- a/arch/loongarch/kernel/module-sections.c
+++ b/arch/loongarch/kernel/module-sections.c
@@ -76,12 +76,20 @@ static void count_max_entries(Elf_Rela *relas, int num,
	for (i = 0; i < num; i++) {
		type = ELF_R_TYPE(relas[i].r_info);
-		if (type == R_LARCH_SOP_PUSH_PLT_PCREL) {
+		switch (type) {
+		case R_LARCH_SOP_PUSH_PLT_PCREL:
+		case R_LARCH_B26:
			if (!duplicate_rela(relas, i))
				(*plts)++;
-		} else if (type == R_LARCH_SOP_PUSH_GPREL)
+			break;
+		case R_LARCH_SOP_PUSH_GPREL:
+		case R_LARCH_GOT_PC_HI20:
			if (!duplicate_rela(relas, i))
				(*gots)++;
+			break;
+		default:
+			break; /* Do nothing. */
+		}
	}
}
diff --git a/arch/loongarch/kernel/module.c b/arch/loongarch/kernel/module.c
index e5f1fd022cd0..1aa6a58b1c21 100644
--- a/arch/loongarch/kernel/module.c
+++ b/arch/loongarch/kernel/module.c
@@ -291,6 +291,84 @@ static int apply_r_larch_add_sub(struct module *mod, u32 *location, Elf_Addr v,
}
}
+static int apply_r_larch_b26(struct module *mod, u32 *location, Elf_Addr v,
+		s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+	ptrdiff_t offset = (void *)v - (void *)location;
+
+	if (offset >= SZ_128M)
+		v = module_emit_plt_entry(mod, v);
+
+	if (offset < -SZ_128M)
+		v = module_emit_plt_entry(mod, v);
+
+	offset = (void *)v - (void *)location;
+
+	if (!signed_imm_check(offset, 28)) {
+		pr_err("module %s: jump offset = 0x%llx overflow! dangerous R_LARCH_B26 (%u) relocation\n",
+		       mod->name, (long long)offset, type);
+		return -ENOEXEC;
+	}
+
+	if (offset & 3) {
+		pr_err("module %s: jump offset = 0x%llx unaligned! dangerous R_LARCH_B26 (%u) relocation\n",
+		       mod->name, (long long)offset, type);
+		return -ENOEXEC;
+	}
+
+	*location &= ~(u32)0x3ffffff;
+	*location |= (offset >> 18) & 0x3ff;
+	*location |= ((offset >> 2) & 0xffff) << 10;
+	return 0;
+}
+
+static int apply_r_larch_pcala_hi20(struct module *mod, u32 *location,
+		Elf_Addr v, s64 *rela_stack, size_t *rela_stack_top,
+		unsigned int type)
+{
+	ptrdiff_t offset = (void *)((v + 0x800) & ~0xfff) -
+			   (void *)((Elf_Addr)location & ~0xfff);
+
+	if (!signed_imm_check(offset, 32)) {
+		pr_err("module %s: PCALA offset = 0x%llx does not fit in 32-bit signed and is unsupported by kernel! dangerous %s (%u) relocation\n",
+		       mod->name, (long long)offset, __func__, type);
+		return -ENOEXEC;
+	}
+
+	*location &= ~((u32)0xfffff << 5);
+	*location |= ((offset >> 12) & 0xfffff) << 5;
+	return 0;
+}
+
+static int apply_r_larch_got_pc_hi20(struct module *mod, u32 *location,
+		Elf_Addr v, s64 *rela_stack, size_t *rela_stack_top,
+		unsigned int type)
+{
+	Elf_Addr got = module_emit_got_entry(mod, v);
+
+	return apply_r_larch_pcala_hi20(mod, location, got, rela_stack,
+					rela_stack_top, type);
+}
+
+static int apply_r_larch_pcala_lo12(struct module *mod, u32 *location,
+		Elf_Addr v, s64 *rela_stack, size_t *rela_stack_top,
+		unsigned int type)
+{
+	*location &= ~((u32)0xfff << 10);
+	*location |= ((u32)v & 0xfff) << 10;
+	return 0;
+}
+
+static int apply_r_larch_got_pc_lo12(struct module *mod, u32 *location,
+		Elf_Addr v, s64 *rela_stack, size_t *rela_stack_top,
+		unsigned int type)
+{
+	Elf_Addr got = module_emit_got_entry(mod, v);
+
+	return apply_r_larch_pcala_lo12(mod, location, got, rela_stack,
+					rela_stack_top, type);
+}
+
/*
* reloc_handlers_rela() - Apply a particular relocation to a module
* @mod: the module to apply the reloc to
@@ -321,6 +399,11 @@ static reloc_rela_handler reloc_rela_handlers[] = {
	[R_LARCH_SOP_POP_32_S_10_5 ... R_LARCH_SOP_POP_32_U] = apply_r_larch_sop_imm_field,
	[R_LARCH_ADD32 ... R_LARCH_SUB64] = apply_r_larch_add_sub,
	[R_LARCH_SOP_PUSH_GPREL] = apply_r_larch_sop_push_gprel,
+	[R_LARCH_B26] = apply_r_larch_b26,
+	[R_LARCH_PCALA_HI20] = apply_r_larch_pcala_hi20,
+	[R_LARCH_PCALA_LO12] = apply_r_larch_pcala_lo12,
+	[R_LARCH_GOT_PC_HI20] = apply_r_larch_got_pc_hi20,
+	[R_LARCH_GOT_PC_LO12] = apply_r_larch_got_pc_lo12,
};
int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
--
2.37.0