Message-Id: <20180627160604.8154-4-ard.biesheuvel@linaro.org>
Date: Wed, 27 Jun 2018 18:06:02 +0200
From: Ard Biesheuvel <ard.biesheuvel@...aro.org>
To: linux-kernel@...r.kernel.org, linux-arm-kernel@...ts.infradead.org,
x86@...nel.org
Cc: Ard Biesheuvel <ard.biesheuvel@...aro.org>,
Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will.deacon@....com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Arnd Bergmann <arnd@...db.de>,
Steven Rostedt <rostedt@...dmis.org>,
Peter Zijlstra <peterz@...radead.org>
Subject: [PATCH 3/5] arm64/kernel: jump_label: switch to relative references

On a randomly chosen distro kernel build for arm64, vmlinux.o shows the
following sections, containing the jump label entries and the associated
RELA relocation records, respectively:

  ...
  [38088] __jump_table      PROGBITS         0000000000000000 00e19f30
       000000000002ea10  0000000000000000  WA       0     0     8
  [38089] .rela__jump_table RELA             0000000000000000 01fd8bb0
       000000000008be30  0000000000000018   I      38178   38088     8
  ...
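
(For reference, a listing in this shape can be produced with something
like

  $ readelf -S vmlinux.o | grep -A1 jump_table

where -S dumps the section header table of the object file.)
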
In other words, we have 190 KB worth of 'struct jump_entry' instances,
and 573 KB worth of RELA entries to relocate each entry's code, target
and key members. This means the RELA section occupies 10% of the .init
segment, and the two sections combined represent 5% of vmlinux's entire
memory footprint.
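
(To spell out the arithmetic: 0x2ea10 bytes at 24 bytes per
'struct jump_entry' works out to 7,958 entries, and 0x8be30 bytes at
24 bytes per Elf64_Rela record to 23,874 relocations, i.e. exactly
three per entry, one for each 64-bit member.)
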
So let's switch from 64-bit absolute references to 32-bit relative
references: this reduces the size of the __jump_table by 50%, and gets
rid of the RELA section entirely.
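
For readers following along: the jump_entry_code()/jump_entry_target()
accessors used in the jump_label.c hunk below come from the generic
code introduced earlier in this series. As a rough sketch only (the
layout is implied by the ".long 1b - ., %l[l_yes] - ., %c0 - ."
directives in the diff below, not quoted from the generic patch), the
relative entry and its decoding amount to:

  /* Sketch: three 32-bit self-relative members (s32 as in
   * <linux/types.h>), one per ".long" value emitted by the new asm. */
  struct jump_entry {
          s32 code;       /* branch location, relative to &entry->code   */
          s32 target;     /* jump target, relative to &entry->target     */
          s32 key;        /* static_key address, relative to &entry->key */
  };

  static inline unsigned long jump_entry_code(const struct jump_entry *entry)
  {
          return (unsigned long)&entry->code + entry->code;
  }

  static inline unsigned long jump_entry_target(const struct jump_entry *entry)
  {
          return (unsigned long)&entry->target + entry->target;
  }

Since each member encodes the distance to its own address, the values
can be filled in at build time and stay correct wherever the image is
loaded, which is what makes the runtime RELA relocations unnecessary.
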
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@...aro.org>
---
 arch/arm64/Kconfig                  |  1 +
 arch/arm64/include/asm/jump_label.h | 16 ++++------------
 arch/arm64/kernel/jump_label.c      |  6 +++---
 3 files changed, 8 insertions(+), 15 deletions(-)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 1940c6405d04..d17aa9614e69 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -91,6 +91,7 @@ config ARM64
select HAVE_ARCH_BITREVERSE
select HAVE_ARCH_HUGE_VMAP
select HAVE_ARCH_JUMP_LABEL
+ select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
index 1b5e0e843c3a..5e3e31df3805 100644
--- a/arch/arm64/include/asm/jump_label.h
+++ b/arch/arm64/include/asm/jump_label.h
@@ -30,8 +30,8 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
{
asm goto("1: nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
- ".align 3\n\t"
- ".quad 1b, %l[l_yes], %c0\n\t"
+ ".align 2\n\t"
+ ".long 1b - ., %l[l_yes] - ., %c0 - .\n\t"
".popsection\n\t"
: : "i"(&((char *)key)[branch]) : : l_yes);
@@ -44,8 +44,8 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool
{
asm goto("1: b %l[l_yes]\n\t"
".pushsection __jump_table, \"aw\"\n\t"
- ".align 3\n\t"
- ".quad 1b, %l[l_yes], %c0\n\t"
+ ".align 2\n\t"
+ ".long 1b - ., %l[l_yes] - ., %c0 - .\n\t"
".popsection\n\t"
: : "i"(&((char *)key)[branch]) : : l_yes);
@@ -54,13 +54,5 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool
return true;
}

-typedef u64 jump_label_t;
-
-struct jump_entry {
- jump_label_t code;
- jump_label_t target;
- jump_label_t key;
-};
-
#endif /* __ASSEMBLY__ */
#endif /* __ASM_JUMP_LABEL_H */
diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c
index c2dd1ad3e648..903d17023d77 100644
--- a/arch/arm64/kernel/jump_label.c
+++ b/arch/arm64/kernel/jump_label.c
@@ -25,12 +25,12 @@
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
- void *addr = (void *)entry->code;
+ void *addr = (void *)jump_entry_code(entry);
u32 insn;

if (type == JUMP_LABEL_JMP) {
- insn = aarch64_insn_gen_branch_imm(entry->code,
- entry->target,
+ insn = aarch64_insn_gen_branch_imm(jump_entry_code(entry),
+ jump_entry_target(entry),
AARCH64_INSN_BRANCH_NOLINK);
} else {
insn = aarch64_insn_gen_nop();
--
2.11.0