Message-Id: <20200317170910.652251150@infradead.org>
Date: Tue, 17 Mar 2020 18:02:49 +0100
From: Peter Zijlstra <peterz@...radead.org>
To: tglx@...utronix.de, jpoimboe@...hat.com
Cc: linux-kernel@...r.kernel.org, x86@...nel.org, peterz@...radead.org,
mhiramat@...nel.org, mbenes@...e.cz, brgerst@...il.com
Subject: [PATCH v2 15/19] objtool: Optimize find_rela_by_dest_range()

Perf shows that significant time is spent in find_rela_by_dest(); this is
because we have to iterate the address space byte by byte, looking for
relocation entries.

Optimize this by reducing the address space granularity.

This reduces the objtool runtime on vmlinux.o from 4.8 to 4.4 seconds.

Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
tools/objtool/elf.c | 15 +++++++++++----
tools/objtool/elf.h | 16 +++++++++++++++-
2 files changed, 26 insertions(+), 5 deletions(-)
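
To illustrate the approach outside of objtool, here is a minimal standalone
sketch: the chained hash table, NR_BUCKETS, offset_hash(), insert_rela() and
find_rela_in_range() below are invented for the example (the real code uses
the kernel-style hashtable declared in elf.h), while OFFSET_STRIDE and
for_offset_range() mirror the additions in this patch. Relocations are keyed
by their offset rounded down to a 16-byte stride, so a range lookup probes
one bucket per stride instead of one per byte:

#include <stdio.h>

#define OFFSET_STRIDE_BITS	4
#define OFFSET_STRIDE		(1UL << OFFSET_STRIDE_BITS)
#define OFFSET_STRIDE_MASK	(~(OFFSET_STRIDE - 1))

/* Visit one offset per 16-byte stride covering [_start, _end) -- mirrors for_offset_range(). */
#define for_offset_range(_offset, _start, _end)			\
	for (_offset = ((_start) & OFFSET_STRIDE_MASK);		\
	     _offset <= ((_end) & OFFSET_STRIDE_MASK);		\
	     _offset += OFFSET_STRIDE)

struct rela {
	unsigned long offset;
	struct rela *next;		/* bucket chain */
};

#define NR_BUCKETS 1024
static struct rela *buckets[NR_BUCKETS];

/* Toy hash: every offset inside one 16-byte stride lands in the same bucket. */
static unsigned long offset_hash(unsigned long offset)
{
	return (offset & OFFSET_STRIDE_MASK) % NR_BUCKETS;
}

static void insert_rela(struct rela *rela)
{
	unsigned long b = offset_hash(rela->offset);

	rela->next = buckets[b];
	buckets[b] = rela;
}

/*
 * Probe one bucket per stride and return the lowest-offset relocation
 * inside [offset, offset + len), or NULL if the range is empty.
 */
static struct rela *find_rela_in_range(unsigned long offset, unsigned int len)
{
	struct rela *rela, *r = NULL;
	unsigned long o;

	for_offset_range(o, offset, offset + len) {
		for (rela = buckets[offset_hash(o)]; rela; rela = rela->next) {
			if (rela->offset >= offset && rela->offset < offset + len) {
				if (!r || rela->offset < r->offset)
					r = rela;
			}
		}
		if (r)
			return r;
	}
	return NULL;
}

int main(void)
{
	struct rela r1 = { .offset = 0x105 }, r2 = { .offset = 0x132 };
	struct rela *found;

	insert_rela(&r1);
	insert_rela(&r2);

	found = find_rela_in_range(0x100, 0x40);	/* at most 5 bucket probes for a 64-byte range, not 64 */
	if (found)
		printf("first rela in range at 0x%lx\n", found->offset);

	return 0;
}

Returning as soon as one stride yields a match still gives the lowest-offset
relocation in the range, because later strides can only contain higher
offsets; that is why the patch tracks 'r' and returns it at the end of each
stride rather than on the first hit.
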
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -215,7 +215,7 @@ struct symbol *find_symbol_by_name(struc
 struct rela *find_rela_by_dest_range(struct elf *elf, struct section *sec,
				     unsigned long offset, unsigned int len)
 {
-	struct rela *rela;
+	struct rela *rela, *r = NULL;
 	unsigned long o;

 	if (!sec->rela)
@@ -223,12 +223,19 @@ struct rela *find_rela_by_dest_range(str

 	sec = sec->rela;

-	for (o = offset; o < offset + len; o++) {
+	for_offset_range(o, offset, offset + len) {
 		hash_for_each_possible(elf->rela_hash, rela, hash,
				       sec_offset_hash(sec, o)) {
-			if (rela->sec == sec && rela->offset == o)
-				return rela;
+			if (rela->sec != sec)
+				continue;
+
+			if (rela->offset >= offset && rela->offset < offset + len) {
+				if (!r || rela->offset < r->offset)
+					r = rela;
+			}
 		}
+		if (r)
+			return r;
 	}

 	return NULL;
--- a/tools/objtool/elf.h
+++ b/tools/objtool/elf.h
@@ -83,9 +83,23 @@ struct elf {
 	DECLARE_HASHTABLE(rela_hash, 20);
 };

+#define OFFSET_STRIDE_BITS	4
+#define OFFSET_STRIDE		(1UL << OFFSET_STRIDE_BITS)
+#define OFFSET_STRIDE_MASK	(~(OFFSET_STRIDE - 1))
+
+#define for_offset_range(_offset, _start, _end)			\
+	for (_offset = ((_start) & OFFSET_STRIDE_MASK);		\
+	     _offset <= ((_end) & OFFSET_STRIDE_MASK);		\
+	     _offset += OFFSET_STRIDE)
+
 static inline u32 sec_offset_hash(struct section *sec, unsigned long offset)
 {
-	u32 ol = offset, oh = offset >> 32, idx = sec->idx;
+	u32 ol, oh, idx = sec->idx;
+
+	offset &= OFFSET_STRIDE_MASK;
+
+	ol = offset;
+	oh = offset >> 32;

 	__jhash_mix(ol, oh, idx);
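
To see what the new helpers do in isolation, a standalone sketch (constants
and macro copied from the hunk above; elf.h and __jhash_mix() are
deliberately not pulled in, and the masked value printed here is only the
part of the hash key that this patch changes):

#include <stdio.h>

#define OFFSET_STRIDE_BITS	4
#define OFFSET_STRIDE		(1UL << OFFSET_STRIDE_BITS)
#define OFFSET_STRIDE_MASK	(~(OFFSET_STRIDE - 1))

#define for_offset_range(_offset, _start, _end)			\
	for (_offset = ((_start) & OFFSET_STRIDE_MASK);		\
	     _offset <= ((_end) & OFFSET_STRIDE_MASK);		\
	     _offset += OFFSET_STRIDE)

int main(void)
{
	unsigned long o;

	/*
	 * Every offset inside one 16-byte stride masks down to the same
	 * value, so sec_offset_hash() now produces one key per stride...
	 */
	printf("0x105 -> key 0x%lx, 0x10f -> key 0x%lx\n",
	       0x105UL & OFFSET_STRIDE_MASK, 0x10fUL & OFFSET_STRIDE_MASK);

	/*
	 * ...and a range walk only visits those per-stride keys: this
	 * prints 0x100, 0x110, 0x120, 0x130 for the range [0x105, 0x138).
	 */
	for_offset_range(o, 0x105, 0x138)
		printf("probe stride 0x%lx\n", o);

	return 0;
}

Because every offset within a stride now hashes identically,
find_rela_by_dest_range() needs a single hash probe per 16 bytes of the
range instead of one per byte, which is where the 4.8s -> 4.4s win on
vmlinux.o comes from.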