Message-Id: <20200312135042.346616828@infradead.org>
Date: Thu, 12 Mar 2020 14:41:23 +0100
From: Peter Zijlstra <peterz@...radead.org>
To: tglx@...utronix.de, jpoimboe@...hat.com
Cc: linux-kernel@...r.kernel.org, x86@...nel.org, peterz@...radead.org
Subject: [RFC][PATCH 16/16] objtool: Optimize !vmlinux.o again
When doing kbuild tests to see if the objtool changes affected build
times, I found that there was a measurable regression:
              pre             post
real          1m13.594s       1m16.488s
user          34m58.246s      35m23.947s
sys           4m0.393s        4m27.312s
Perf showed that for small files the increased hash-table sizes made a
measurable difference. Since we already have -l "vmlinux" to
distinguish between the modes, make it also use a smaller portion of
the hash-tables; a small standalone sketch of the bucket selection
follows the diffstat below.
This flips it into a small win:
real 1m14.143s
user 34m49.292s
sys 3m44.746s
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
tools/objtool/elf.c | 40 ++++++++++++++++++++++++++++------------
tools/objtool/elf.h | 4 ++--
2 files changed, 30 insertions(+), 14 deletions(-)
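
(Not part of the patch, reviewer-side illustration only.) The helpers added
in elf.c keep the tables declared at their full size but confine hashing to
the first 1 << elf_hash_bits() buckets. Below is a self-contained userspace
sketch of that bucket selection, modelled on the kernel's multiplicative
hash_32(); the example key and the printed labels are made up:

#include <stdio.h>
#include <stdbool.h>

#define GOLDEN_RATIO_32 0x61C88647u     /* multiplier used by the kernel's hash_32() */

static bool vmlinux;                    /* set when checking a whole vmlinux.o */

/* Mirrors elf_hash_bits() from the patch: the full 2^20 buckets only for vmlinux. */
static inline unsigned int elf_hash_bits(void)
{
        return vmlinux ? 20 : 16;
}

/* Multiplicative hash keeping the top 'bits' bits, as hash_32() does. */
static inline unsigned int hash_32(unsigned int val, unsigned int bits)
{
        return (val * GOLDEN_RATIO_32) >> (32 - bits);
}

int main(void)
{
        /* !vmlinux mode: every key lands in the first 1 << 16 buckets. */
        printf("!vmlinux bucket: %u\n", hash_32(12345, elf_hash_bits()));

        /* vmlinux mode: the same key may spread over all 1 << 20 buckets. */
        vmlinux = true;
        printf(" vmlinux bucket: %u\n", hash_32(12345, elf_hash_bits()));

        return 0;
}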
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -27,6 +27,21 @@ static inline u32 str_hash(const char *s
return jhash(str, strlen(str), 0);
}
+static inline int elf_hash_bits(void)
+{
+ return vmlinux ? 20 : 16;
+}
+
+static inline void elf_hash_add(struct hlist_head *table, struct hlist_node *node, u32 key)
+{
+ hlist_add_head(node, &table[hash_32(key, elf_hash_bits())]);
+}
+
+static void elf_hash_init(struct hlist_head *table)
+{
+ __hash_init(table, 1U << elf_hash_bits());
+}
+
static void rb_add(struct rb_root *tree, struct rb_node *node,
int (*cmp)(struct rb_node *, const struct rb_node *))
{
@@ -300,8 +315,8 @@ static int read_sections(struct elf *elf
}
sec->len = sec->sh.sh_size;
- hash_add(elf->section_hash, &sec->hash, sec->idx);
- hash_add(elf->section_name_hash, &sec->name_hash, str_hash(sec->name));
+ elf_hash_add(elf->section_hash, &sec->hash, sec->idx);
+ elf_hash_add(elf->section_name_hash, &sec->name_hash, str_hash(sec->name));
}
if (stats)
@@ -387,8 +402,8 @@ static int read_symbols(struct elf *elf)
entry = &sym->sec->symbol_list;
list_add(&sym->list, entry);
- hash_add(elf->symbol_hash, &sym->hash, sym->idx);
- hash_add(elf->symbol_name_hash, &sym->name_hash, str_hash(sym->name));
+ elf_hash_add(elf->symbol_hash, &sym->hash, sym->idx);
+ elf_hash_add(elf->symbol_name_hash, &sym->name_hash, str_hash(sym->name));
}
if (stats)
@@ -497,7 +512,7 @@ static int read_relas(struct elf *elf)
}
list_add_tail(&rela->list, &sec->rela_list);
- hash_add(elf->rela_hash, &rela->hash, rela_hash(rela));
+ elf_hash_add(elf->rela_hash, &rela->hash, rela_hash(rela));
nr_rela++;
}
max_rela = max(max_rela, nr_rela);
@@ -524,15 +539,16 @@ struct elf *elf_read(const char *name, i
perror("malloc");
return NULL;
}
- memset(elf, 0, sizeof(*elf));
+ memset(elf, 0, offsetof(struct elf, sections));
- hash_init(elf->symbol_hash);
- hash_init(elf->symbol_name_hash);
- hash_init(elf->section_hash);
- hash_init(elf->section_name_hash);
- hash_init(elf->rela_hash);
INIT_LIST_HEAD(&elf->sections);
+ elf_hash_init(elf->symbol_hash);
+ elf_hash_init(elf->symbol_name_hash);
+ elf_hash_init(elf->section_hash);
+ elf_hash_init(elf->section_name_hash);
+ elf_hash_init(elf->rela_hash);
+
elf->fd = open(name, flags);
if (elf->fd == -1) {
fprintf(stderr, "objtool: Can't open '%s': %s\n",
@@ -671,7 +687,7 @@ struct section *elf_create_section(struc
shstrtab->len += strlen(name) + 1;
shstrtab->changed = true;
- hash_add(elf->section_hash, &sec->hash, sec->idx);
+ elf_hash_add(elf->section_hash, &sec->hash, sec->idx);
return sec;
}
--- a/tools/objtool/elf.h
+++ b/tools/objtool/elf.h
@@ -81,8 +81,8 @@ struct elf {
struct list_head sections;
DECLARE_HASHTABLE(symbol_hash, 20);
DECLARE_HASHTABLE(symbol_name_hash, 20);
- DECLARE_HASHTABLE(section_hash, 16);
- DECLARE_HASHTABLE(section_name_hash, 16);
+ DECLARE_HASHTABLE(section_hash, 20);
+ DECLARE_HASHTABLE(section_name_hash, 20);
DECLARE_HASHTABLE(rela_hash, 20);
};
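
(Also not part of the patch.) For reviewers wondering why the memset() in
elf_read() can stop at offsetof(struct elf, sections): the five bucket arrays
sit after 'sections', so they are no longer zeroed wholesale (~40 MiB on a
64-bit build); elf_hash_init() instead clears only the 1 << elf_hash_bits()
buckets actually used. A rough, self-contained sketch of the sizes involved;
struct elf_sketch and its leading members are placeholders, not the real
layout:

#include <stdio.h>
#include <stddef.h>

/* Minimal stand-ins for the kernel list/hash types, just so sizeof() works. */
struct hlist_head { struct hlist_node *first; };
struct list_head  { struct list_head *next, *prev; };

struct elf_sketch {
        int fd;                                 /* fields up to here are still */
        char *name;                             /* zeroed by the memset()      */
        struct list_head sections;              /* INIT_LIST_HEAD() instead    */
        struct hlist_head symbol_hash[1U << 20];        /* the bucket arrays are set  */
        struct hlist_head symbol_name_hash[1U << 20];   /* up by elf_hash_init(),     */
        struct hlist_head section_hash[1U << 20];       /* which touches only the     */
        struct hlist_head section_name_hash[1U << 20];  /* first 1 << elf_hash_bits() */
        struct hlist_head rela_hash[1U << 20];          /* entries of each table      */
};

int main(void)
{
        printf("memset(elf, 0, sizeof(*elf)) clears           %zu bytes\n",
               sizeof(struct elf_sketch));
        printf("memset(elf, 0, offsetof(.., sections)) clears %zu bytes\n",
               offsetof(struct elf_sketch, sections));
        printf("elf_hash_init() x 5 in !vmlinux mode touches  %zu bytes\n",
               (size_t)5 * (1U << 16) * sizeof(struct hlist_head));
        return 0;
}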