Message-ID: <28e1a9dc-59d1-442c-d4fe-e31ece0830d8@akamai.com>
Date: Thu, 19 Jan 2017 13:58:51 -0500
From: Jason Baron <jbaron@...mai.com>
To: Jason Baron <jbaron@...mai.com>, peterz@...radead.org,
rostedt@...dmis.org, mingo@...nel.org
Cc: linux-kernel@...r.kernel.org, Joe Perches <joe@...ches.com>
Subject: Re: [PATCH v2] jump_label: reduce the size of struct static_key
On 01/04/2017 01:41 PM, Jason Baron wrote:
> The static_key->next field goes mostly unused. The field is used for
> associating module uses with a static key. Most uses of struct static_key
> define a static key in the core kernel and make use of it entirely within
> the core kernel, or define the static key in a module and make use of it
> only from within that module. In fact, of the ~3,000 static keys defined,
> I found only about 5 that did not fit this pattern.
Hi,
Any thoughts on this?
Thanks,
-Jason
>
> Thus, we can remove the static_key->next field entirely and overload
> the static_key->entries field. That is, when all the static_key uses
> are contained within the module (or core kernel) where the key is defined,
> static_key->entries continues to point to those uses. However, if the
> static_key uses are not contained
> within the module where the static_key is defined, then we allocate a
> struct static_key_mod, store a pointer to the uses within that
> struct static_key_mod, and have the static key point at the static_key_mod.
> This does incur some extra memory usage when a static_key is used in a
> module that does not define it, but since there are only a handful of such
> cases there is a net savings.
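For anyone following along, the promotion step described above boils down to
something like the following rough userspace sketch (stand-in types and names
such as struct key, struct key_mod and key_add_module_entries are made up for
illustration; this is not the kernel code itself):

#include <stdlib.h>

struct entry { unsigned long code; };	/* stand-in for struct jump_entry */

struct key_mod {			/* stand-in for struct static_key_mod */
	struct key_mod *next;
	struct entry *entries;
};

struct key {				/* stand-in for struct static_key */
	int linked;	/* the real patch folds this into bit 1 of the word */
	union {
		struct entry *entries;	/* direct: all uses in one place */
		struct key_mod *mods;	/* linked: one node per using module */
	};
};

static int key_add_module_entries(struct key *k, struct entry *mod_entries)
{
	struct key_mod *node, *owner;

	node = calloc(1, sizeof(*node));
	if (!node)
		return -1;

	if (!k->linked) {
		/* First external user: move the key's direct entries
		 * pointer into a list node of its own before chaining. */
		owner = calloc(1, sizeof(*owner));
		if (!owner) {
			free(node);
			return -1;
		}
		owner->entries = k->entries;
		k->mods = owner;
		k->linked = 1;
	}

	node->entries = mod_entries;
	node->next = k->mods;		/* push the new module at the head */
	k->mods = node;
	return 0;
}

The patch additionally frees the extra node again in jump_label_del_module()
once only a single user is left, folding the pointer back into the key.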
>
> In order to identify if the static_key->entries pointer contains a
> struct static_key_mod or a struct jump_entry pointer, bit 1 of
> static_key->entries is set to 1 if it points to a struct static_key_mod and
> is 0 if it points to a struct jump_entry. We were already using bit 0 in a
> similar way to store the initial value of the static_key. This does mean
> that allocations of struct static_key_mod and the struct jump_entry
> tables need to be at least 4-byte aligned in memory. As far as I can tell,
> all arches meet this criterion.
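The encoding itself is the usual pointer-tagging trick; a standalone,
compilable sketch (illustrative names only, not the kernel helpers) could
look like this:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TYPE_TRUE	1UL	/* bit 0: initial value of the key */
#define TYPE_LINKED	2UL	/* bit 1: word holds a static_key_mod pointer */
#define TYPE_MASK	3UL

/* Pack a pointer plus the two flag bits into a single word; the pointee
 * must be at least 4-byte aligned so the low two bits are free. */
static unsigned long pack(void *ptr, unsigned long flags)
{
	assert(((uintptr_t)ptr & TYPE_MASK) == 0);
	return (unsigned long)(uintptr_t)ptr | (flags & TYPE_MASK);
}

static void *unpack_ptr(unsigned long word)
{
	return (void *)(uintptr_t)(word & ~TYPE_MASK);
}

int main(void)
{
	static int dummy __attribute__((aligned(4)));
	unsigned long word = pack(&dummy, TYPE_TRUE | TYPE_LINKED);

	printf("pointer intact: %d, initially true: %d, linked: %d\n",
	       unpack_ptr(word) == (void *)&dummy,
	       !!(word & TYPE_TRUE), !!(word & TYPE_LINKED));
	return 0;
}

The aligned(4) attribute is only there to make the alignment assumption
explicit in the sketch; in the kernel the jump tables and the kzalloc'd
static_key_mod structures already satisfy it.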
>
> For my .config, the patch increased the text by 253 bytes, but reduced
> the data + bss size by 19,136 bytes, for a net savings of 18,883 bytes.
>
>    text    data    bss      dec    hex filename
> 8315249 5061176 794624 14171049 d83ba9 vmlinux.pre
> 8315502 5046136 790528 14152166 d7f1e6 vmlinux.post
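(For completeness, the net figure falls out of the table directly: data
shrinks by 5,061,176 - 5,046,136 = 15,040 bytes and bss by 794,624 -
790,528 = 4,096 bytes, i.e. 19,136 bytes total, while text grows by
8,315,502 - 8,315,249 = 253 bytes, leaving 19,136 - 253 = 18,883 bytes saved.)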
>
> Cc: Peter Zijlstra <peterz@...radead.org>
> Cc: Steven Rostedt <rostedt@...dmis.org>
> Cc: Ingo Molnar <mingo@...nel.org>
> Cc: Joe Perches <joe@...ches.com>
> Signed-off-by: Jason Baron <jbaron@...mai.com>
> ---
> Changed in v2:
> -Replace static_key->entries with union (Steven Rostedt)
> ---
> Documentation/static-keys.txt |  4 +-
> include/linux/jump_label.h    | 23 +++++++----
> kernel/jump_label.c           | 94 +++++++++++++++++++++++++++++++++----------
> 3 files changed, 90 insertions(+), 31 deletions(-)
>
> diff --git a/Documentation/static-keys.txt b/Documentation/static-keys.txt
> index ea8d7b4..32a25fa 100644
> --- a/Documentation/static-keys.txt
> +++ b/Documentation/static-keys.txt
> @@ -155,7 +155,9 @@ or:
>
> There are a few functions and macros that architectures must implement in order
> to take advantage of this optimization. If there is no architecture support, we
> -simply fall back to a traditional, load, test, and jump sequence.
> +simply fall back to a traditional, load, test, and jump sequence. Also, the
> +struct jump_entry table must be at least 4-byte aligned because the
> +static_key->entries field makes use of the two least significant bits.
>
> * select HAVE_ARCH_JUMP_LABEL, see: arch/x86/Kconfig
>
> diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
> index a0547c5..680c98b 100644
> --- a/include/linux/jump_label.h
> +++ b/include/linux/jump_label.h
> @@ -89,11 +89,17 @@ extern bool static_key_initialized;
>
> struct static_key {
> atomic_t enabled;
> -/* Set lsb bit to 1 if branch is default true, 0 ot */
> - struct jump_entry *entries;
> -#ifdef CONFIG_MODULES
> - struct static_key_mod *next;
> -#endif
> +/*
> + * bit 0 => 1 if key is initially true
> + * 0 if initially false
> + * bit 1 => 1 if points to struct static_key_mod
> + * 0 if points to struct jump_entry
> + */
> + union {
> + unsigned long type;
> + struct jump_entry *entries;
> + struct static_key_mod *next;
> + };
> };
>
> #else
> @@ -118,9 +124,10 @@ struct module;
>
> #ifdef HAVE_JUMP_LABEL
>
> -#define JUMP_TYPE_FALSE 0UL
> -#define JUMP_TYPE_TRUE 1UL
> -#define JUMP_TYPE_MASK 1UL
> +#define JUMP_TYPE_FALSE 0UL
> +#define JUMP_TYPE_TRUE 1UL
> +#define JUMP_TYPE_LINKED 2UL
> +#define JUMP_TYPE_MASK 3UL
>
> static __always_inline bool static_key_false(struct static_key *key)
> {
> diff --git a/kernel/jump_label.c b/kernel/jump_label.c
> index 93ad6c1..80cc496 100644
> --- a/kernel/jump_label.c
> +++ b/kernel/jump_label.c
> @@ -229,12 +229,27 @@ void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry
>
> static inline struct jump_entry *static_key_entries(struct static_key *key)
> {
> - return (struct jump_entry *)((unsigned long)key->entries & ~JUMP_TYPE_MASK);
> + return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
> }
>
> static inline bool static_key_type(struct static_key *key)
> {
> - return (unsigned long)key->entries & JUMP_TYPE_MASK;
> + return key->type & JUMP_TYPE_TRUE;
> +}
> +
> +static inline bool static_key_linked(struct static_key *key)
> +{
> + return key->type & JUMP_TYPE_LINKED;
> +}
> +
> +static inline void static_key_clear_linked(struct static_key *key)
> +{
> + key->type &= ~JUMP_TYPE_LINKED;
> +}
> +
> +static inline void static_key_set_linked(struct static_key *key)
> +{
> + key->type |= JUMP_TYPE_LINKED;
> }
>
> static inline struct static_key *jump_entry_key(struct jump_entry *entry)
> @@ -307,12 +322,9 @@ void __init jump_label_init(void)
>
> key = iterk;
> /*
> - * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
> + * Set key->type to iter, preserving JUMP_TYPE_MASK bits
> */
> - *((unsigned long *)&key->entries) += (unsigned long)iter;
> -#ifdef CONFIG_MODULES
> - key->next = NULL;
> -#endif
> + key->type += (unsigned long)iter;
> }
> static_key_initialized = true;
> jump_label_unlock();
> @@ -336,6 +348,11 @@ struct static_key_mod {
> struct module *mod;
> };
>
> +static inline struct static_key_mod *static_key_mod(struct static_key *key)
> +{
> + return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
> +}
> +
> static int __jump_label_mod_text_reserved(void *start, void *end)
> {
> struct module *mod;
> @@ -356,13 +373,19 @@ static int __jump_label_mod_text_reserved(void *start, void *end)
>
> static void __jump_label_mod_update(struct static_key *key)
> {
> - struct static_key_mod *mod;
> + struct static_key_mod *mod = static_key_mod(key);
>
> - for (mod = key->next; mod; mod = mod->next) {
> + while (mod) {
> + struct jump_entry *stop;
> struct module *m = mod->mod;
>
> - __jump_label_update(key, mod->entries,
> - m->jump_entries + m->num_jump_entries);
> + if (!m)
> + stop = __stop___jump_table;
> + else
> + stop = m->jump_entries + m->num_jump_entries;
> + if (mod->entries)
> + __jump_label_update(key, mod->entries, stop);
> + mod = mod->next;
> }
> }
>
> @@ -397,7 +420,7 @@ static int jump_label_add_module(struct module *mod)
> struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
> struct jump_entry *iter;
> struct static_key *key = NULL;
> - struct static_key_mod *jlm;
> + struct static_key_mod *jlm, *jlm2;
>
> /* if the module doesn't have jump label entries, just return */
> if (iter_start == iter_stop)
> @@ -415,19 +438,34 @@ static int jump_label_add_module(struct module *mod)
> key = iterk;
> if (within_module(iter->key, mod)) {
> /*
> - * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
> + * Set key->type to iter, preserving JUMP_TYPE_MASK bits
> */
> - *((unsigned long *)&key->entries) += (unsigned long)iter;
> - key->next = NULL;
> + key->type += (unsigned long)iter;
> continue;
> }
> jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
> if (!jlm)
> return -ENOMEM;
> + if (!static_key_linked(key)) {
> + jlm2 = kzalloc(sizeof(struct static_key_mod),
> + GFP_KERNEL);
> + if (!jlm2) {
> + kfree(jlm);
> + return -ENOMEM;
> + }
> + preempt_disable();
> + jlm2->mod = __module_address((unsigned long)key);
> + preempt_enable();
> + jlm2->entries = static_key_entries(key);
> + jlm2->next = NULL;
> + key->type = (unsigned long)jlm2 | static_key_type(key);
> + static_key_set_linked(key);
> + }
> jlm->mod = mod;
> jlm->entries = iter;
> - jlm->next = key->next;
> - key->next = jlm;
> + jlm->next = static_key_mod(key);
> + key->type = (unsigned long)jlm | static_key_type(key);
> + static_key_set_linked(key);
>
> /* Only update if we've changed from our initial state */
> if (jump_label_type(iter) != jump_label_init_type(iter))
> @@ -455,15 +493,26 @@ static void jump_label_del_module(struct module *mod)
> continue;
>
> prev = &key->next;
> - jlm = key->next;
> + jlm = static_key_mod(key);
>
> while (jlm && jlm->mod != mod) {
> prev = &jlm->next;
> jlm = jlm->next;
> }
>
> - if (jlm) {
> - *prev = jlm->next;
> + if (!jlm)
> + continue;
> +
> + *prev = (struct static_key_mod *)((unsigned long)jlm->next |
> + ((unsigned long)*prev & JUMP_TYPE_MASK));
> + kfree(jlm);
> +
> + jlm = static_key_mod(key);
> + /* if only one entry is left, fold it back into the static_key */
> + if (jlm->next == NULL) {
> + key->type = (unsigned long)jlm->entries |
> + static_key_type(key);
> + static_key_clear_linked(key);
> kfree(jlm);
> }
> }
> @@ -558,7 +607,8 @@ static void jump_label_update(struct static_key *key)
> #ifdef CONFIG_MODULES
> struct module *mod;
>
> - __jump_label_mod_update(key);
> + if (static_key_linked(key))
> + __jump_label_mod_update(key);
>
> preempt_disable();
> mod = __module_address((unsigned long)key);
> @@ -567,7 +617,7 @@ static void jump_label_update(struct static_key *key)
> preempt_enable();
> #endif
> /* if there are no users, entry can be NULL */
> - if (entry)
> + if (entry && !static_key_linked(key))
> __jump_label_update(key, entry, stop);
> }
>
>