Message-ID: <Y8U8OfoVwkJPdJcv@localhost>
Date: Mon, 16 Jan 2023 11:59:53 +0000
From: Hyeonggon Yoo <42.hyeyoo@...il.com>
To: Rong Tao <rtoax@...mail.com>
Cc: cl@...ux.com, sdf@...gle.com, yhs@...com,
Rong Tao <rongtao@...tc.cn>, Pekka Enberg <penberg@...nel.org>,
David Rientjes <rientjes@...gle.com>,
Joonsoo Kim <iamjoonsoo.kim@....com>,
Andrew Morton <akpm@...ux-foundation.org>,
Vlastimil Babka <vbabka@...e.cz>,
Roman Gushchin <roman.gushchin@...ux.dev>,
"open list:SLAB ALLOCATOR" <linux-mm@...ck.org>,
open list <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH] mm: Functions used internally should not be put into
slub_def.h
On Mon, Jan 16, 2023 at 04:50:05PM +0800, Rong Tao wrote:
> From: Rong Tao <rongtao@...tc.cn>
>
> commit 40f3bf0cb04c ("mm: Convert struct page to struct slab in functions
> used by other subsystems") introduced 'slab_address()' and 'struct slab'
> in slab_def.h (CONFIG_SLAB) and slub_def.h (CONFIG_SLUB). When a module or
> BPF program includes the header <linux/slub_def.h>, 'slab_address()' and
> 'struct slab' are not declared there, resulting in incomplete-type and
> undefined-symbol errors (see the bcc slabratetop.py error [0]).
>
Hello Rong,

IMO sl*b_def.h is not intended to be used externally,
and I'm not sure it's worth a -stable release either.

IIUC the reason slabratetop.py relies on sl*b_def.h is to read
cachep->name and cachep->size.

I think this can be solved by using a tool that supports BPF Type Format (BTF)?
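
For example, with a libbpf CO-RE program the member offsets are resolved
from the kernel's BTF at load time, so nothing from sl*b_def.h needs to be
included. A rough, untested sketch (the vmlinux.h header, the kprobe attach
point and the field reads below are only assumptions for illustration, not
the actual bcc tool):

/* slabratetop.bpf.c - minimal CO-RE sketch */
#include "vmlinux.h"		/* generated from kernel BTF, not sl*b_def.h */
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

SEC("kprobe/kmem_cache_alloc")
int BPF_KPROBE(trace_cache_alloc, struct kmem_cache *cachep)
{
	const char *name_ptr;
	char name[32] = {};
	unsigned int size;

	/* CO-RE relocations pick up the real offsets of ->name and ->size */
	name_ptr = BPF_CORE_READ(cachep, name);
	size = BPF_CORE_READ(cachep, size);
	bpf_probe_read_kernel_str(name, sizeof(name), name_ptr);

	bpf_printk("cache %s size %u", name, size);
	return 0;
}

bcc's libbpf-tools are already built this way against vmlinux.h rather than
kernel headers, so something along these lines should work without exposing
the sl*b internals in uapi-visible headers at all.
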
> Moving the definitions of the functions that reference 'struct slab' and
> slab_address(), namely nearest_obj(), obj_to_index(), and objs_per_slab(),
> to the internal header file mm/slab.h solves this problem.
>
> [0] https://github.com/iovisor/bcc/issues/4438
>
> Signed-off-by: Rong Tao <rongtao@...tc.cn>
> ---
> include/linux/slab_def.h | 33 --------------------
> include/linux/slub_def.h | 32 -------------------
> mm/slab.h | 66 ++++++++++++++++++++++++++++++++++++++++
> 3 files changed, 66 insertions(+), 65 deletions(-)
>
> diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
> index 5834bad8ad78..5658b5fddf9b 100644
> --- a/include/linux/slab_def.h
> +++ b/include/linux/slab_def.h
> @@ -88,37 +88,4 @@ struct kmem_cache {
> struct kmem_cache_node *node[MAX_NUMNODES];
> };
>
> -static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
> -				 void *x)
> -{
> -	void *object = x - (x - slab->s_mem) % cache->size;
> -	void *last_object = slab->s_mem + (cache->num - 1) * cache->size;
> -
> -	if (unlikely(object > last_object))
> -		return last_object;
> -	else
> -		return object;
> -}
> -
> -/*
> - * We want to avoid an expensive divide : (offset / cache->size)
> - * Using the fact that size is a constant for a particular cache,
> - * we can replace (offset / cache->size) by
> - * reciprocal_divide(offset, cache->reciprocal_buffer_size)
> - */
> -static inline unsigned int obj_to_index(const struct kmem_cache *cache,
> -					const struct slab *slab, void *obj)
> -{
> -	u32 offset = (obj - slab->s_mem);
> -	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
> -}
> -
> -static inline int objs_per_slab(const struct kmem_cache *cache,
> -				const struct slab *slab)
> -{
> -	if (is_kfence_address(slab_address(slab)))
> -		return 1;
> -	return cache->num;
> -}
> -
> #endif /* _LINUX_SLAB_DEF_H */
> diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
> index aa0ee1678d29..660fd6b2a748 100644
> --- a/include/linux/slub_def.h
> +++ b/include/linux/slub_def.h
> @@ -163,36 +163,4 @@ static inline void sysfs_slab_release(struct kmem_cache *s)
>
> void *fixup_red_left(struct kmem_cache *s, void *p);
>
> -static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
> -				void *x) {
> -	void *object = x - (x - slab_address(slab)) % cache->size;
> -	void *last_object = slab_address(slab) +
> -		(slab->objects - 1) * cache->size;
> -	void *result = (unlikely(object > last_object)) ? last_object : object;
> -
> -	result = fixup_red_left(cache, result);
> -	return result;
> -}
> -
> -/* Determine object index from a given position */
> -static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
> -					  void *addr, void *obj)
> -{
> -	return reciprocal_divide(kasan_reset_tag(obj) - addr,
> -				 cache->reciprocal_size);
> -}
> -
> -static inline unsigned int obj_to_index(const struct kmem_cache *cache,
> -					const struct slab *slab, void *obj)
> -{
> -	if (is_kfence_address(obj))
> -		return 0;
> -	return __obj_to_index(cache, slab_address(slab), obj);
> -}
> -
> -static inline int objs_per_slab(const struct kmem_cache *cache,
> -				const struct slab *slab)
> -{
> -	return slab->objects;
> -}
> #endif /* _LINUX_SLUB_DEF_H */
> diff --git a/mm/slab.h b/mm/slab.h
> index 7cc432969945..38350a0efa91 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -227,10 +227,76 @@ struct kmem_cache {
>
> #ifdef CONFIG_SLAB
> #include <linux/slab_def.h>
> +
> +static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
> +				 void *x)
> +{
> +	void *object = x - (x - slab->s_mem) % cache->size;
> +	void *last_object = slab->s_mem + (cache->num - 1) * cache->size;
> +
> +	if (unlikely(object > last_object))
> +		return last_object;
> +	else
> +		return object;
> +}
> +
> +/*
> + * We want to avoid an expensive divide : (offset / cache->size)
> + * Using the fact that size is a constant for a particular cache,
> + * we can replace (offset / cache->size) by
> + * reciprocal_divide(offset, cache->reciprocal_buffer_size)
> + */
> +static inline unsigned int obj_to_index(const struct kmem_cache *cache,
> +					const struct slab *slab, void *obj)
> +{
> +	u32 offset = (obj - slab->s_mem);
> +	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
> +}
> +
> +static inline int objs_per_slab(const struct kmem_cache *cache,
> +				const struct slab *slab)
> +{
> +	if (is_kfence_address(slab_address(slab)))
> +		return 1;
> +	return cache->num;
> +}
> #endif
>
> #ifdef CONFIG_SLUB
> #include <linux/slub_def.h>
> +
> +static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
> +				void *x) {
> +	void *object = x - (x - slab_address(slab)) % cache->size;
> +	void *last_object = slab_address(slab) +
> +		(slab->objects - 1) * cache->size;
> +	void *result = (unlikely(object > last_object)) ? last_object : object;
> +
> +	result = fixup_red_left(cache, result);
> +	return result;
> +}
> +
> +/* Determine object index from a given position */
> +static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
> +					  void *addr, void *obj)
> +{
> +	return reciprocal_divide(kasan_reset_tag(obj) - addr,
> +				 cache->reciprocal_size);
> +}
> +
> +static inline unsigned int obj_to_index(const struct kmem_cache *cache,
> +					const struct slab *slab, void *obj)
> +{
> +	if (is_kfence_address(obj))
> +		return 0;
> +	return __obj_to_index(cache, slab_address(slab), obj);
> +}
> +
> +static inline int objs_per_slab(const struct kmem_cache *cache,
> +				const struct slab *slab)
> +{
> +	return slab->objects;
> +}
> #endif
>
> #include <linux/memcontrol.h>
> --
> 2.39.0
>