Message-ID: <915667d7-a13e-423c-89a0-9f877143fd14@suse.cz>
Date: Fri, 26 Jul 2024 12:24:46 +0200
From: Vlastimil Babka <vbabka@...e.cz>
To: Xiongwei Song <sxwjean@...il.com>
Cc: "Paul E. McKenney" <paulmck@...nel.org>,
Joel Fernandes <joel@...lfernandes.org>,
Josh Triplett <josh@...htriplett.org>, Boqun Feng <boqun.feng@...il.com>,
Christoph Lameter <cl@...ux.com>, David Rientjes <rientjes@...gle.com>,
Steven Rostedt <rostedt@...dmis.org>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
Lai Jiangshan <jiangshanlai@...il.com>, Zqiang <qiang.zhang1211@...il.com>,
Julia Lawall <Julia.Lawall@...ia.fr>, Jakub Kicinski <kuba@...nel.org>,
"Jason A. Donenfeld" <Jason@...c4.com>,
"Uladzislau Rezki (Sony)" <urezki@...il.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Roman Gushchin <roman.gushchin@...ux.dev>,
Hyeonggon Yoo <42.hyeyoo@...il.com>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, rcu@...r.kernel.org
Subject: Re: [PATCH RFC 2/6] mm, slab: always maintain per-node slab and
object count
On 7/22/24 4:16 PM, Xiongwei Song wrote:
> Don't we need the following changes for this patch?
Yes thanks, will fix!
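
Right, folding your fix into the patch, init_kmem_cache_node() should
end up as below (just a sketch combining your hunk, quoted underneath,
with the RFC):

static void init_kmem_cache_node(struct kmem_cache_node *n)
{
        n->nr_partial = 0;
        spin_lock_init(&n->list_lock);
        INIT_LIST_HEAD(&n->partial);
        /* counters are now maintained even without CONFIG_SLUB_DEBUG */
        atomic_long_set(&n->nr_slabs, 0);
        atomic_long_set(&n->total_objects, 0);
#ifdef CONFIG_SLUB_DEBUG
        /* the full list remains debug-only */
        INIT_LIST_HEAD(&n->full);
#endif
}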
> diff --git a/mm/slub.c b/mm/slub.c
> index c1222467c346..e6beb6743342 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -4967,9 +4967,9 @@ init_kmem_cache_node(struct kmem_cache_node *n)
> n->nr_partial = 0;
> spin_lock_init(&n->list_lock);
> INIT_LIST_HEAD(&n->partial);
> -#ifdef CONFIG_SLUB_DEBUG
> atomic_long_set(&n->nr_slabs, 0);
> atomic_long_set(&n->total_objects, 0);
> +#ifdef CONFIG_SLUB_DEBUG
> INIT_LIST_HEAD(&n->full);
> #endif
> }
>
> Thanks,
> Xiongwei
>
>
> On Tue, Jul 16, 2024 at 4:29 AM Vlastimil Babka <vbabka@...e.cz> wrote:
>>
>> Currently SLUB counts per-node slabs and total objects only with
>> CONFIG_SLUB_DEBUG, in order to minimize overhead. However, the
>> detection in __kmem_cache_shutdown() of whether there are any
>> outstanding objects relies on the per-node slab count
>> (node_nr_slabs()), so it may be unreliable without CONFIG_SLUB_DEBUG.
>> Thus we might fail to warn about such situations and instead destroy a
>> cache while leaving its slab(s) around (a scenario created by a buggy
>> slab user, not one that occurs in normal operation).
>>
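For context, the shutdown-time detection mentioned above is essentially
the check below (sketched from mainline mm/slub.c; details may differ
between kernel versions):

int __kmem_cache_shutdown(struct kmem_cache *s)
{
        int node;
        struct kmem_cache_node *n;

        flush_all_cpus_locked(s);
        /* attempt to free all objects */
        for_each_kmem_cache_node(s, node, n) {
                free_partial(s, n);
                /* slabs still accounted to the node => outstanding objects */
                if (n->nr_partial || node_nr_slabs(n))
                        return 1;
        }
        return 0;
}
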
>> We will also need node_nr_slabs() to be reliable in follow-up work
>> that gracefully handles kmem_cache_destroy() with kfree_rcu() objects
>> still in flight. Thus make the counting of per-node slabs and objects
>> unconditional.
>>
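A rough, hypothetical illustration (not from this series) of the kind
of check that follow-up work could build on top of reliable counters:

/* hypothetical helper, for illustration only */
static bool cache_has_outstanding_objects(struct kmem_cache *s)
{
        struct kmem_cache_node *n;
        int node;

        /* any slabs still accounted to a node mean objects (e.g. from
         * kfree_rcu()) are still in flight */
        for_each_kmem_cache_node(s, node, n)
                if (node_nr_slabs(n))
                        return true;
        return false;
}
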
>> Note that CONFIG_SLUB_DEBUG is enabled by default anyway, and the
>> counting is done only when allocating or freeing a slab page, so even
>> in !CONFIG_SLUB_DEBUG configs the overhead should be negligible.
>>
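I.e. the counter updates happen only on the slab page allocation and
teardown paths, for example (from mainline; may differ slightly):

static void discard_slab(struct kmem_cache *s, struct slab *slab)
{
        /* counter update on slab page teardown */
        dec_slabs_node(s, slab_nid(slab), slab->objects);
        free_slab(s, slab);
}
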
>> Signed-off-by: Vlastimil Babka <vbabka@...e.cz>
>> ---
>> mm/slub.c | 49 +++++++++++++++++++++----------------------------
>> 1 file changed, 21 insertions(+), 28 deletions(-)
>>
>> diff --git a/mm/slub.c b/mm/slub.c
>> index 829a1f08e8a2..aa4d80109c49 100644
>> --- a/mm/slub.c
>> +++ b/mm/slub.c
>> @@ -426,9 +426,9 @@ struct kmem_cache_node {
>> spinlock_t list_lock;
>> unsigned long nr_partial;
>> struct list_head partial;
>> -#ifdef CONFIG_SLUB_DEBUG
>> atomic_long_t nr_slabs;
>> atomic_long_t total_objects;
>> +#ifdef CONFIG_SLUB_DEBUG
>> struct list_head full;
>> #endif
>> };
>> @@ -438,6 +438,26 @@ static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
>> return s->node[node];
>> }
>>
>> +static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
>> +{
>> + return atomic_long_read(&n->nr_slabs);
>> +}
>> +
>> +static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
>> +{
>> + struct kmem_cache_node *n = get_node(s, node);
>> +
>> + atomic_long_inc(&n->nr_slabs);
>> + atomic_long_add(objects, &n->total_objects);
>> +}
>> +static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
>> +{
>> + struct kmem_cache_node *n = get_node(s, node);
>> +
>> + atomic_long_dec(&n->nr_slabs);
>> + atomic_long_sub(objects, &n->total_objects);
>> +}
>> +
>> /*
>> * Iterator over all nodes. The body will be executed for each node that has
>> * a kmem_cache_node structure allocated (which is true for all online nodes)
>> @@ -1511,26 +1531,6 @@ static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct
>> list_del(&slab->slab_list);
>> }
>>
>> -static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
>> -{
>> - return atomic_long_read(&n->nr_slabs);
>> -}
>> -
>> -static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
>> -{
>> - struct kmem_cache_node *n = get_node(s, node);
>> -
>> - atomic_long_inc(&n->nr_slabs);
>> - atomic_long_add(objects, &n->total_objects);
>> -}
>> -static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
>> -{
>> - struct kmem_cache_node *n = get_node(s, node);
>> -
>> - atomic_long_dec(&n->nr_slabs);
>> - atomic_long_sub(objects, &n->total_objects);
>> -}
>> -
>> /* Object debug checks for alloc/free paths */
>> static void setup_object_debug(struct kmem_cache *s, void *object)
>> {
>> @@ -1871,13 +1871,6 @@ slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name)
>>
>> #define disable_higher_order_debug 0
>>
>> -static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
>> - { return 0; }
>> -static inline void inc_slabs_node(struct kmem_cache *s, int node,
>> - int objects) {}
>> -static inline void dec_slabs_node(struct kmem_cache *s, int node,
>> - int objects) {}
>> -
>> #ifndef CONFIG_SLUB_TINY
>> static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
>> void **freelist, void *nextfree)
>>
>> --
>> 2.45.2
>>
>>
>