[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20220324110603.GA2112827@odroid>
Date: Thu, 24 Mar 2022 11:06:03 +0000
From: Hyeonggon Yoo <42.hyeyoo@...il.com>
To: Vlastimil Babka <vbabka@...e.cz>
Cc: Christoph Lameter <cl@...ux.com>,
Pekka Enberg <penberg@...nel.org>,
David Rientjes <rientjes@...gle.com>,
Joonsoo Kim <iamjoonsoo.kim@....com>,
Andrew Morton <akpm@...ux-foundation.org>,
Vlastimil Babka <vbabka@...e.cz>,
Marco Elver <elver@...gle.com>,
Matthew WilCox <willy@...radead.org>,
Roman Gushchin <roman.gushchin@...ux.dev>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: Re: [RFC PATCH v1 01/15] mm/slab: cleanup slab_alloc() and
slab_alloc_node()
Vlastimil wrote:
> On 3/8/22 12:41, Hyeonggon Yoo wrote:
> > +
> > static __always_inline void *
> > -slab_alloc(struct kmem_cache *cachep, gfp_t flags, size_t orig_size, unsigned long caller)
> > +slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_size,
> > + unsigned long caller)
> > {
> > unsigned long save_flags;
> > - void *objp;
> > + void *ptr;
> > + int slab_node = numa_mem_id();
> > struct obj_cgroup *objcg = NULL;
> > bool init = false;
> >
> > @@ -3299,21 +3255,49 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, size_t orig_size, unsigned lo
> > if (unlikely(!cachep))
> > return NULL;
> >
> > - objp = kfence_alloc(cachep, orig_size, flags);
> > - if (unlikely(objp))
> > - goto out;
> > + ptr = kfence_alloc(cachep, orig_size, flags);
> > + if (unlikely(ptr))
> > + goto out_hooks;
> >
> > cache_alloc_debugcheck_before(cachep, flags);
> > local_irq_save(save_flags);
> > - objp = __do_cache_alloc(cachep, flags);
>
> Looks like after this patch, slab_alloc() (without a node specified)
> will not end up in __do_cache_alloc() anymore, so there's no more
> possibility of alternate_node_alloc(), which looks like a functional
> regression?
>
Ah, that was not intended. Thank you for catching this!
Will fix in v2.
Thank you so much.
> > +
> > + if (node_match(nodeid, slab_node)) {
> > + /*
> > + * Use the locally cached objects if possible.
> > + * However ____cache_alloc does not allow fallback
> > + * to other nodes. It may fail while we still have
> > + * objects on other nodes available.
> > + */
> > + ptr = ____cache_alloc(cachep, flags);
> > + if (ptr)
> > + goto out;
> > + }
> > +#ifdef CONFIG_NUMA
> > + else if (unlikely(!get_node(cachep, nodeid))) {
> > + /* Node not bootstrapped yet */
> > + ptr = fallback_alloc(cachep, flags);
> > + goto out;
> > + }
> > +
> > + /* ___cache_alloc_node can fall back to other nodes */
> > + ptr = ____cache_alloc_node(cachep, flags, nodeid);
> > +#endif
> > +out:
> > local_irq_restore(save_flags);
> > - objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
> > - prefetchw(objp);
> > + ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
> > + prefetchw(ptr);
> > init = slab_want_init_on_alloc(flags, cachep);
> >
> > -out:
> > - slab_post_alloc_hook(cachep, objcg, flags, 1, &objp, init);
> > - return objp;
> > +out_hooks:
> > + slab_post_alloc_hook(cachep, objcg, flags, 1, &ptr, init);
> > + return ptr;
> > +}
> > +
> > +static __always_inline void *
> > +slab_alloc(struct kmem_cache *cachep, gfp_t flags, size_t orig_size, unsigned long caller)
> > +{
> > + return slab_alloc_node(cachep, flags, NUMA_NO_NODE, orig_size, caller);
> > }
> >
> > /*
--
Thank you, You are awesome!
Hyeonggon :-)
Powered by blists - more mailing lists