Message-ID: <20201127161828.GD840171@carbon.dhcp.thefacebook.com>
Date:   Fri, 27 Nov 2020 08:18:28 -0800
From:   Roman Gushchin <guro@...com>
To:     Shakeel Butt <shakeelb@...gle.com>
CC:     Andrew Morton <akpm@...ux-foundation.org>,
        Linux MM <linux-mm@...ck.org>,
        Johannes Weiner <hannes@...xchg.org>,
        Michal Hocko <mhocko@...nel.org>,
        LKML <linux-kernel@...r.kernel.org>,
        Kernel Team <kernel-team@...com>, <stable@...r.kernel.org>
Subject: Re: [PATCH] mm: memcg/slab: fix obj_cgroup_charge() return value
 handling

On Thu, Nov 26, 2020 at 09:55:24PM -0800, Shakeel Butt wrote:
> On Thu, Nov 26, 2020 at 8:14 PM Roman Gushchin <guro@...com> wrote:
> >
> > Commit 10befea91b61 ("mm: memcg/slab: use a single set of kmem_caches
> > for all allocations") introduced a regression into the handling of the
> > obj_cgroup_charge() return value. If a non-zero value is returned
> > (indicating that one of the memory.max limits has been exceeded), the
> > allocation should fail instead of falling back to non-accounted mode.
> >
> > To make the code more readable, move the calling conditions of
> > memcg_slab_pre_alloc_hook() and memcg_slab_post_alloc_hook() into
> > the bodies of these hooks.
> >
> > Fixes: 10befea91b61 ("mm: memcg/slab: use a single set of kmem_caches for all allocations")
> > Signed-off-by: Roman Gushchin <guro@...com>
> > Cc: stable@...r.kernel.org
> > ---
> >  mm/slab.h | 40 ++++++++++++++++++++++++----------------
> >  1 file changed, 24 insertions(+), 16 deletions(-)
> >
> > diff --git a/mm/slab.h b/mm/slab.h
> > index 59aeb0d9f11b..5dc89d8fb05e 100644
> > --- a/mm/slab.h
> > +++ b/mm/slab.h
> > @@ -257,22 +257,32 @@ static inline size_t obj_full_size(struct kmem_cache *s)
> >         return s->size + sizeof(struct obj_cgroup *);
> >  }
> >
> > -static inline struct obj_cgroup *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
> > -                                                          size_t objects,
> > -                                                          gfp_t flags)
> > +/*
> > + * Returns true if the allocation should fail.
> 
> IMO returning false if the allocation should fail makes this more
> clear. Otherwise the patch looks good to me.

Ok, I agree. Here is an updated version.

Thank you for looking into it!
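
For illustration only (not part of the patch itself): with the inverted
convention, a caller can bail out directly when the hook reports failure,
which is what slab_pre_alloc_hook() in the patch below ends up doing:

	struct obj_cgroup *objcg = NULL;

	if (!memcg_slab_pre_alloc_hook(s, &objcg, size, flags))
		return NULL;	/* charge failed: fail the allocation */

	/* objcg is either NULL (no accounting needed) or a charged objcg */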

--

From 456ce03f1c91baf5e2441dce0649e09617437fe4 Mon Sep 17 00:00:00 2001
From: Roman Gushchin <guro@...com>
Date: Thu, 26 Nov 2020 07:39:57 -0800
Subject: [PATCH v2] mm: memcg/slab: fix obj_cgroup_charge() return value
 handling

Commit 10befea91b61 ("mm: memcg/slab: use a single set of kmem_caches
for all allocations") introduced a regression into the handling of the
obj_cgroup_charge() return value. If a non-zero value is returned
(indicating that one of the memory.max limits has been exceeded), the
allocation should fail instead of falling back to non-accounted mode.

To make the code more readable, move the calling conditions of
memcg_slab_pre_alloc_hook() and memcg_slab_post_alloc_hook() into
the bodies of these hooks.

Fixes: 10befea91b61 ("mm: memcg/slab: use a single set of kmem_caches for all allocations")
Signed-off-by: Roman Gushchin <guro@...com>
Cc: stable@...r.kernel.org
---
 mm/slab.h | 40 ++++++++++++++++++++++++----------------
 1 file changed, 24 insertions(+), 16 deletions(-)

diff --git a/mm/slab.h b/mm/slab.h
index 59aeb0d9f11b..0698a3c0a9da 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -257,22 +257,32 @@ static inline size_t obj_full_size(struct kmem_cache *s)
 	return s->size + sizeof(struct obj_cgroup *);
 }
 
-static inline struct obj_cgroup *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
-							   size_t objects,
-							   gfp_t flags)
+/*
+ * Returns false if the allocation should fail.
+ */
+static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
+					     struct obj_cgroup **objcgp,
+					     size_t objects, gfp_t flags)
 {
 	struct obj_cgroup *objcg;
 
+	if (!memcg_kmem_enabled())
+		return true;
+
+	if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
+		return true;
+
 	objcg = get_obj_cgroup_from_current();
 	if (!objcg)
-		return NULL;
+		return true;
 
 	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) {
 		obj_cgroup_put(objcg);
-		return NULL;
+		return false;
 	}
 
-	return objcg;
+	*objcgp = objcg;
+	return true;
 }
 
 static inline void mod_objcg_state(struct obj_cgroup *objcg,
@@ -298,7 +308,7 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
 	unsigned long off;
 	size_t i;
 
-	if (!objcg)
+	if (!memcg_kmem_enabled() || !objcg)
 		return;
 
 	flags &= ~__GFP_ACCOUNT;
@@ -382,11 +392,11 @@ static inline void memcg_free_page_obj_cgroups(struct page *page)
 {
 }
 
-static inline struct obj_cgroup *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
-							   size_t objects,
-							   gfp_t flags)
+static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
+					     struct obj_cgroup **objcgp,
+					     size_t objects, gfp_t flags)
 {
-	return NULL;
+	return true;
 }
 
 static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
@@ -494,9 +504,8 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
 	if (should_failslab(s, flags))
 		return NULL;
 
-	if (memcg_kmem_enabled() &&
-	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
-		*objcgp = memcg_slab_pre_alloc_hook(s, size, flags);
+	if (!memcg_slab_pre_alloc_hook(s, objcgp, size, flags))
+		return NULL;
 
 	return s;
 }
@@ -515,8 +524,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
 					 s->flags, flags);
 	}
 
-	if (memcg_kmem_enabled())
-		memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
+	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
 }
 
 #ifndef CONFIG_SLOB
-- 
2.26.2
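
To illustrate the fixed behavior (a sketch, not part of the patch; it assumes
obj_cgroup_charge() fails against one of the memory.max limits):

	/* Hypothetical accounted allocation from such a memcg. */
	void *p = kmalloc(64, GFP_KERNEL | __GFP_ACCOUNT);

	/*
	 * Before the fix: the charge failure was ignored and the
	 * allocation succeeded, silently left unaccounted.
	 * With the fix: memcg_slab_pre_alloc_hook() returns false,
	 * slab_pre_alloc_hook() returns NULL, and p is NULL here.
	 */
	if (!p)
		pr_debug("accounted allocation failed as expected\n");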
