lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Tue,  8 Mar 2022 13:10:50 +0000
From:   Yafang Shao <laoar.shao@...il.com>
To:     ast@...nel.org, daniel@...earbox.net, andrii@...nel.org,
        kafai@...com, songliubraving@...com, yhs@...com,
        john.fastabend@...il.com, kpsingh@...nel.org,
        akpm@...ux-foundation.org, cl@...ux.com, penberg@...nel.org,
        rientjes@...gle.com, iamjoonsoo.kim@....com, vbabka@...e.cz,
        hannes@...xchg.org, mhocko@...nel.org, vdavydov.dev@...il.com,
        guro@...com
Cc:     linux-mm@...ck.org, netdev@...r.kernel.org, bpf@...r.kernel.org,
        Yafang Shao <laoar.shao@...il.com>
Subject: [PATCH RFC 3/9] mm: add method to charge kmalloc-ed address

This patch implements a method to charge or uncharge related pages
or objects from a given kmalloc-ed address. It is similar to kfree,
except that it doesn't touch the pages or objects; it only performs
the accounting.

Signed-off-by: Yafang Shao <laoar.shao@...il.com>
---
 include/linux/slab.h |  1 +
 mm/slab.c            |  6 ++++++
 mm/slob.c            |  6 ++++++
 mm/slub.c            | 32 ++++++++++++++++++++++++++++++++
 4 files changed, 45 insertions(+)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 5b6193f..ae82e23 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -182,6 +182,7 @@ struct kmem_cache *kmem_cache_create_usercopy(const char *name,
 void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __alloc_size(2);
 void kfree(const void *objp);
 void kfree_sensitive(const void *objp);
+void kcharge(const void *objp, bool charge);
 size_t __ksize(const void *objp);
 size_t ksize(const void *objp);
 #ifdef CONFIG_PRINTK
diff --git a/mm/slab.c b/mm/slab.c
index ddf5737..fbff613 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3796,6 +3796,12 @@ void kfree(const void *objp)
 }
 EXPORT_SYMBOL(kfree);
 
+void kcharge(const void *objp, bool charge)
+{
+	/* Accounting not implemented for SLAB yet. */
+}
+EXPORT_SYMBOL(kcharge);
+
 /*
  * This initializes kmem_cache_node or resizes various caches for all nodes.
  */
diff --git a/mm/slob.c b/mm/slob.c
index 60c5842..d3a789f 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -569,6 +569,12 @@ void kfree(const void *block)
 }
 EXPORT_SYMBOL(kfree);
 
+void kcharge(const void *block, bool charge)
+{
+	/* Accounting not implemented for SLOB yet. */
+}
+EXPORT_SYMBOL(kcharge);
+
 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
 size_t __ksize(const void *block)
 {
diff --git a/mm/slub.c b/mm/slub.c
index 2614740..e933d45 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4563,6 +4563,38 @@ void kfree(const void *x)
 }
 EXPORT_SYMBOL(kfree);
 
+void kcharge(const void *x, bool charge)
+{
+	void *object = (void *)x;
+	struct folio *folio;
+	struct slab *slab;
+	/* Charging is only expected from process context. */
+	WARN_ON(!in_task());
+
+	if (unlikely(ZERO_OR_NULL_PTR(x)))
+		return;
+
+	folio = virt_to_folio(x);
+	if (unlikely(!folio_test_slab(folio))) {
+		unsigned int order = folio_order(folio);
+		int sign = charge ? 1 : -1;
+		/* Non-slab folio (large kmalloc): account the whole compound page's bytes. */
+		mod_lruvec_page_state(folio_page(folio, 0), NR_SLAB_UNRECLAIMABLE_B,
+			sign * (PAGE_SIZE << order));
+
+		return;
+	}
+	/* Slab object: reuse the memcg slab alloc/free accounting hooks. */
+	slab = folio_slab(folio);
+	if (charge)
+		memcg_slab_post_alloc_hook(slab->slab_cache,
+			get_obj_cgroup_from_current(), GFP_KERNEL, 1, &object);
+	else
+		memcg_slab_free_hook(slab->slab_cache, &object, 1);
+
+}
+EXPORT_SYMBOL(kcharge);
+
 #define SHRINK_PROMOTE_MAX 32
 
 /*
-- 
1.8.3.1

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ