Message-ID: <20230915105933.495735-13-matteorizzo@google.com>
Date:   Fri, 15 Sep 2023 10:59:31 +0000
From:   Matteo Rizzo <matteorizzo@...gle.com>
To:     cl@...ux.com, penberg@...nel.org, rientjes@...gle.com,
        iamjoonsoo.kim@....com, akpm@...ux-foundation.org, vbabka@...e.cz,
        roman.gushchin@...ux.dev, 42.hyeyoo@...il.com,
        keescook@...omium.org, linux-kernel@...r.kernel.org,
        linux-doc@...r.kernel.org, linux-mm@...ck.org,
        linux-hardening@...r.kernel.org, tglx@...utronix.de,
        mingo@...hat.com, bp@...en8.de, dave.hansen@...ux.intel.com,
        x86@...nel.org, hpa@...or.com, corbet@....net, luto@...nel.org,
        peterz@...radead.org
Cc:     jannh@...gle.com, matteorizzo@...gle.com, evn@...gle.com,
        poprdi@...gle.com, jordyzomer@...gle.com
Subject: [RFC PATCH 12/14] mm/slub: introduce the deallocated_pages sysfs attribute

From: Jann Horn <jannh@...gle.com>

When SLAB_VIRTUAL is enabled, this new sysfs attribute tracks the
number of slab pages whose physical memory has been reclaimed but whose
virtual memory is still allocated to a kmem_cache.
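
For illustration, here is a minimal userspace sketch (not part of this
patch) that reads the new attribute. It assumes the standard SLUB sysfs
layout under /sys/kernel/slab/<cache>/; "kmalloc-64" is only an example
cache name, and the file only exists when CONFIG_SLAB_VIRTUAL is
enabled.

	#include <stdio.h>

	int main(void)
	{
		unsigned long nr_freed;
		FILE *f;

		/* Path follows the standard SLUB sysfs layout. */
		f = fopen("/sys/kernel/slab/kmalloc-64/deallocated_pages", "r");
		if (!f) {
			perror("fopen");
			return 1;
		}
		if (fscanf(f, "%lu", &nr_freed) == 1)
			printf("kmalloc-64: %lu freed pages still virtually mapped\n",
			       nr_freed);
		fclose(f);
		return 0;
	}

The counter is updated under freed_slabs_lock with WRITE_ONCE() and
read locklessly with READ_ONCE() in the show function, so a concurrent
reader may observe a slightly stale value; that is acceptable for a
statistics-only attribute.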

Signed-off-by: Jann Horn <jannh@...gle.com>
Co-developed-by: Matteo Rizzo <matteorizzo@...gle.com>
Signed-off-by: Matteo Rizzo <matteorizzo@...gle.com>
---
 include/linux/slub_def.h |  4 +++-
 mm/slub.c                | 18 ++++++++++++++++++
 2 files changed, 21 insertions(+), 1 deletion(-)

diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 693e9bb34edc..eea402d849da 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -88,7 +88,7 @@ struct kmem_cache_cpu {
  */
 struct kmem_cache_virtual {
 #ifdef CONFIG_SLAB_VIRTUAL
-	/* Protects freed_slabs and freed_slabs_min */
+	/* Protects freed_slabs, freed_slabs_min, and nr_freed_pages */
 	spinlock_t freed_slabs_lock;
 	/*
 	 * Slabs on this list have virtual memory of size oo allocated to them
@@ -97,6 +97,8 @@ struct kmem_cache_virtual {
 	struct list_head freed_slabs;
 	/* Same as freed_slabs but with memory of size min */
 	struct list_head freed_slabs_min;
+	/* Number of slab pages whose physical memory has been freed */
+	unsigned long nr_freed_pages;
 #endif
 };
 
diff --git a/mm/slub.c b/mm/slub.c
index 66ae60cdadaf..0f7f5bf0b174 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2110,6 +2110,8 @@ static struct slab *get_free_slab(struct kmem_cache *s,
 
 	if (likely(slab)) {
 		list_del(&slab->slab_list);
+		WRITE_ONCE(s->virtual.nr_freed_pages,
+			s->virtual.nr_freed_pages - (1UL << slab_order(slab)));
 
 		spin_unlock_irqrestore(&s->virtual.freed_slabs_lock, flags);
 		return slab;
@@ -2158,6 +2160,8 @@ static struct slab *alloc_slab_page(struct kmem_cache *s,
 		/* Rollback: put the struct slab back. */
 		spin_lock_irqsave(&s->virtual.freed_slabs_lock, flags);
 		list_add(&slab->slab_list, freed_slabs);
+		WRITE_ONCE(s->virtual.nr_freed_pages,
+			s->virtual.nr_freed_pages + (1UL << slab_order(slab)));
 		spin_unlock_irqrestore(&s->virtual.freed_slabs_lock, flags);
 
 		return NULL;
@@ -2438,6 +2442,8 @@ static void slub_tlbflush_worker(struct kthread_work *work)
 			WARN_ON(oo_order(slab->oo) != oo_order(s->min));
 			list_add(&slab->slab_list, &s->virtual.freed_slabs_min);
 		}
+		WRITE_ONCE(s->virtual.nr_freed_pages, s->virtual.nr_freed_pages +
+			(1UL << slab_order(slab)));
 		spin_unlock(&s->virtual.freed_slabs_lock);
 	}
 	spin_unlock_irqrestore(&slub_kworker_lock, irq_flags);
@@ -4924,6 +4930,7 @@ static inline void slab_virtual_open(struct kmem_cache *s)
 	spin_lock_init(&s->virtual.freed_slabs_lock);
 	INIT_LIST_HEAD(&s->virtual.freed_slabs);
 	INIT_LIST_HEAD(&s->virtual.freed_slabs_min);
+	s->virtual.nr_freed_pages = 0;
 #endif
 }
 
@@ -6098,6 +6105,14 @@ static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
 }
 SLAB_ATTR_RO(objects_partial);
 
+#ifdef CONFIG_SLAB_VIRTUAL
+static ssize_t deallocated_pages_show(struct kmem_cache *s, char *buf)
+{
+	return sysfs_emit(buf, "%lu\n", READ_ONCE(s->virtual.nr_freed_pages));
+}
+SLAB_ATTR_RO(deallocated_pages);
+#endif /* CONFIG_SLAB_VIRTUAL */
+
 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
 {
 	int objects = 0;
@@ -6424,6 +6439,9 @@ static struct attribute *slab_attrs[] = {
 	&min_partial_attr.attr,
 	&cpu_partial_attr.attr,
 	&objects_partial_attr.attr,
+#ifdef CONFIG_SLAB_VIRTUAL
+	&deallocated_pages_attr.attr,
+#endif
 	&partial_attr.attr,
 	&cpu_slabs_attr.attr,
 	&ctor_attr.attr,
-- 
2.42.0.459.ge4e396fd5e-goog
