Message-Id: <20250716202006.3640584-3-youngjun.park@lge.com>
Date: Thu, 17 Jul 2025 05:20:04 +0900
From: Youngjun Park <youngjun.park@....com>
To: akpm@...ux-foundation.org,
	hannes@...xchg.org
Cc: mhocko@...nel.org,
	roman.gushchin@...ux.dev,
	shakeel.butt@...ux.dev,
	muchun.song@...ux.dev,
	shikemeng@...weicloud.com,
	kasong@...cent.com,
	nphamcs@...il.com,
	bhe@...hat.com,
	baohua@...nel.org,
	chrisl@...nel.org,
	cgroups@...r.kernel.org,
	linux-mm@...ck.org,
	linux-kernel@...r.kernel.org,
	gunho.lee@....com,
	iamjoonsoo.kim@....com,
	taejoon.song@....com,
	Youngjun Park <youngjun.park@....com>
Subject: [PATCH 2/4] mm: swap: Apply per-cgroup swap priority mechanism to swap layer

Apply the per-cgroup swap priority mechanism to the swap layer.

This implements:
- Swap device ID assignment based on the cgroup's effective priority
- Swap device selection that respects cgroup-specific priorities
- Swapon/swapoff propagation that updates per-cgroup settings accordingly

Currently, the per-CPU swap cluster cache is bypassed, since different
cgroups may select different devices based on their configured priorities.
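
The device-selection path follows the same pattern as swap_alloc_slow(),
but walks a list ordered by the cgroup's effective priority rather than
the global swap_avail_heads plist. A simplified sketch of the idea (the
memcg->swap_priority field and avail_heads[] naming are illustrative
only, and the plist requeue/restart and race handling that
swap_alloc_slow() performs are omitted):

bool swap_alloc_cgroup_priority(struct mem_cgroup *memcg,
				swp_entry_t *entry, int order)
{
	struct swap_info_struct *si;
	unsigned long offset;
	int node;

	/*
	 * No per-cgroup priorities configured: return false so the
	 * caller falls back to the global fast/slow path.
	 */
	if (!memcg || !memcg->swap_priority)
		return false;

	spin_lock(&swap_avail_lock);
	node = numa_node_id();
	/* Walk devices in the cgroup's effective-priority order. */
	plist_for_each_entry(si, &memcg->swap_priority->avail_heads[node],
			     avail_lists[node]) {
		spin_unlock(&swap_avail_lock);
		if (get_swap_device_info(si)) {
			offset = cluster_alloc_swap_entry(si, order,
							  SWAP_HAS_CACHE);
			put_swap_device(si);
			if (offset) {
				*entry = swp_entry(si->type, offset);
				return true;
			}
		}
		spin_lock(&swap_avail_lock);
	}
	spin_unlock(&swap_avail_lock);
	return false;
}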

Signed-off-by: Youngjun Park <youngjun.park@....com>
---
 mm/swap_cgroup_priority.c |  6 ++---
 mm/swapfile.c             | 46 +++++++++++++++++++++++++++++++++++++--
 2 files changed, 47 insertions(+), 5 deletions(-)
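
Note for reviewers: with CONFIG_SWAP_CGROUP_PRIORITY enabled, the
swap_show() variant added below prints a trailing "Id" column so the
per-device ID assigned via get_swapdev_id() can be correlated with the
cgroup-side interface. Hypothetical output (device names, sizes and
IDs made up, column spacing approximate):

Filename				Type		Size		Used		Priority		Id
/dev/zram0                              partition	8388604		1024		100			1
/swapfile                               file		2097148		0		-2			2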

diff --git a/mm/swap_cgroup_priority.c b/mm/swap_cgroup_priority.c
index abbefa6de63a..979bc18d2eed 100644
--- a/mm/swap_cgroup_priority.c
+++ b/mm/swap_cgroup_priority.c
@@ -243,9 +243,9 @@ bool swap_alloc_cgroup_priority(struct mem_cgroup *memcg,
 	unsigned long offset;
 	int node;
 
-	/* TODO
-	 * Per-cpu swapdev cache can't be used directly as cgroup-specific
-	 * priorities may select different devices.
+	/*
+	 * TODO: Per-cpu swap cluster cache can't be used directly
+	 * as cgroup-specific priorities may select different devices.
 	 */
 	spin_lock(&swap_avail_lock);
 	node = numa_node_id();
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 4b56f117b2b0..bfd0532ad250 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1029,6 +1029,7 @@ static void del_from_avail_list(struct swap_info_struct *si, bool swapoff)
 	for_each_node(nid)
 		plist_del(&si->avail_lists[nid], &swap_avail_heads[nid]);
 
+	deactivate_swap_cgroup_priority(si, swapoff);
 skip:
 	spin_unlock(&swap_avail_lock);
 }
@@ -1072,6 +1073,7 @@ static void add_to_avail_list(struct swap_info_struct *si, bool swapon)
 	for_each_node(nid)
 		plist_add(&si->avail_lists[nid], &swap_avail_heads[nid]);
 
+	activate_swap_cgroup_priority(si, swapon);
 skip:
 	spin_unlock(&swap_avail_lock);
 }
@@ -1292,8 +1294,10 @@ int folio_alloc_swap(struct folio *folio, gfp_t gfp)
 	}
 
 	local_lock(&percpu_swap_cluster.lock);
-	if (!swap_alloc_fast(&entry, order))
-		swap_alloc_slow(&entry, order);
+	if (!swap_alloc_cgroup_priority(folio_memcg(folio), &entry, order)) {
+		if (!swap_alloc_fast(&entry, order))
+			swap_alloc_slow(&entry, order);
+	}
 	local_unlock(&percpu_swap_cluster.lock);
 
 	/* Need to call this even if allocation failed, for MEMCG_SWAP_FAIL. */
@@ -2778,6 +2782,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
 	if (!p->bdev || !bdev_nonrot(p->bdev))
 		atomic_dec(&nr_rotate_swap);
 
+	purge_swap_cgroup_priority();
 	mutex_lock(&swapon_mutex);
 	spin_lock(&swap_lock);
 	spin_lock(&p->lock);
@@ -2895,6 +2900,8 @@ static void swap_stop(struct seq_file *swap, void *v)
 	mutex_unlock(&swapon_mutex);
 }
 
+
+#ifndef CONFIG_SWAP_CGROUP_PRIORITY
 static int swap_show(struct seq_file *swap, void *v)
 {
 	struct swap_info_struct *si = v;
@@ -2921,6 +2928,34 @@ static int swap_show(struct seq_file *swap, void *v)
 			si->prio);
 	return 0;
 }
+#else
+static int swap_show(struct seq_file *swap, void *v)
+{
+	struct swap_info_struct *si = v;
+	struct file *file;
+	int len;
+	unsigned long bytes, inuse;
+
+	if (si == SEQ_START_TOKEN) {
+		seq_puts(swap, "Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority\t\tId\n");
+		return 0;
+	}
+
+	bytes = K(si->pages);
+	inuse = K(swap_usage_in_pages(si));
+
+	file = si->swap_file;
+	len = seq_file_path(swap, file, " \t\n\\");
+	seq_printf(swap, "%*s%s\t%lu\t%s%lu\t%s%d\t\t\t%llu\n",
+			len < 40 ? 40 - len : 1, " ",
+			S_ISBLK(file_inode(file)->i_mode) ?
+				"partition" : "file\t",
+			bytes, bytes < 10000000 ? "\t" : "",
+			inuse, inuse < 10000000 ? "\t" : "",
+			si->prio, si->id);
+	return 0;
+}
+#endif /* CONFIG_SWAP_CGROUP_PRIORITY */
 
 static const struct seq_operations swaps_op = {
 	.start =	swap_start,
@@ -3463,6 +3498,13 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 		goto free_swap_zswap;
 	}
 
+	error = prepare_swap_cgroup_priority(si->type);
+	if (error) {
+		inode->i_flags &= ~S_SWAPFILE;
+		goto free_swap_zswap;
+	}
+	get_swapdev_id(si);
+
 	mutex_lock(&swapon_mutex);
 	prio = -1;
 	if (swap_flags & SWAP_FLAG_PREFER)
-- 
2.34.1

