lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20260123045733.6954-5-ravis.opensrc@gmail.com>
Date: Thu, 22 Jan 2026 20:57:27 -0800
From: Ravi Jonnalagadda <ravis.opensrc@...il.com>
To: damon@...ts.linux.dev,
	linux-mm@...ck.org,
	linux-kernel@...r.kernel.org,
	linux-doc@...r.kernel.org
Cc: sj@...nel.org,
	akpm@...ux-foundation.org,
	corbet@....net,
	bijan311@...il.com,
	ajayjoshi@...ron.com,
	Ravi Jonnalagadda <ravis.opensrc@...il.com>
Subject: [RFC PATCH 4/5] mm/damon/paddr: capacity clamp and directional early-exit for node_sys_bp

Clamp effective target to node capacity (bp) and skip in-migration if
the node already meets/exceeds it. This avoids oscillation and
unnecessary work in two-context DRAM/CXL setups when quota goals
(e.g., node_sys_bp) are met.

Signed-off-by: Ravi Jonnalagadda <ravis.opensrc@...il.com>
---
 mm/damon/paddr.c | 44 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 44 insertions(+)

diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 30e4e5663dcb..64dbdd2196a5 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -300,10 +300,54 @@ static unsigned long damon_pa_deactivate_pages(struct damon_region *r,
 			sz_filter_passed);
 }
 
+/*
+ * Return the fraction of total system memory that node @nid provides, in
+ * basis points (1/10000).  Returns 0 for an invalid node id or when the
+ * system total is unknown, which callers treat as "no clamp available".
+ */
+static unsigned long damon_pa_node_capacity_bp(int nid)
+{
+	struct pglist_data *pgdat;
+	unsigned long sys_total = damon_pa_totalram_bytes();
+	u64 node_total;
+
+	if (nid < 0 || nid >= MAX_NUMNODES || !sys_total)
+		return 0;
+	pgdat = NODE_DATA(nid);
+	if (!pgdat)
+		return 0;
+	/*
+	 * Use node_present_pages, not node_spanned_pages: spanned includes
+	 * memory holes and would overstate the node's real capacity.  Widen
+	 * to u64 before shifting so the byte count cannot overflow on 32-bit.
+	 */
+	node_total = (u64)pgdat->node_present_pages << PAGE_SHIFT;
+	return div64_u64(node_total * 10000ULL, sys_total);
+}
+
 static unsigned long damon_pa_migrate(struct damon_region *r,
 		unsigned long addr_unit, struct damos *s,
 		unsigned long *sz_filter_passed)
 {
+	/*
+	 * Capacity clamp + directional early-exit for node_sys_bp goals:
+	 * If we are migrating INTO g->nid and the current bp for that node is
+	 * already >= min(target_bp, capacity_bp), skip work this interval.
+	 *
+	 * NOTE(review): only the first DAMOS_QUOTA_NODE_SYS_BP goal with a
+	 * valid nid is consulted — the loop breaks after it, so any further
+	 * node_sys_bp goals on this scheme are ignored.  Confirm that is
+	 * intended when multiple goals are configured.
+	 *
+	 * NOTE(review): this statement block runs before the declarations
+	 * below, which triggers the kernel's -Wdeclaration-after-statement
+	 * warning — consider hoisting the check into a static helper that is
+	 * called after the declarations.
+	 */
+	{
+		struct damos_quota_goal *g;
+
+		list_for_each_entry(g, &s->quota.goals, list) {
+			unsigned long cap_bp, effective_target_bp;
+
+			/* Only node_sys_bp goals with a concrete node apply. */
+			if (g->metric != DAMOS_QUOTA_NODE_SYS_BP)
+				continue;
+			if (g->nid < 0)
+				continue;
+
+			/* 0 means capacity is unknown; do not clamp or skip. */
+			cap_bp = damon_pa_node_capacity_bp(g->nid);
+			if (!cap_bp)
+				break;
+
+			/* A target above the node's capacity is unreachable;
+			 * clamp so a too-high goal cannot force endless work.
+			 */
+			effective_target_bp = min(g->target_value, cap_bp);
+			if (s->target_nid == g->nid &&
+			    g->current_value >= effective_target_bp)
+				return 0;
+			break;
+		}
+	}
+
 	phys_addr_t addr, applied;
 	LIST_HEAD(folio_list);
 	struct folio *folio;
-- 
2.43.0


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ