Message-Id: <1317170947-17074-7-git-send-email-walken@google.com>
Date:	Tue, 27 Sep 2011 17:49:04 -0700
From:	Michel Lespinasse <walken@...gle.com>
To:	linux-mm@...ck.org, linux-kernel@...r.kernel.org,
	Andrew Morton <akpm@...ux-foundation.org>,
	KAMEZAWA Hiroyuki <kamezawa.hiroyu@...fujitsu.com>,
	Dave Hansen <dave@...ux.vnet.ibm.com>,
	Rik van Riel <riel@...hat.com>,
	Balbir Singh <bsingharora@...il.com>,
	Peter Zijlstra <a.p.zijlstra@...llo.nl>
Cc:	Andrea Arcangeli <aarcange@...hat.com>,
	Johannes Weiner <jweiner@...hat.com>,
	KOSAKI Motohiro <kosaki.motohiro@...fujitsu.com>,
	Hugh Dickins <hughd@...gle.com>,
	Michael Wolf <mjwolf@...ibm.com>
Subject: [PATCH 6/9] kstaled: rate limit pages scanned per second.

Every second, scan a slice of each node's pages (its spanned pages
divided by the scan interval in seconds), instead of scanning the
entire memory at once and then sitting idle for the rest of the
configured interval.
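
For illustration only (this is not part of the patch): a minimal
userspace sketch of the chunked scanning, under the assumption of a
single flat pfn range. struct node, scan_node_tick() and the page
"scan" are hypothetical stand-ins for pg_data_t and
kstaled_scan_node() in the diff below.

#include <stdbool.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct node {
	unsigned long start_pfn;	/* first pfn of the node */
	unsigned long spanned_pages;	/* size of the pfn range */
	unsigned long idle_scan_pfn;	/* resume point between ticks */
};

/* Scan one tick's worth of pages; return true once a full pass is done. */
static bool scan_node_tick(struct node *n, int scan_seconds, bool reset)
{
	unsigned long pfn = n->start_pfn;
	unsigned long node_end = pfn + n->spanned_pages;
	unsigned long end;

	if (!reset && pfn < n->idle_scan_pfn)
		pfn = n->idle_scan_pfn;	/* resume where the last tick stopped */
	end = pfn + DIV_ROUND_UP(n->spanned_pages, scan_seconds);
	if (end > node_end)
		end = node_end;

	for (; pfn < end; pfn++)
		;	/* stand-in for examining one page */

	n->idle_scan_pfn = end;
	return end >= node_end;
}

int main(void)
{
	struct node n = { .start_pfn = 0, .spanned_pages = 1000 };
	bool reset = true;
	int tick;

	for (tick = 0; tick < 4; tick++) {	/* scan_seconds == 3 */
		bool done = scan_node_tick(&n, 3, reset);

		printf("tick %d: scanned up to pfn %lu, done=%d\n",
		       tick, n.idle_scan_pfn, done);
		reset = done;	/* next pass restarts from the node start */
	}
	return 0;
}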

In addition to spreading the CPU usage over the entire scanning
interval, rate limiting the scan also reduces the jitter between two
consecutive scans of the same page.
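
Equally hypothetical, the scheduling side in userspace terms: keep an
absolute once-per-second deadline so that time spent scanning eats
into the sleep rather than delaying the next tick, and warn when a
tick overruns by a full second. CLOCK_MONOTONIC stands in for jiffies
here; the real loop is kstaled() in the patch below.

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec deadline, now;
	int tick;

	clock_gettime(CLOCK_MONOTONIC, &deadline);
	for (tick = 0; tick < 3; tick++) {
		double late;

		deadline.tv_sec += 1;	/* the "deadline += HZ" step */

		/* ... scan one tick's worth of pages here ... */

		clock_gettime(CLOCK_MONOTONIC, &now);
		late = (now.tv_sec - deadline.tv_sec) +
		       (now.tv_nsec - deadline.tv_nsec) / 1e9;
		if (late < 0)
			clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME,
					&deadline, NULL);
		else if (late >= 1.0)
			fprintf(stderr, "running %.2f seconds late\n", late);
	}
	return 0;
}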


Signed-off-by: Michel Lespinasse <walken@...gle.com>
---
 include/linux/mmzone.h |    3 ++
 mm/memcontrol.c        |   71 ++++++++++++++++++++++++++++++++++-------------
 2 files changed, 54 insertions(+), 20 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 6657106..272fbed 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -631,6 +631,9 @@ typedef struct pglist_data {
 	unsigned long node_present_pages; /* total number of physical pages */
 	unsigned long node_spanned_pages; /* total size of physical page
 					     range, including holes */
+#ifdef CONFIG_KSTALED
+	unsigned long node_idle_scan_pfn;
+#endif
 	int node_id;
 	wait_queue_head_t kswapd_wait;
 	struct task_struct *kswapd;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b75d41f..b468867 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5736,15 +5736,19 @@ static unsigned kstaled_scan_page(struct page *page)
 	return nr_pages;
 }
 
-static void kstaled_scan_node(pg_data_t *pgdat)
+static bool kstaled_scan_node(pg_data_t *pgdat, int scan_seconds, bool reset)
 {
 	unsigned long flags;
-	unsigned long pfn, end;
+	unsigned long pfn, end, node_end;
 
 	pgdat_resize_lock(pgdat, &flags);
 
 	pfn = pgdat->node_start_pfn;
-	end = pfn + pgdat->node_spanned_pages;
+	node_end = pfn + pgdat->node_spanned_pages;
+	if (!reset && pfn < pgdat->node_idle_scan_pfn)
+		pfn = pgdat->node_idle_scan_pfn;
+	end = min(pfn + DIV_ROUND_UP(pgdat->node_spanned_pages, scan_seconds),
+		  node_end);
 
 	while (pfn < end) {
 		unsigned long contiguous = end;
@@ -5761,8 +5765,8 @@ static void kstaled_scan_node(pg_data_t *pgdat)
 #ifdef CONFIG_MEMORY_HOTPLUG
 				/* abort if the node got resized */
 				if (pfn < pgdat->node_start_pfn ||
-				    end > (pgdat->node_start_pfn +
-					   pgdat->node_spanned_pages))
+				    node_end > (pgdat->node_start_pfn +
+						pgdat->node_spanned_pages))
 					goto abort;
 #endif
 			}
@@ -5774,17 +5778,30 @@ static void kstaled_scan_node(pg_data_t *pgdat)
 
 abort:
 	pgdat_resize_unlock(pgdat, &flags);
+
+	pgdat->node_idle_scan_pfn = min(pfn, end);
+	return pfn >= node_end;
 }
 
 static int kstaled(void *dummy)
 {
+	bool reset = true;
+	long deadline = jiffies;
+
 	while (1) {
 		int scan_seconds;
 		int nid;
-		struct mem_cgroup *memcg;
+		long delta;
+		bool scan_done;
+
+		deadline += HZ;
+		scan_seconds = kstaled_scan_seconds;
+		if (scan_seconds <= 0) {
+			wait_event_interruptible(kstaled_wait,
+				(scan_seconds = kstaled_scan_seconds) > 0);
+			deadline = jiffies + HZ;
+		}
 
-		wait_event_interruptible(kstaled_wait,
-				 (scan_seconds = kstaled_scan_seconds) > 0);
 		/*
 		 * We use interruptible wait_event so as not to contribute
 		 * to the machine load average while we're sleeping.
@@ -5794,21 +5811,35 @@ static int kstaled(void *dummy)
 		 */
 		BUG_ON(scan_seconds <= 0);
 
-		for_each_mem_cgroup_all(memcg)
-			memset(&memcg->idle_scan_stats, 0,
-			       sizeof(memcg->idle_scan_stats));
-
+		scan_done = true;
 		for_each_node_state(nid, N_HIGH_MEMORY)
-			kstaled_scan_node(NODE_DATA(nid));
-
-		for_each_mem_cgroup_all(memcg) {
-			write_seqcount_begin(&memcg->idle_page_stats_lock);
-			memcg->idle_page_stats = memcg->idle_scan_stats;
-			memcg->idle_page_scans++;
-			write_seqcount_end(&memcg->idle_page_stats_lock);
+			scan_done &= kstaled_scan_node(NODE_DATA(nid),
+						       scan_seconds, reset);
+
+		if (scan_done) {
+			struct mem_cgroup *memcg;
+
+			for_each_mem_cgroup_all(memcg) {
+				write_seqcount_begin(
+					&memcg->idle_page_stats_lock);
+				memcg->idle_page_stats =
+					memcg->idle_scan_stats;
+				memcg->idle_page_scans++;
+				write_seqcount_end(
+					&memcg->idle_page_stats_lock);
+				memset(&memcg->idle_scan_stats, 0,
+				       sizeof(memcg->idle_scan_stats));
+			}
 		}
 
-		schedule_timeout_interruptible(scan_seconds * HZ);
+		delta = jiffies - deadline;
+		if (delta < 0)
+			schedule_timeout_interruptible(-delta);
+		else if (delta >= HZ)
+			pr_warning("kstaled running %ld.%02d seconds late\n",
+				   delta / HZ, (int)(delta % HZ) * 100 / HZ);
+
+		reset = scan_done;
 	}
 
 	BUG();
-- 
1.7.3.1

