Date:   Wed, 17 Aug 2022 16:01:39 -0300
From:   Marcelo Tosatti <mtosatti@...hat.com>
To:     atomlin@...hat.com, frederic@...nel.org
Cc:     cl@...ux.com, tglx@...utronix.de, mingo@...nel.org,
        peterz@...radead.org, pauld@...hat.com, neelx@...hat.com,
        oleksandr@...alenko.name, linux-kernel@...r.kernel.org,
        linux-mm@...ck.org, Marcelo Tosatti <mtosatti@...hat.com>
Subject: [patch 3/3] mm/vmstat: do not queue vmstat_update if tick is stopped

From the vmstat shepherd, do not queue local work to flush the per-CPU
vmstats of CPUs that have the tick stopped, since for those CPUs the
flush is already performed on return to userspace or when entering idle.

Per-cpu pages can be freed remotely from housekeeping CPUs.

Also move the quiet_vmstat() call in tick_nohz_stop_tick() to after the
ts->tick_stopped = 1 assignment, so that tick_nohz_tick_stopped() sees
the updated state when quiet_vmstat() checks it (a condensed sketch of
the resulting quiet_vmstat() flow follows the diffstat below).

Signed-off-by: Marcelo Tosatti <mtosatti@...hat.com>

---
 kernel/time/tick-sched.c |    6 +++---
 mm/vmstat.c              |   22 +++++++++++++++++-----
 2 files changed, 20 insertions(+), 8 deletions(-)
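
For readability, a condensed sketch of quiet_vmstat() as it behaves with
this patch applied; it is restated from the diff below (helper names as
they appear in mm/vmstat.c) and the diff, not this sketch, is
authoritative:

	/* Sketch only: condensed from the diff below, not the applied code. */
	void quiet_vmstat(void)
	{
		struct delayed_work *dw;

		if (system_state != SYSTEM_RUNNING)
			return;

		/* Nothing to fold on this CPU. */
		if (!__this_cpu_read(vmstat_dirty))
			return;

		/* Fold the local per-CPU deltas into the global counters. */
		refresh_cpu_vm_stats(false);

		/*
		 * With the tick stopped, also cancel any already-queued
		 * vmstat_update work so this CPU is not interrupted later;
		 * the shepherd (second hunk below) skips tick-stopped CPUs,
		 * so it will not re-queue the work either.
		 */
		dw = &per_cpu(vmstat_work, smp_processor_id());
		if (delayed_work_pending(dw) && tick_nohz_tick_stopped())
			cancel_delayed_work(dw);
	}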

Index: linux-2.6/mm/vmstat.c
===================================================================
--- linux-2.6.orig/mm/vmstat.c
+++ linux-2.6/mm/vmstat.c
@@ -29,6 +29,7 @@
 #include <linux/page_ext.h>
 #include <linux/page_owner.h>
 #include <linux/migrate.h>
+#include <linux/tick.h>
 
 #include "internal.h"
 
@@ -1973,19 +1974,27 @@ static void vmstat_update(struct work_st
  */
 void quiet_vmstat(void)
 {
+	struct delayed_work *dw;
+
 	if (system_state != SYSTEM_RUNNING)
 		return;
 
 	if (!__this_cpu_read(vmstat_dirty))
 		return;
 
+	refresh_cpu_vm_stats(false);
+
 	/*
-	 * Just refresh counters and do not care about the pending delayed
-	 * vmstat_update. It doesn't fire that often to matter and canceling
-	 * it would be too expensive from this path.
-	 * vmstat_shepherd will take care about that for us.
+	 * If the tick is stopped, cancel any delayed work to avoid
+	 * interruptions to this CPU in the future.
+	 *
+	 * Otherwise just refresh counters and do not care about the pending
+	 * delayed vmstat_update. It doesn't fire that often to matter
+	 * and canceling it would be too expensive from this path.
 	 */
-	refresh_cpu_vm_stats(false);
+	dw = &per_cpu(vmstat_work, smp_processor_id());
+	if (delayed_work_pending(dw) && tick_nohz_tick_stopped())
+		cancel_delayed_work(dw);
 }
 
 /*
@@ -2007,6 +2016,9 @@ static void vmstat_shepherd(struct work_
 	for_each_online_cpu(cpu) {
 		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
 
+		if (tick_nohz_tick_stopped_cpu(cpu))
+			continue;
+
 		if (!delayed_work_pending(dw) && per_cpu(vmstat_dirty, cpu))
 			queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
 
Index: linux-2.6/kernel/time/tick-sched.c
===================================================================
--- linux-2.6.orig/kernel/time/tick-sched.c
+++ linux-2.6/kernel/time/tick-sched.c
@@ -905,9 +905,6 @@ static void tick_nohz_stop_tick(struct t
 		ts->do_timer_last = 0;
 	}
 
-	/* Attempt to fold when the idle tick is stopped or not */
-	quiet_vmstat();
-
 	/* Skip reprogram of event if its not changed */
 	if (ts->tick_stopped && (expires == ts->next_tick)) {
 		/* Sanity check: make sure clockevent is actually programmed */
@@ -935,6 +932,9 @@ static void tick_nohz_stop_tick(struct t
 		trace_tick_stop(1, TICK_DEP_MASK_NONE);
 	}
 
+	/* Attempt to fold when the idle tick is stopped or not */
+	quiet_vmstat();
+
 	ts->next_tick = tick;
 
 	/*

