[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250210105028.2134-1-hdanton@sina.com>
Date: Mon, 10 Feb 2025 18:50:26 +0800
From: Hillf Danton <hdanton@...a.com>
To: Frederic Weisbecker <frederic@...nel.org>
Cc: Marcelo Tosatti <mtosatti@...hat.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Michal Hocko <mhocko@...nel.org>,
linux-mm@...ck.org,
LKML <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH 6/6 v2] mm: Drain LRUs upon resume to userspace on nohz_full CPUs
On Sun, 9 Feb 2025 23:30:04 +0100 Frederic Weisbecker <frederic@...nel.org>
> @@ -769,6 +772,9 @@ static bool cpu_needs_drain(unsigned int cpu)
> {
> struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);
>
> + if (!housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE))
> + return false;
> +
> /* Check these in order of likelihood that they're not zero */
> return folio_batch_count(&fbatches->lru_add) ||
> folio_batch_count(&fbatches->lru_move_tail) ||
> --
> 2.46.0
Nit: I'd like to add a debug line to test your assumption that
isolated tasks are pinned to a single nohz_full CPU.
--- x/mm/swap.c
+++ y/mm/swap.c
@@ -767,9 +767,10 @@ static void lru_add_drain_per_cpu(struct
static bool cpu_needs_drain(unsigned int cpu)
{
struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);
+ bool yes;
/* Check these in order of likelihood that they're not zero */
- return folio_batch_count(&fbatches->lru_add) ||
+ yes = folio_batch_count(&fbatches->lru_add) ||
folio_batch_count(&fbatches->lru_move_tail) ||
folio_batch_count(&fbatches->lru_deactivate_file) ||
folio_batch_count(&fbatches->lru_deactivate) ||
@@ -777,6 +778,12 @@ static bool cpu_needs_drain(unsigned int
folio_batch_count(&fbatches->lru_activate) ||
need_mlock_drain(cpu) ||
has_bh_in_lru(cpu, NULL);
+
+ if (!housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE)) {
+ VM_BUG_ON(yes);
+ return false;
+ }
+ return yes;
}
/*
Powered by blists - more mailing lists