Message-ID: <20230729083737.38699-4-leobras@redhat.com>
Date: Sat, 29 Jul 2023 05:37:33 -0300
From: Leonardo Bras <leobras@...hat.com>
To: Thomas Gleixner <tglx@...utronix.de>,
Marcelo Tosatti <mtosatti@...hat.com>,
linux-kernel@...r.kernel.org
Cc: Leonardo Bras <leobras@...hat.com>
Subject: [RFC PATCH 2/4] swap: apply new local_schedule_work_on() interface
Make use of the new local_*lock_n*() and local_queue_work_on() interfaces
to improve performance & latency on PREEMPT_RT kernels.
For functions that may be scheduled on a different cpu, replace
local_*lock*() with local_*lock_n*(), and replace queue_work_on() with
local_queue_work_on(). Likewise, replace flush_work() with
local_flush_work().
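The concept is illustrated by the userspace sketch below; the kernel-side
local_lock_n()/local_queue_work_on() internals come from patch 1/4 and are
not shown in this patch, so everything here (names, helpers, behavior) is
an illustrative assumption, not the real API:

/* build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>

#define NR_SLOTS 4	/* stand-in for the number of cpus */

struct slot {
	pthread_mutex_t lock;	/* stand-in for the per-cpu local_lock */
	long batch;		/* stand-in for a per-cpu folio batch */
};

static struct slot slots[NR_SLOTS];

/* like local_lock_n()/local_unlock_n(): take an explicit index, so a
 * caller can operate on a remote slot instead of queueing work there */
static void slot_lock_n(int n)   { pthread_mutex_lock(&slots[n].lock); }
static void slot_unlock_n(int n) { pthread_mutex_unlock(&slots[n].lock); }

/* like the reworked lru_add_and_bh_lrus_drain(): drain any slot, local
 * or remote, under that slot's lock */
static void drain(int n)
{
	slot_lock_n(n);
	printf("drained slot %d (%ld items)\n", n, slots[n].batch);
	slots[n].batch = 0;
	slot_unlock_n(n);
}

int main(void)
{
	int i;

	for (i = 0; i < NR_SLOTS; i++)
		pthread_mutex_init(&slots[i].lock, NULL);

	slots[2].batch = 42;
	drain(2);	/* remote drain: no per-slot worker required */
	return 0;
}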
This should have no relevant performance impact on non-RT kernels: for
functions that may be scheduled on a different cpu, the this_cpu_ptr()
used by local_*lock*() simply becomes per_cpu_ptr(..., smp_processor_id()),
which resolves to the same address.
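To make the non-RT argument concrete: with cpu == smp_processor_id(), the
two accessors below name the same per-cpu object, so the _n variants add
no overhead there (illustrative snippet, not code from this series):

	/* assumed !PREEMPT_RT equivalence; illustrative only */
	struct cpu_fbatches *a = this_cpu_ptr(&cpu_fbatches);
	struct cpu_fbatches *b = per_cpu_ptr(&cpu_fbatches,
					     smp_processor_id());
	/* a == b: local_lock_n(lock, smp_processor_id()) takes the
	 * same lock instance as local_lock(lock) */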
Signed-off-by: Leonardo Bras <leobras@...hat.com>
---
mm/swap.c | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index cd8f0150ba3a..a79f2091eae5 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -760,11 +760,11 @@ void lru_add_drain(void)
* the same cpu. It shouldn't be a problem in !SMP case since
* the core is only one and the locks will disable preemption.
*/
-static void lru_add_and_bh_lrus_drain(void)
+static void lru_add_and_bh_lrus_drain(int cpu)
{
- local_lock(&cpu_fbatches.lock);
- lru_add_drain_cpu(smp_processor_id());
- local_unlock(&cpu_fbatches.lock);
+ local_lock_n(&cpu_fbatches.lock, cpu);
+ lru_add_drain_cpu(cpu);
+ local_unlock_n(&cpu_fbatches.lock, cpu);
invalidate_bh_lrus_cpu();
mlock_drain_local();
}
@@ -782,9 +782,9 @@ void lru_add_drain_cpu_zone(struct zone *zone)
static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
-static void lru_add_drain_per_cpu(struct work_struct *dummy)
+static void lru_add_drain_per_cpu(struct work_struct *w)
{
- lru_add_and_bh_lrus_drain();
+ lru_add_and_bh_lrus_drain(w->data.counter);
}
static bool cpu_needs_drain(unsigned int cpu)
@@ -888,13 +888,13 @@ static inline void __lru_add_drain_all(bool force_all_cpus)
if (cpu_needs_drain(cpu)) {
INIT_WORK(work, lru_add_drain_per_cpu);
- queue_work_on(cpu, mm_percpu_wq, work);
+ local_queue_work_on(cpu, mm_percpu_wq, work);
__cpumask_set_cpu(cpu, &has_work);
}
}
for_each_cpu(cpu, &has_work)
- flush_work(&per_cpu(lru_add_drain_work, cpu));
+ local_flush_work(&per_cpu(lru_add_drain_work, cpu));
done:
mutex_unlock(&lock);
@@ -941,7 +941,7 @@ void lru_cache_disable(void)
#ifdef CONFIG_SMP
__lru_add_drain_all(true);
#else
- lru_add_and_bh_lrus_drain();
+ lru_add_and_bh_lrus_drain(smp_processor_id());
#endif
}
--
2.41.0