Message-ID: <20240711021317.596178-6-yuzhao@google.com>
Date: Wed, 10 Jul 2024 20:13:17 -0600
From: Yu Zhao <yuzhao@...gle.com>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
Yu Zhao <yuzhao@...gle.com>
Subject: [PATCH mm-unstable v1 5/5] mm/swap: remove boilerplate

Remove boilerplate by using a macro to choose the corresponding lock
and handler for each folio_batch in cpu_fbatches. The IRQ-safe lock is
selected by field position: batches declared after lock_irq in struct
cpu_fbatches are drained under cpu_fbatches.lock_irq with IRQs
disabled, while the rest are drained under cpu_fbatches.lock.

Signed-off-by: Yu Zhao <yuzhao@...gle.com>
---
mm/swap.c | 107 +++++++++++++++++++-----------------------------------
1 file changed, 37 insertions(+), 70 deletions(-)
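
Note for reviewers: the offsetof() comparison in the macro below is a
compile-time constant, so each call site keeps only the lock path it
actually needs. A minimal userspace sketch of this offsetof-based
dispatch (hypothetical struct and field names, not kernel code):

	#include <stddef.h>
	#include <stdio.h>

	struct batches {
		int lock;      /* stand-in for local_lock_t lock */
		int foo;       /* batch served by the plain lock */
		int lock_irq;  /* stand-in for local_lock_t lock_irq */
		int bar;       /* batch served by the IRQ-safe lock */
	};

	/* Compile-time constant; any branch on it is folded away. */
	#define needs_irq_lock(member) \
		(offsetof(struct batches, member) > \
		 offsetof(struct batches, lock_irq))

	int main(void)
	{
		printf("foo: %d\n", (int)needs_irq_lock(foo)); /* 0 */
		printf("bar: %d\n", (int)needs_irq_lock(bar)); /* 1 */
		return 0;
	}

In the patch itself, folio_batch_add_and_move(folio, lru_move_tail, true)
expands with disable_irq == true because lru_move_tail sits after
lock_irq in struct cpu_fbatches, matching the local_lock_irqsave() that
folio_rotate_reclaimable() used before this change.
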
diff --git a/mm/swap.c b/mm/swap.c
index 4a66d2f87f26..342ff4e39ba4 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -220,16 +220,45 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
folios_put(fbatch);
}
-static void folio_batch_add_and_move(struct folio_batch *fbatch,
- struct folio *folio, move_fn_t move_fn)
+static void __folio_batch_add_and_move(struct folio_batch *fbatch,
+ struct folio *folio, move_fn_t move_fn,
+ bool on_lru, bool disable_irq)
{
+ unsigned long flags;
+
+ folio_get(folio);
+
+ if (on_lru && !folio_test_clear_lru(folio)) {
+ folio_put(folio);
+ return;
+ }
+
if (folio_batch_add(fbatch, folio) && !folio_test_large(folio) &&
!lru_cache_disabled())
return;
+ if (disable_irq)
+ local_lock_irqsave(&cpu_fbatches.lock_irq, flags);
+ else
+ local_lock(&cpu_fbatches.lock);
+
folio_batch_move_lru(fbatch, move_fn);
+
+ if (disable_irq)
+ local_unlock_irqrestore(&cpu_fbatches.lock_irq, flags);
+ else
+ local_unlock(&cpu_fbatches.lock);
}
+#define folio_batch_add_and_move(folio, op, on_lru) \
+ __folio_batch_add_and_move( \
+ this_cpu_ptr(&cpu_fbatches.op), \
+ folio, \
+ op, \
+ on_lru, \
+ offsetof(struct cpu_fbatches, op) > offsetof(struct cpu_fbatches, lock_irq) \
+ )
+
static void lru_move_tail(struct lruvec *lruvec, struct folio *folio)
{
if (folio_test_unevictable(folio))
@@ -250,23 +279,11 @@ static void lru_move_tail(struct lruvec *lruvec, struct folio *folio)
*/
void folio_rotate_reclaimable(struct folio *folio)
{
- struct folio_batch *fbatch;
- unsigned long flags;
-
if (folio_test_locked(folio) || folio_test_dirty(folio) ||
folio_test_unevictable(folio))
return;
- folio_get(folio);
- if (!folio_test_clear_lru(folio)) {
- folio_put(folio);
- return;
- }
-
- local_lock_irqsave(&cpu_fbatches.lock_irq, flags);
- fbatch = this_cpu_ptr(&cpu_fbatches.lru_move_tail);
- folio_batch_add_and_move(fbatch, folio, lru_move_tail);
- local_unlock_irqrestore(&cpu_fbatches.lock_irq, flags);
+ folio_batch_add_and_move(folio, lru_move_tail, true);
}
void lru_note_cost(struct lruvec *lruvec, bool file,
@@ -355,21 +372,10 @@ static void folio_activate_drain(int cpu)
void folio_activate(struct folio *folio)
{
- struct folio_batch *fbatch;
-
if (folio_test_active(folio) || folio_test_unevictable(folio))
return;
- folio_get(folio);
- if (!folio_test_clear_lru(folio)) {
- folio_put(folio);
- return;
- }
-
- local_lock(&cpu_fbatches.lock);
- fbatch = this_cpu_ptr(&cpu_fbatches.lru_activate);
- folio_batch_add_and_move(fbatch, folio, lru_activate);
- local_unlock(&cpu_fbatches.lock);
+ folio_batch_add_and_move(folio, lru_activate, true);
}
#else
@@ -513,8 +519,6 @@ EXPORT_SYMBOL(folio_mark_accessed);
*/
void folio_add_lru(struct folio *folio)
{
- struct folio_batch *fbatch;
-
VM_BUG_ON_FOLIO(folio_test_active(folio) &&
folio_test_unevictable(folio), folio);
VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
@@ -524,11 +528,7 @@ void folio_add_lru(struct folio *folio)
lru_gen_in_fault() && !(current->flags & PF_MEMALLOC))
folio_set_active(folio);
- folio_get(folio);
- local_lock(&cpu_fbatches.lock);
- fbatch = this_cpu_ptr(&cpu_fbatches.lru_add);
- folio_batch_add_and_move(fbatch, folio, lru_add);
- local_unlock(&cpu_fbatches.lock);
+ folio_batch_add_and_move(folio, lru_add, false);
}
EXPORT_SYMBOL(folio_add_lru);
@@ -702,22 +702,11 @@ void lru_add_drain_cpu(int cpu)
*/
void deactivate_file_folio(struct folio *folio)
{
- struct folio_batch *fbatch;
-
/* Deactivating an unevictable folio will not accelerate reclaim */
if (folio_test_unevictable(folio))
return;
- folio_get(folio);
- if (!folio_test_clear_lru(folio)) {
- folio_put(folio);
- return;
- }
-
- local_lock(&cpu_fbatches.lock);
- fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate_file);
- folio_batch_add_and_move(fbatch, folio, lru_deactivate_file);
- local_unlock(&cpu_fbatches.lock);
+ folio_batch_add_and_move(folio, lru_deactivate_file, true);
}
/*
@@ -730,21 +719,10 @@ void deactivate_file_folio(struct folio *folio)
*/
void folio_deactivate(struct folio *folio)
{
- struct folio_batch *fbatch;
-
if (folio_test_unevictable(folio) || !(folio_test_active(folio) || lru_gen_enabled()))
return;
- folio_get(folio);
- if (!folio_test_clear_lru(folio)) {
- folio_put(folio);
- return;
- }
-
- local_lock(&cpu_fbatches.lock);
- fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate);
- folio_batch_add_and_move(fbatch, folio, lru_deactivate);
- local_unlock(&cpu_fbatches.lock);
+ folio_batch_add_and_move(folio, lru_deactivate, true);
}
/**
@@ -756,22 +734,11 @@ void folio_deactivate(struct folio *folio)
*/
void folio_mark_lazyfree(struct folio *folio)
{
- struct folio_batch *fbatch;
-
if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) ||
folio_test_swapcache(folio) || folio_test_unevictable(folio))
return;
- folio_get(folio);
- if (!folio_test_clear_lru(folio)) {
- folio_put(folio);
- return;
- }
-
- local_lock(&cpu_fbatches.lock);
- fbatch = this_cpu_ptr(&cpu_fbatches.lru_lazyfree);
- folio_batch_add_and_move(fbatch, folio, lru_lazyfree);
- local_unlock(&cpu_fbatches.lock);
+ folio_batch_add_and_move(folio, lru_lazyfree, true);
}
void lru_add_drain(void)
--
2.45.2.803.g4e1b14247a-goog