Message-ID: <20240130014208.565554-21-hannes@cmpxchg.org>
Date: Mon, 29 Jan 2024 20:36:56 -0500
From: Johannes Weiner <hannes@...xchg.org>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: Nhat Pham <nphamcs@...il.com>,
Yosry Ahmed <yosryahmed@...gle.com>,
Chengming Zhou <zhouchengming@...edance.com>,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: [PATCH 20/20] mm: zswap: function ordering: shrink_memcg_cb

shrink_memcg_cb() is called by the shrinker and is based on
zswap_writeback_entry(). Move it in between the two. This saves one
forward declaration.

Signed-off-by: Johannes Weiner <hannes@...xchg.org>
---
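(Reviewer note, not for the commit log: the new placement is easiest to
check against the caller side. Here is a condensed sketch of how the
list_lru walker ends up invoking shrink_memcg_cb(); the body of
zswap_shrinker_scan() is abbreviated and partly assumed from the context
lines in this series, so treat it as illustrative, not authoritative.)

	static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
					struct shrink_control *sc)
	{
		struct zswap_pool *pool = shrinker->private_data;
		bool encountered_page_in_swapcache = false;
		unsigned long shrunk;

		/* Walk the memcg LRU; shrink_memcg_cb() runs per entry. */
		shrunk = list_lru_shrink_walk(&pool->list_lru, sc,
					      &shrink_memcg_cb,
					      &encountered_page_in_swapcache);

		/* -EEXIST from writeback means warmer entries: stop early. */
		if (encountered_page_in_swapcache)
			return SHRINK_STOP;

		return shrunk ? shrunk : SHRINK_STOP;
	}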
mm/zswap.c | 125 ++++++++++++++++++++++++++---------------------------
1 file changed, 61 insertions(+), 64 deletions(-)
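For context, the resulting call chain is: shrinker core ->
zswap_shrinker_scan() -> list_lru_shrink_walk() -> shrink_memcg_cb() ->
zswap_writeback_entry(). The hookup happens at pool creation; below is a
sketch of zswap_alloc_shrinker(), reconstructed around the context lines
in the second hunk (the shrinker flags and the "mm-zswap" name are
assumptions, not taken from this patch):

	static void zswap_alloc_shrinker(struct zswap_pool *pool)
	{
		pool->shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
						SHRINKER_MEMCG_AWARE,
						"mm-zswap");
		if (!pool->shrinker)
			return;

		/* zswap_shrinker_scan() drives shrink_memcg_cb() above. */
		pool->shrinker->private_data = pool;
		pool->shrinker->scan_objects = zswap_shrinker_scan;
		pool->shrinker->count_objects = zswap_shrinker_count;
		pool->shrinker->batch = 0;
		pool->shrinker->seeks = DEFAULT_SEEKS;
	}
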
diff --git a/mm/zswap.c b/mm/zswap.c
index 0cb3437d47eb..4aea03285532 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1254,7 +1254,67 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
* shrinker functions
**********************************/
static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
- spinlock_t *lock, void *arg);
+ spinlock_t *lock, void *arg)
+{
+ struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
+ bool *encountered_page_in_swapcache = (bool *)arg;
+ swp_entry_t swpentry;
+ enum lru_status ret = LRU_REMOVED_RETRY;
+ int writeback_result;
+
+ /*
+ * Rotate the entry to the tail before unlocking the LRU,
+ * so that in case of an invalidation race concurrent
+ * reclaimers don't waste their time on it.
+ *
+ * If writeback succeeds, or failure is due to the entry
+ * being invalidated by the swap subsystem, the invalidation
+ * will unlink and free it.
+ *
+ * Temporary failures, where the same entry should be tried
+ * again immediately, almost never happen for this shrinker.
+ * We don't do any trylocking; -ENOMEM comes closest,
+ * but that's extremely rare and doesn't happen spuriously
+ * either. Don't bother distinguishing this case.
+ *
+ * But since they do exist in theory, the entry cannot just
+ * be unlinked, or we could leak it. Hence, rotate.
+ */
+ list_move_tail(item, &l->list);
+
+ /*
+ * Once the lru lock is dropped, the entry might get freed. The
+ * swpentry is copied to the stack, and entry isn't deref'd again
+ * until the entry is verified to still be alive in the tree.
+ */
+ swpentry = entry->swpentry;
+
+ /*
+ * It's safe to drop the lock here because we return either
+ * LRU_REMOVED_RETRY or LRU_RETRY.
+ */
+ spin_unlock(lock);
+
+ writeback_result = zswap_writeback_entry(entry, swpentry);
+
+ if (writeback_result) {
+ zswap_reject_reclaim_fail++;
+ ret = LRU_RETRY;
+
+ /*
+ * Encountering a page already in swap cache is a sign that we are shrinking
+ * into the warmer region. We should terminate shrinking (if we're in the dynamic
+ * shrinker context).
+ */
+ if (writeback_result == -EEXIST && encountered_page_in_swapcache)
+ *encountered_page_in_swapcache = true;
+ } else {
+ zswap_written_back_pages++;
+ }
+
+ spin_lock(lock);
+ return ret;
+}

static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
struct shrink_control *sc)
@@ -1354,69 +1414,6 @@ static void zswap_alloc_shrinker(struct zswap_pool *pool)
pool->shrinker->seeks = DEFAULT_SEEKS;
}

-static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
- spinlock_t *lock, void *arg)
-{
- struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
- bool *encountered_page_in_swapcache = (bool *)arg;
- swp_entry_t swpentry;
- enum lru_status ret = LRU_REMOVED_RETRY;
- int writeback_result;
-
- /*
- * Rotate the entry to the tail before unlocking the LRU,
- * so that in case of an invalidation race concurrent
- * reclaimers don't waste their time on it.
- *
- * If writeback succeeds, or failure is due to the entry
- * being invalidated by the swap subsystem, the invalidation
- * will unlink and free it.
- *
- * Temporary failures, where the same entry should be tried
- * again immediately, almost never happen for this shrinker.
- * We don't do any trylocking; -ENOMEM comes closest,
- * but that's extremely rare and doesn't happen spuriously
- * either. Don't bother distinguishing this case.
- *
- * But since they do exist in theory, the entry cannot just
- * be unlinked, or we could leak it. Hence, rotate.
- */
- list_move_tail(item, &l->list);
-
- /*
- * Once the lru lock is dropped, the entry might get freed. The
- * swpentry is copied to the stack, and entry isn't deref'd again
- * until the entry is verified to still be alive in the tree.
- */
- swpentry = entry->swpentry;
-
- /*
- * It's safe to drop the lock here because we return either
- * LRU_REMOVED_RETRY or LRU_RETRY.
- */
- spin_unlock(lock);
-
- writeback_result = zswap_writeback_entry(entry, swpentry);
-
- if (writeback_result) {
- zswap_reject_reclaim_fail++;
- ret = LRU_RETRY;
-
- /*
- * Encountering a page already in swap cache is a sign that we are shrinking
- * into the warmer region. We should terminate shrinking (if we're in the dynamic
- * shrinker context).
- */
- if (writeback_result == -EEXIST && encountered_page_in_swapcache)
- *encountered_page_in_swapcache = true;
- } else {
- zswap_written_back_pages++;
- }
-
- spin_lock(lock);
- return ret;
-}
-
static int shrink_memcg(struct mem_cgroup *memcg)
{
struct zswap_pool *pool;
--
2.43.0