Message-ID: <20251026203611.1608903-3-surenb@google.com>
Date: Sun, 26 Oct 2025 13:36:05 -0700
From: Suren Baghdasaryan <surenb@...gle.com>
To: akpm@...ux-foundation.org
Cc: david@...hat.com, lorenzo.stoakes@...cle.com, Liam.Howlett@...cle.com,
vbabka@...e.cz, alexandru.elisei@....com, peterx@...hat.com, sj@...nel.org,
rppt@...nel.org, mhocko@...e.com, corbet@....net, axboe@...nel.dk,
viro@...iv.linux.org.uk, brauner@...nel.org, hch@...radead.org, jack@...e.cz,
willy@...radead.org, m.szyprowski@...sung.com, robin.murphy@....com,
hannes@...xchg.org, zhengqi.arch@...edance.com, shakeel.butt@...ux.dev,
axelrasmussen@...gle.com, yuanchu@...gle.com, weixugc@...gle.com,
minchan@...nel.org, surenb@...gle.com, linux-mm@...ck.org,
linux-doc@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-block@...r.kernel.org, linux-fsdevel@...r.kernel.org,
iommu@...ts.linux.dev, Minchan Kim <minchan@...gle.com>
Subject: [PATCH v2 2/8] mm/cleancache: add cleancache LRU for folio aging

Once all folios in the cleancache are used to store data from previously
evicted folios, no more data can be stored there. To avoid that situation,
we can drop older data to make room for new data.

Add an LRU for cleancache folios to reclaim the oldest folio when
cleancache is full and a new folio needs to be stored.

Signed-off-by: Suren Baghdasaryan <surenb@...gle.com>
Signed-off-by: Minchan Kim <minchan@...gle.com>
---
mm/cleancache.c | 91 +++++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 89 insertions(+), 2 deletions(-)
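
[Illustrative note, not part of the patch] The LRU discipline added below is
the standard list-based one: newly attached folios are placed at the head,
hits rotate a folio back to the head, and reclaim evicts from the tail. A
minimal sketch using only <linux/list.h>/<linux/spinlock.h> primitives; the
example_* names and struct are hypothetical, not taken from cleancache:

#include <linux/list.h>
#include <linux/spinlock.h>

struct entry {
	struct list_head lru;	/* starts detached: INIT_LIST_HEAD(&e->lru) */
};

static LIST_HEAD(example_lru);
static DEFINE_SPINLOCK(example_lru_lock);

/* Store: a newly attached entry becomes the youngest, at the head. */
static void example_add(struct entry *e)
{
	spin_lock(&example_lru_lock);
	list_add(&e->lru, &example_lru);
	spin_unlock(&example_lru_lock);
}

/* Hit: rotate the entry back to the head so it is reclaimed last. */
static void example_touch(struct entry *e)
{
	spin_lock(&example_lru_lock);
	if (!list_empty(&e->lru))
		list_move(&e->lru, &example_lru);
	spin_unlock(&example_lru_lock);
}

/* Full: reclaim the oldest entry from the tail. */
static struct entry *example_evict(void)
{
	struct entry *e = NULL;

	spin_lock(&example_lru_lock);
	if (!list_empty(&example_lru)) {
		e = list_last_entry(&example_lru, struct entry, lru);
		list_del_init(&e->lru);
	}
	spin_unlock(&example_lru_lock);
	return e;
}
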
diff --git a/mm/cleancache.c b/mm/cleancache.c
index 26fb91b987b7..3acf46c0cdd1 100644
--- a/mm/cleancache.c
+++ b/mm/cleancache.c
@@ -18,6 +18,13 @@
  *
  * ccinode->folios.xa_lock
  *   pool->lock
+ *
+ * ccinode->folios.xa_lock
+ *   lru_lock
+ *
+ * ccinode->folios.xa_lock
+ *   lru_lock
+ *     pool->lock
  */

 #define INODE_HASH_BITS 6
@@ -58,6 +65,8 @@ static struct kmem_cache *slab_inode; /* cleancache_inode slab */
 static struct cleancache_pool pools[CLEANCACHE_MAX_POOLS];
 static atomic_t nr_pools = ATOMIC_INIT(0);
 static DEFINE_SPINLOCK(pools_lock); /* protects pools */
+static LIST_HEAD(cleancache_lru);
+static DEFINE_SPINLOCK(lru_lock); /* protects cleancache_lru */

 static inline void init_cleancache_folio(struct folio *folio, int pool_id)
 {
@@ -73,6 +82,7 @@ static inline void clear_cleancache_folio(struct folio *folio)
 {
 	/* Folio must be detached and not in the pool. No locking is needed. */
 	VM_BUG_ON(folio->cc_inode);
+	VM_BUG_ON(!list_empty(&folio->lru));
 	folio->cc_pool_id = -1;
 }

@@ -123,6 +133,7 @@ static inline bool is_folio_attached(struct folio *folio)
 /*
  * Folio pool helpers.
  * Only detached folios are stored in the pool->folio_list.
+ * Once a folio gets attached, it's placed on the cleancache LRU list.
  *
  * Locking:
  * pool->folio_list is accessed under pool->lock.
@@ -174,6 +185,32 @@ static struct folio *pick_folio_from_any_pool(void)
 	return folio;
 }

+/* Folio LRU helpers. Only attached folios are stored in the cleancache_lru. */
+static void add_folio_to_lru(struct folio *folio)
+{
+	VM_BUG_ON(!list_empty(&folio->lru));
+
+	spin_lock(&lru_lock);
+	list_add(&folio->lru, &cleancache_lru);
+	spin_unlock(&lru_lock);
+}
+
+static void rotate_lru_folio(struct folio *folio)
+{
+	spin_lock(&lru_lock);
+	if (!list_empty(&folio->lru))
+		list_move(&folio->lru, &cleancache_lru);
+	spin_unlock(&lru_lock);
+}
+
+static void delete_folio_from_lru(struct folio *folio)
+{
+	spin_lock(&lru_lock);
+	if (!list_empty(&folio->lru))
+		list_del_init(&folio->lru);
+	spin_unlock(&lru_lock);
+}
+
/* FS helpers */
static struct cleancache_fs *get_fs(int fs_id)
{
@@ -306,6 +343,7 @@ static void erase_folio_from_inode(struct cleancache_inode *ccinode,

 	removed = __xa_erase(&ccinode->folios, offset);
 	VM_BUG_ON(!removed);
+	delete_folio_from_lru(folio);
 	remove_inode_if_empty(ccinode);
 }

@@ -403,6 +441,48 @@ static struct cleancache_inode *add_and_get_inode(struct cleancache_fs *fs,
 	return ccinode;
 }

+static struct folio *reclaim_folio_from_lru(void)
+{
+	struct cleancache_inode *ccinode;
+	struct folio *folio;
+	pgoff_t offset;
+
+again:
+	spin_lock(&lru_lock);
+	if (list_empty(&cleancache_lru)) {
+		spin_unlock(&lru_lock);
+		return NULL;
+	}
+	ccinode = NULL;
+	/* Get the ccinode of the folio at the LRU tail */
+	list_for_each_entry_reverse(folio, &cleancache_lru, lru) {
+		struct cleancache_pool *pool = folio_pool(folio);
+
+		/* Find and get ccinode */
+		spin_lock(&pool->lock);
+		folio_attachment(folio, &ccinode, &offset);
+		if (ccinode && !get_inode(ccinode))
+			ccinode = NULL;
+		spin_unlock(&pool->lock);
+		if (ccinode)
+			break;
+	}
+	spin_unlock(&lru_lock);
+
+	if (!ccinode)
+		return NULL; /* No ccinode to reclaim */
+
+	if (!isolate_folio_from_inode(ccinode, offset, folio)) {
+		/* Retry if the folio got erased from the ccinode */
+		put_inode(ccinode);
+		goto again;
+	}
+
+	put_inode(ccinode);
+
+	return folio;
+}
+
 static void copy_folio_content(struct folio *from, struct folio *to)
 {
 	void *src = kmap_local_folio(from, 0);
@@ -458,14 +538,19 @@ static bool store_into_inode(struct cleancache_fs *fs,
 			move_folio_from_inode_to_pool(ccinode, offset, stored_folio);
 			goto out_unlock;
 		}
+		rotate_lru_folio(stored_folio);
 	} else {
 		if (!workingset)
 			goto out_unlock;

 		stored_folio = pick_folio_from_any_pool();
 		if (!stored_folio) {
-			/* No free folios, TODO: try reclaiming */
-			goto out_unlock;
+			/* No free folios, try reclaiming */
+			xa_unlock(&ccinode->folios);
+			stored_folio = reclaim_folio_from_lru();
+			xa_lock(&ccinode->folios);
+			if (!stored_folio)
+				goto out_unlock;
 		}

 		if (!store_folio_in_inode(ccinode, offset, stored_folio)) {
@@ -477,6 +562,7 @@ static bool store_into_inode(struct cleancache_fs *fs,
 			spin_unlock(&pool->lock);
 			goto out_unlock;
 		}
+		add_folio_to_lru(stored_folio);
 	}

 	copy_folio_content(folio, stored_folio);
@@ -506,6 +592,7 @@ static bool load_from_inode(struct cleancache_fs *fs,
 	xa_lock(&ccinode->folios);
 	stored_folio = xa_load(&ccinode->folios, offset);
 	if (stored_folio) {
+		rotate_lru_folio(stored_folio);
 		copy_folio_content(stored_folio, folio);
 		ret = true;
 	}
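
[Illustrative note, not part of the patch] A condensed view of the store path
with the new hooks in place, to show where rotation, reclaim and LRU insertion
happen. store_example() is a hypothetical stand-in for store_into_inode();
error handling and pool bookkeeping are simplified:

static bool store_example(struct cleancache_inode *ccinode, pgoff_t offset,
			  struct folio *folio, bool workingset)
{
	struct folio *stored_folio;
	bool ret = false;

	xa_lock(&ccinode->folios);
	stored_folio = xa_load(&ccinode->folios, offset);
	if (stored_folio) {
		/* Hit on an already-stored folio: rotate it to the LRU head. */
		rotate_lru_folio(stored_folio);
	} else {
		if (!workingset)
			goto out_unlock;
		stored_folio = pick_folio_from_any_pool();
		if (!stored_folio) {
			/* No free folios: drop the xarray lock, evict the LRU tail, relock. */
			xa_unlock(&ccinode->folios);
			stored_folio = reclaim_folio_from_lru();
			xa_lock(&ccinode->folios);
			if (!stored_folio)
				goto out_unlock;
		}
		if (!store_folio_in_inode(ccinode, offset, stored_folio))
			goto out_unlock;	/* the real code also returns the folio to its pool */
		/* Newly attached folios enter at the head of the LRU. */
		add_folio_to_lru(stored_folio);
	}
	copy_folio_content(folio, stored_folio);
	ret = true;
out_unlock:
	xa_unlock(&ccinode->folios);
	return ret;
}
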
--
2.51.1.851.g4ebd6896fd-goog