Message-Id: <20210814052519.86679-4-songmuchun@bytedance.com>
Date: Sat, 14 Aug 2021 13:25:10 +0800
From: Muchun Song <songmuchun@...edance.com>
To: guro@...com, hannes@...xchg.org, mhocko@...nel.org,
akpm@...ux-foundation.org, shakeelb@...gle.com,
vdavydov.dev@...il.com
Cc: linux-kernel@...r.kernel.org, linux-mm@...ck.org,
duanxiongchun@...edance.com, fam.zheng@...edance.com,
bsingharora@...il.com, shy828301@...il.com, alexs@...nel.org,
smuchun@...il.com, zhengqi.arch@...edance.com,
Muchun Song <songmuchun@...edance.com>
Subject: [PATCH v1 03/12] mm: memcontrol: make lruvec lock safe when LRU pages are reparented
The diagram below shows how to make the folio lruvec lock safe when LRU
pages are reparented.
folio_lruvec_lock(folio)
    retry:
        lruvec = folio_lruvec(folio);

        // The folio is reparented at this time.
        spin_lock(&lruvec->lru_lock);

        if (unlikely(lruvec_memcg(lruvec) != folio_memcg(folio)))
            // Acquired the wrong lruvec lock and need to retry.
            // Because this folio is on the parent memcg lruvec list.
            goto retry;

        // If we reach here, it means that folio_memcg(folio) is stable.

memcg_reparent_objcgs(memcg)
    // lruvec belongs to memcg and lruvec_parent belongs to parent memcg.
    spin_lock(&lruvec->lru_lock);
    spin_lock(&lruvec_parent->lru_lock);

    // Move all the pages from the lruvec list to the parent lruvec list.

    spin_unlock(&lruvec_parent->lru_lock);
    spin_unlock(&lruvec->lru_lock);
After we acquire the lruvec lock, we need to check whether the folio has
been reparented. If so, we need to reacquire the new lruvec lock. The
routine that reparents LRU pages (implemented in a later patch) also
acquires the lruvec lock, so folio_memcg() cannot change while we hold
the lruvec lock.
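To make the protocol concrete, below is a minimal userspace sketch of the
same lock-and-recheck scheme. It is illustrative only, not kernel code:
pthread mutexes stand in for lruvec->lru_lock, and the names (struct owner,
struct object, object_lock_owner(), reparent_object()) are made up. The
kernel additionally relies on RCU to keep the old lruvec alive across the
retry window, which this sketch elides by never freeing owners.

    /*
     * Illustrative analogue of the scheme above: "owner" stands in
     * for a memcg/lruvec pair and "object" for a folio.
     */
    #include <pthread.h>
    #include <stdatomic.h>

    struct owner {
            pthread_mutex_t lock;           /* analogue of lruvec->lru_lock */
            struct owner *parent;
    };

    struct object {
            _Atomic(struct owner *) owner;  /* analogue of folio_memcg() */
    };

    /* Reader side: analogue of folio_lruvec_lock(). */
    static struct owner *object_lock_owner(struct object *obj)
    {
            struct owner *o;

    retry:
            o = atomic_load(&obj->owner);
            pthread_mutex_lock(&o->lock);
            if (atomic_load(&obj->owner) != o) {
                    /* Reparented before we took the lock; retry. */
                    pthread_mutex_unlock(&o->lock);
                    goto retry;
            }
            /* The owner is now stable until the lock is dropped. */
            return o;
    }

    /* Writer side: analogue of memcg_reparent_objcgs(). */
    static void reparent_object(struct object *obj, struct owner *o)
    {
            pthread_mutex_lock(&o->lock);
            pthread_mutex_lock(&o->parent->lock);
            atomic_store(&obj->owner, o->parent);
            pthread_mutex_unlock(&o->parent->lock);
            pthread_mutex_unlock(&o->lock);
    }

    int main(void)
    {
            struct owner parent = { PTHREAD_MUTEX_INITIALIZER, NULL };
            struct owner child  = { PTHREAD_MUTEX_INITIALIZER, &parent };
            struct object obj;
            struct owner *o;

            atomic_init(&obj.owner, &child);
            reparent_object(&obj, &child);
            o = object_lock_owner(&obj);    /* resolves to &parent */
            pthread_mutex_unlock(&o->lock);
            return 0;
    }

Because the writer holds both the child's and the parent's lock while
moving objects, a reader whose recheck succeeds knows the owner cannot
change until it drops the lock.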
Since lruvec_memcg(lruvec) is always equal to folio_memcg(folio) once we
hold the lruvec lock, the lruvec_memcg_debug() check is pointless. So
remove it.
This is a preparation for reparenting the LRU pages.
Signed-off-by: Muchun Song <songmuchun@...edance.com>
Acked-by: Roman Gushchin <guro@...com>
---
include/linux/memcontrol.h | 18 +++-----------
mm/compaction.c | 10 +++++++-
mm/memcontrol.c | 62 +++++++++++++++++++++++++++++-----------------
mm/swap.c | 4 +++
4 files changed, 55 insertions(+), 39 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 41a35de93d75..5a8f85bd9bbf 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -769,7 +769,9 @@ static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
* folio_lruvec - return lruvec for isolating/putting an LRU folio
* @folio: Pointer to the folio.
*
- * This function relies on folio->mem_cgroup being stable.
+ * The lruvec can be changed to its parent lruvec when the page is
+ * reparented. The caller needs to recheck if it cares about this
+ * change (just like folio_lruvec_lock() does).
*/
static inline struct lruvec *folio_lruvec(struct folio *folio)
{
@@ -788,15 +790,6 @@ struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
unsigned long *flags);
-#ifdef CONFIG_DEBUG_VM
-void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
-#else
-static inline
-void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
-{
-}
-#endif
-
static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){
return css ? container_of(css, struct mem_cgroup, css) : NULL;
@@ -1243,11 +1236,6 @@ static inline struct lruvec *folio_lruvec(struct folio *folio)
return &pgdat->__lruvec;
}
-static inline
-void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
-{
-}
-
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
return NULL;
diff --git a/mm/compaction.c b/mm/compaction.c
index 3131558a7c31..97d72f6b3dd5 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -515,6 +515,8 @@ compact_folio_lruvec_lock_irqsave(struct folio *folio, unsigned long *flags,
{
struct lruvec *lruvec;
+ rcu_read_lock();
+retry:
lruvec = folio_lruvec(folio);
/* Track if the lock is contended in async mode */
@@ -527,7 +529,13 @@ compact_folio_lruvec_lock_irqsave(struct folio *folio, unsigned long *flags,
spin_lock_irqsave(&lruvec->lru_lock, *flags);
out:
- lruvec_memcg_debug(lruvec, folio);
+ if (unlikely(lruvec_memcg(lruvec) != folio_memcg(folio))) {
+ spin_unlock_irqrestore(&lruvec->lru_lock, *flags);
+ goto retry;
+ }
+
+ /* See the comments in folio_lruvec_lock(). */
+ rcu_read_unlock();
return lruvec;
}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7df2176e4f02..7955da38e385 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1146,23 +1146,6 @@ int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
return ret;
}
-#ifdef CONFIG_DEBUG_VM
-void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
-{
- struct mem_cgroup *memcg;
-
- if (mem_cgroup_disabled())
- return;
-
- memcg = folio_memcg(folio);
-
- if (!memcg)
- VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != root_mem_cgroup, folio);
- else
- VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
-}
-#endif
-
/**
* folio_lruvec_lock - lock and return lruvec for a given folio.
* @folio: Pointer to the folio.
@@ -1175,20 +1158,43 @@ void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
*/
struct lruvec *folio_lruvec_lock(struct folio *folio)
{
- struct lruvec *lruvec = folio_lruvec(folio);
+ struct lruvec *lruvec;
+ rcu_read_lock();
+retry:
+ lruvec = folio_lruvec(folio);
spin_lock(&lruvec->lru_lock);
- lruvec_memcg_debug(lruvec, folio);
+
+ if (unlikely(lruvec_memcg(lruvec) != folio_memcg(folio))) {
+ spin_unlock(&lruvec->lru_lock);
+ goto retry;
+ }
+
+ /*
+ * Preemption is disabled inside spin_lock(), which also serves as
+ * an RCU read-side critical section.
+ */
+ rcu_read_unlock();
return lruvec;
}
struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
- struct lruvec *lruvec = folio_lruvec(folio);
+ struct lruvec *lruvec;
+ rcu_read_lock();
+retry:
+ lruvec = folio_lruvec(folio);
spin_lock_irq(&lruvec->lru_lock);
- lruvec_memcg_debug(lruvec, folio);
+
+ if (unlikely(lruvec_memcg(lruvec) != folio_memcg(folio))) {
+ spin_unlock_irq(&lruvec->lru_lock);
+ goto retry;
+ }
+
+ /* See the comments in folio_lruvec_lock(). */
+ rcu_read_unlock();
return lruvec;
}
@@ -1196,10 +1202,20 @@ struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
unsigned long *flags)
{
- struct lruvec *lruvec = folio_lruvec(folio);
+ struct lruvec *lruvec;
+ rcu_read_lock();
+retry:
+ lruvec = folio_lruvec(folio);
spin_lock_irqsave(&lruvec->lru_lock, *flags);
- lruvec_memcg_debug(lruvec, folio);
+
+ if (unlikely(lruvec_memcg(lruvec) != folio_memcg(folio))) {
+ spin_unlock_irqrestore(&lruvec->lru_lock, *flags);
+ goto retry;
+ }
+
+ /* See the comments in folio_lruvec_lock(). */
+ rcu_read_unlock();
return lruvec;
}
diff --git a/mm/swap.c b/mm/swap.c
index 2bca632b73df..9554ff008fe6 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -295,6 +295,10 @@ void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
void lru_note_cost_folio(struct folio *folio)
{
+ /*
+ * The RCU read lock is held by the caller, so we do not need to
+ * worry about the lruvec returned by folio_lruvec() being released.
+ */
lru_note_cost(folio_lruvec(folio), folio_is_file_lru(folio),
folio_nr_pages(folio));
}
--
2.11.0