Message-Id: <20210814052519.86679-7-songmuchun@bytedance.com>
Date: Sat, 14 Aug 2021 13:25:13 +0800
From: Muchun Song <songmuchun@...edance.com>
To: guro@...com, hannes@...xchg.org, mhocko@...nel.org,
akpm@...ux-foundation.org, shakeelb@...gle.com,
vdavydov.dev@...il.com
Cc: linux-kernel@...r.kernel.org, linux-mm@...ck.org,
duanxiongchun@...edance.com, fam.zheng@...edance.com,
bsingharora@...il.com, shy828301@...il.com, alexs@...nel.org,
smuchun@...il.com, zhengqi.arch@...edance.com,
Muchun Song <songmuchun@...edance.com>
Subject: [PATCH v1 06/12] mm: thp: make split queue lock safe when LRU pages are reparented

Similar to the lruvec lock, the split queue lock is looked up through the
folio's memcg, so it can go stale while LRU pages are being reparented:
between fetching the folio's deferred split queue and acquiring its lock,
the folio can be reparented to the parent memcg. Use the same approach as
for the lruvec lock: take the split queue lock under rcu_read_lock(),
re-check that the queue still belongs to folio_memcg(folio), and retry
if it does not.
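
As an illustration (not part of this patch), a caller of the new helper
is expected to look roughly like the sketch below. The split_queue_unlock()
counterpart and the folio->_deferred_list field are assumptions based on
the rest of this series and the current struct layout, not code introduced
here:

	struct deferred_split *queue;

	/*
	 * folio_split_queue_lock() re-checks the memcg under the lock,
	 * so the returned queue cannot be reparented away while held.
	 */
	queue = folio_split_queue_lock(folio);
	if (!list_empty(&folio->_deferred_list)) {	/* assumed field */
		queue->split_queue_len--;
		list_del_init(&folio->_deferred_list);
	}
	split_queue_unlock(queue);			/* assumed helper */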
Signed-off-by: Muchun Song <songmuchun@...edance.com>
---
mm/huge_memory.c | 23 +++++++++++++++++++++++
1 file changed, 23 insertions(+)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c49ef28e48c1..22fbf2c74d49 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -535,9 +535,22 @@ static struct deferred_split *folio_split_queue_lock(struct folio *folio)
 {
 	struct deferred_split *queue;
 
+	rcu_read_lock();
+retry:
 	queue = folio_split_queue(folio);
 	spin_lock(&queue->split_queue_lock);
 
+	if (unlikely(split_queue_memcg(queue) != folio_memcg(folio))) {
+		spin_unlock(&queue->split_queue_lock);
+		goto retry;
+	}
+
+	/*
+	 * Preemption is disabled inside spin_lock(), so the spinlock
+	 * critical section also serves as an RCU read-side critical section.
+	 */
+	rcu_read_unlock();
+
 	return queue;
 }
 
@@ -546,9 +559,19 @@ folio_split_queue_lock_irqsave(struct folio *folio, unsigned long *flags)
 {
 	struct deferred_split *queue;
 
+	rcu_read_lock();
+retry:
 	queue = folio_split_queue(folio);
 	spin_lock_irqsave(&queue->split_queue_lock, *flags);
 
+	if (unlikely(split_queue_memcg(queue) != folio_memcg(folio))) {
+		spin_unlock_irqrestore(&queue->split_queue_lock, *flags);
+		goto retry;
+	}
+
+	/* See the comments in folio_split_queue_lock(). */
+	rcu_read_unlock();
+
 	return queue;
 }
 
--
2.11.0