[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20251002081247.51255-26-byungchul@sk.com>
Date: Thu, 2 Oct 2025 17:12:25 +0900
From: Byungchul Park <byungchul@...com>
To: linux-kernel@...r.kernel.org
Cc: kernel_team@...ynix.com,
torvalds@...ux-foundation.org,
damien.lemoal@...nsource.wdc.com,
linux-ide@...r.kernel.org,
adilger.kernel@...ger.ca,
linux-ext4@...r.kernel.org,
mingo@...hat.com,
peterz@...radead.org,
will@...nel.org,
tglx@...utronix.de,
rostedt@...dmis.org,
joel@...lfernandes.org,
sashal@...nel.org,
daniel.vetter@...ll.ch,
duyuyang@...il.com,
johannes.berg@...el.com,
tj@...nel.org,
tytso@....edu,
willy@...radead.org,
david@...morbit.com,
amir73il@...il.com,
gregkh@...uxfoundation.org,
kernel-team@....com,
linux-mm@...ck.org,
akpm@...ux-foundation.org,
mhocko@...nel.org,
minchan@...nel.org,
hannes@...xchg.org,
vdavydov.dev@...il.com,
sj@...nel.org,
jglisse@...hat.com,
dennis@...nel.org,
cl@...ux.com,
penberg@...nel.org,
rientjes@...gle.com,
vbabka@...e.cz,
ngupta@...are.org,
linux-block@...r.kernel.org,
josef@...icpanda.com,
linux-fsdevel@...r.kernel.org,
jack@...e.cz,
jlayton@...nel.org,
dan.j.williams@...el.com,
hch@...radead.org,
djwong@...nel.org,
dri-devel@...ts.freedesktop.org,
rodrigosiqueiramelo@...il.com,
melissa.srw@...il.com,
hamohammed.sa@...il.com,
harry.yoo@...cle.com,
chris.p.wilson@...el.com,
gwan-gyeong.mun@...el.com,
max.byungchul.park@...il.com,
boqun.feng@...il.com,
longman@...hat.com,
yunseong.kim@...csson.com,
ysk@...lloc.com,
yeoreum.yun@....com,
netdev@...r.kernel.org,
matthew.brost@...el.com,
her0gyugyu@...il.com,
corbet@....net,
catalin.marinas@....com,
bp@...en8.de,
dave.hansen@...ux.intel.com,
x86@...nel.org,
hpa@...or.com,
luto@...nel.org,
sumit.semwal@...aro.org,
gustavo@...ovan.org,
christian.koenig@....com,
andi.shyti@...nel.org,
arnd@...db.de,
lorenzo.stoakes@...cle.com,
Liam.Howlett@...cle.com,
rppt@...nel.org,
surenb@...gle.com,
mcgrof@...nel.org,
petr.pavlu@...e.com,
da.gomez@...nel.org,
samitolvanen@...gle.com,
paulmck@...nel.org,
frederic@...nel.org,
neeraj.upadhyay@...nel.org,
joelagnelf@...dia.com,
josh@...htriplett.org,
urezki@...il.com,
mathieu.desnoyers@...icios.com,
jiangshanlai@...il.com,
qiang.zhang@...ux.dev,
juri.lelli@...hat.com,
vincent.guittot@...aro.org,
dietmar.eggemann@....com,
bsegall@...gle.com,
mgorman@...e.de,
vschneid@...hat.com,
chuck.lever@...cle.com,
neil@...wn.name,
okorniev@...hat.com,
Dai.Ngo@...cle.com,
tom@...pey.com,
trondmy@...nel.org,
anna@...nel.org,
kees@...nel.org,
bigeasy@...utronix.de,
clrkwllms@...nel.org,
mark.rutland@....com,
ada.coupriediaz@....com,
kristina.martsenko@....com,
wangkefeng.wang@...wei.com,
broonie@...nel.org,
kevin.brodsky@....com,
dwmw@...zon.co.uk,
shakeel.butt@...ux.dev,
ast@...nel.org,
ziy@...dia.com,
yuzhao@...gle.com,
baolin.wang@...ux.alibaba.com,
usamaarif642@...il.com,
joel.granados@...nel.org,
richard.weiyang@...il.com,
geert+renesas@...der.be,
tim.c.chen@...ux.intel.com,
linux@...blig.org,
alexander.shishkin@...ux.intel.com,
lillian@...r-ark.net,
chenhuacai@...nel.org,
francesco@...la.it,
guoweikang.kernel@...il.com,
link@...o.com,
jpoimboe@...nel.org,
masahiroy@...nel.org,
brauner@...nel.org,
thomas.weissschuh@...utronix.de,
oleg@...hat.com,
mjguzik@...il.com,
andrii@...nel.org,
wangfushuai@...du.com,
linux-doc@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org,
linux-media@...r.kernel.org,
linaro-mm-sig@...ts.linaro.org,
linux-i2c@...r.kernel.org,
linux-arch@...r.kernel.org,
linux-modules@...r.kernel.org,
rcu@...r.kernel.org,
linux-nfs@...r.kernel.org,
linux-rt-devel@...ts.linux.dev
Subject: [PATCH v17 25/47] dept: track PG_locked with dept
Make dept able to track PG_locked waits and events, which will be
useful in practice. See the following link, which shows that dept worked
with PG_locked and detected real issues in practice:
https://lore.kernel.org/lkml/1674268856-31807-1-git-send-email-byungchul.park@lge.com/
Signed-off-by: Byungchul Park <byungchul@...com>
---
include/linux/mm_types.h | 2 +
include/linux/page-flags.h | 125 +++++++++++++++++++++++++++++++++----
include/linux/pagemap.h | 37 ++++++++++-
mm/filemap.c | 26 ++++++++
mm/mm_init.c | 2 +
5 files changed, 179 insertions(+), 13 deletions(-)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index a643fae8a349..5ebc565309af 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -20,6 +20,7 @@
#include <linux/seqlock.h>
#include <linux/percpu_counter.h>
#include <linux/types.h>
+#include <linux/dept.h>
#include <asm/mmu.h>
@@ -223,6 +224,7 @@ struct page {
struct page *kmsan_shadow;
struct page *kmsan_origin;
#endif
+ struct dept_ext_wgen pg_locked_wgen;
} _struct_page_alignment;
/*
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 8d3fa3a91ce4..d3c4954c4218 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -198,6 +198,61 @@ enum pageflags {
#ifndef __GENERATING_BOUNDS_H
+#ifdef CONFIG_DEPT
+#include <linux/kernel.h>
+#include <linux/dept.h>
+
+extern struct dept_map pg_locked_map;
+
+/*
+ * Place the following annotations at their suitable points in the code:
+ *
+ * Annotate dept_page_set_bit() around the first set_bit*()
+ * Annotate dept_page_clear_bit() around clear_bit*()
+ * Annotate dept_page_wait_on_bit() around wait_on_bit*()
+ */
+
+static inline void dept_page_set_bit(struct page *p, int bit_nr)
+{
+ if (bit_nr == PG_locked)
+ dept_request_event(&pg_locked_map, &p->pg_locked_wgen);
+}
+
+static inline void dept_page_clear_bit(struct page *p, int bit_nr)
+{
+ if (bit_nr == PG_locked)
+ dept_event(&pg_locked_map, 1UL, _RET_IP_, __func__, &p->pg_locked_wgen);
+}
+
+static inline void dept_page_wait_on_bit(struct page *p, int bit_nr)
+{
+ if (bit_nr == PG_locked)
+ dept_wait(&pg_locked_map, 1UL, _RET_IP_, __func__, 0, -1L);
+}
+
+static inline void dept_folio_set_bit(struct folio *f, int bit_nr)
+{
+ dept_page_set_bit(&f->page, bit_nr);
+}
+
+static inline void dept_folio_clear_bit(struct folio *f, int bit_nr)
+{
+ dept_page_clear_bit(&f->page, bit_nr);
+}
+
+static inline void dept_folio_wait_on_bit(struct folio *f, int bit_nr)
+{
+ dept_page_wait_on_bit(&f->page, bit_nr);
+}
+#else
+#define dept_page_set_bit(p, bit_nr) do { } while (0)
+#define dept_page_clear_bit(p, bit_nr) do { } while (0)
+#define dept_page_wait_on_bit(p, bit_nr) do { } while (0)
+#define dept_folio_set_bit(f, bit_nr) do { } while (0)
+#define dept_folio_clear_bit(f, bit_nr) do { } while (0)
+#define dept_folio_wait_on_bit(f, bit_nr) do { } while (0)
+#endif
+
#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
@@ -419,27 +474,51 @@ static __always_inline bool folio_test_##name(const struct folio *folio) \
#define FOLIO_SET_FLAG(name, page) \
static __always_inline void folio_set_##name(struct folio *folio) \
-{ set_bit(PG_##name, folio_flags(folio, page)); }
+{ \
+ set_bit(PG_##name, folio_flags(folio, page)); \
+ dept_folio_set_bit(folio, PG_##name); \
+}
#define FOLIO_CLEAR_FLAG(name, page) \
static __always_inline void folio_clear_##name(struct folio *folio) \
-{ clear_bit(PG_##name, folio_flags(folio, page)); }
+{ \
+ clear_bit(PG_##name, folio_flags(folio, page)); \
+ dept_folio_clear_bit(folio, PG_##name); \
+}
#define __FOLIO_SET_FLAG(name, page) \
static __always_inline void __folio_set_##name(struct folio *folio) \
-{ __set_bit(PG_##name, folio_flags(folio, page)); }
+{ \
+ __set_bit(PG_##name, folio_flags(folio, page)); \
+ dept_folio_set_bit(folio, PG_##name); \
+}
#define __FOLIO_CLEAR_FLAG(name, page) \
static __always_inline void __folio_clear_##name(struct folio *folio) \
-{ __clear_bit(PG_##name, folio_flags(folio, page)); }
+{ \
+ __clear_bit(PG_##name, folio_flags(folio, page)); \
+ dept_folio_clear_bit(folio, PG_##name); \
+}
#define FOLIO_TEST_SET_FLAG(name, page) \
static __always_inline bool folio_test_set_##name(struct folio *folio) \
-{ return test_and_set_bit(PG_##name, folio_flags(folio, page)); }
+{ \
+ bool __ret = test_and_set_bit(PG_##name, folio_flags(folio, page)); \
+ \
+ if (!__ret) \
+ dept_folio_set_bit(folio, PG_##name); \
+ return __ret; \
+}
#define FOLIO_TEST_CLEAR_FLAG(name, page) \
static __always_inline bool folio_test_clear_##name(struct folio *folio) \
-{ return test_and_clear_bit(PG_##name, folio_flags(folio, page)); }
+{ \
+ bool __ret = test_and_clear_bit(PG_##name, folio_flags(folio, page)); \
+ \
+ if (__ret) \
+ dept_folio_clear_bit(folio, PG_##name); \
+ return __ret; \
+}
#define FOLIO_FLAG(name, page) \
FOLIO_TEST_FLAG(name, page) \
@@ -454,32 +533,54 @@ static __always_inline int Page##uname(const struct page *page) \
#define SETPAGEFLAG(uname, lname, policy) \
FOLIO_SET_FLAG(lname, FOLIO_##policy) \
static __always_inline void SetPage##uname(struct page *page) \
-{ set_bit(PG_##lname, &policy(page, 1)->flags); }
+{ \
+ set_bit(PG_##lname, &policy(page, 1)->flags); \
+ dept_page_set_bit(page, PG_##lname); \
+}
#define CLEARPAGEFLAG(uname, lname, policy) \
FOLIO_CLEAR_FLAG(lname, FOLIO_##policy) \
static __always_inline void ClearPage##uname(struct page *page) \
-{ clear_bit(PG_##lname, &policy(page, 1)->flags); }
+{ \
+ clear_bit(PG_##lname, &policy(page, 1)->flags); \
+ dept_page_clear_bit(page, PG_##lname); \
+}
#define __SETPAGEFLAG(uname, lname, policy) \
__FOLIO_SET_FLAG(lname, FOLIO_##policy) \
static __always_inline void __SetPage##uname(struct page *page) \
-{ __set_bit(PG_##lname, &policy(page, 1)->flags); }
+{ \
+ __set_bit(PG_##lname, &policy(page, 1)->flags); \
+ dept_page_set_bit(page, PG_##lname); \
+}
#define __CLEARPAGEFLAG(uname, lname, policy) \
__FOLIO_CLEAR_FLAG(lname, FOLIO_##policy) \
static __always_inline void __ClearPage##uname(struct page *page) \
-{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }
+{ \
+ __clear_bit(PG_##lname, &policy(page, 1)->flags); \
+ dept_page_clear_bit(page, PG_##lname); \
+}
#define TESTSETFLAG(uname, lname, policy) \
FOLIO_TEST_SET_FLAG(lname, FOLIO_##policy) \
static __always_inline int TestSetPage##uname(struct page *page) \
-{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }
+{ \
+ bool ret = test_and_set_bit(PG_##lname, &policy(page, 1)->flags);\
+ if (!ret) \
+ dept_page_set_bit(page, PG_##lname); \
+ return ret; \
+}
#define TESTCLEARFLAG(uname, lname, policy) \
FOLIO_TEST_CLEAR_FLAG(lname, FOLIO_##policy) \
static __always_inline int TestClearPage##uname(struct page *page) \
-{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }
+{ \
+ bool ret = test_and_clear_bit(PG_##lname, &policy(page, 1)->flags);\
+ if (ret) \
+ dept_page_clear_bit(page, PG_##lname); \
+ return ret; \
+}
#define PAGEFLAG(uname, lname, policy) \
TESTPAGEFLAG(uname, lname, policy) \
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 12a12dae727d..53b68b7a3f17 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -1093,7 +1093,12 @@ void folio_unlock(struct folio *folio);
*/
static inline bool folio_trylock(struct folio *folio)
{
- return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
+ bool ret = !test_and_set_bit_lock(PG_locked, folio_flags(folio, 0));
+
+ if (ret)
+ dept_page_set_bit(&folio->page, PG_locked);
+
+ return likely(ret);
}
/*
@@ -1129,6 +1134,16 @@ static inline bool trylock_page(struct page *page)
static inline void folio_lock(struct folio *folio)
{
might_sleep();
+
+ /*
+ * dept_page_wait_on_bit() will be called if __folio_lock() goes
+ * through a real wait path. However, to do a better job of
+ * detecting *potential* deadlocks, let's assume that folio_lock()
+ * always goes through wait so that dept can take into account all
+ * the potential cases.
+ */
+ dept_page_wait_on_bit(&folio->page, PG_locked);
+
if (!folio_trylock(folio))
__folio_lock(folio);
}
@@ -1149,6 +1164,15 @@ static inline void lock_page(struct page *page)
struct folio *folio;
might_sleep();
+ /*
+ * dept_page_wait_on_bit() will be called if __folio_lock() goes
+ * through a real wait path. However, to do a better job of
+ * detecting *potential* deadlocks, let's assume that lock_page()
+ * always goes through wait so that dept can take into account all
+ * the potential cases.
+ */
+ dept_page_wait_on_bit(page, PG_locked);
+
folio = page_folio(page);
if (!folio_trylock(folio))
__folio_lock(folio);
@@ -1167,6 +1191,17 @@ static inline void lock_page(struct page *page)
static inline int folio_lock_killable(struct folio *folio)
{
might_sleep();
+
+ /*
+ * dept_page_wait_on_bit() will be called if
+ * __folio_lock_killable() goes through a real wait path.
+ * However, to do a better job of detecting *potential*
+ * deadlocks, let's assume that folio_lock_killable() always goes
+ * through wait so that dept can take into account all the
+ * potential cases.
+ */
+ dept_page_wait_on_bit(&folio->page, PG_locked);
+
if (!folio_trylock(folio))
return __folio_lock_killable(folio);
return 0;
diff --git a/mm/filemap.c b/mm/filemap.c
index 751838ef05e5..edb0710ddb3f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -48,6 +48,7 @@
#include <linux/rcupdate_wait.h>
#include <linux/sched/mm.h>
#include <linux/sysctl.h>
+#include <linux/dept.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include "internal.h"
@@ -1145,6 +1146,7 @@ static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync,
if (flags & WQ_FLAG_CUSTOM) {
if (test_and_set_bit(key->bit_nr, &key->folio->flags))
return -1;
+ dept_page_set_bit(&key->folio->page, key->bit_nr);
flags |= WQ_FLAG_DONE;
}
}
@@ -1228,6 +1230,7 @@ static inline bool folio_trylock_flag(struct folio *folio, int bit_nr,
if (wait->flags & WQ_FLAG_EXCLUSIVE) {
if (test_and_set_bit(bit_nr, &folio->flags))
return false;
+ dept_page_set_bit(&folio->page, bit_nr);
} else if (test_bit(bit_nr, &folio->flags))
return false;
@@ -1235,6 +1238,9 @@ static inline bool folio_trylock_flag(struct folio *folio, int bit_nr,
return true;
}
+struct dept_map __maybe_unused pg_locked_map = DEPT_MAP_INITIALIZER(pg_locked_map, NULL);
+EXPORT_SYMBOL(pg_locked_map);
+
static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
int state, enum behavior behavior)
{
@@ -1246,6 +1252,8 @@ static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
unsigned long pflags;
bool in_thrashing;
+ dept_page_wait_on_bit(&folio->page, bit_nr);
+
if (bit_nr == PG_locked &&
!folio_test_uptodate(folio) && folio_test_workingset(folio)) {
delayacct_thrashing_start(&in_thrashing);
@@ -1339,6 +1347,23 @@ static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
break;
}
+ /*
+ * dept_page_set_bit() might have been called already in
+ * folio_trylock_flag(), wake_page_function() or elsewhere.
+ * However, call it again to reset the wgen of dept to ensure
+ * dept_page_wait_on_bit() is called prior to
+ * dept_page_set_bit().
+ *
+ * Remember that dept considers all the waits between
+ * dept_page_set_bit() and dept_page_clear_bit() as potential
+ * event disturbers. Ensure the correct sequence so that dept
+ * can make correct decisions:
+ *
+ * wait -> acquire(set bit) -> release(clear bit)
+ */
+ if (wait->flags & WQ_FLAG_DONE)
+ dept_page_set_bit(&folio->page, bit_nr);
+
/*
* If a signal happened, this 'finish_wait()' may remove the last
* waiter from the wait-queues, but the folio waiters bit will remain
@@ -1496,6 +1521,7 @@ void folio_unlock(struct folio *folio)
BUILD_BUG_ON(PG_waiters != 7);
BUILD_BUG_ON(PG_locked > 7);
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+ dept_page_clear_bit(&folio->page, PG_locked);
if (folio_xor_flags_has_waiters(folio, 1 << PG_locked))
folio_wake_bit(folio, PG_locked);
}
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 5c21b3af216b..09e4ac6a73c7 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -32,6 +32,7 @@
#include <linux/vmstat.h>
#include <linux/kexec_handover.h>
#include <linux/hugetlb.h>
+#include <linux/dept.h>
#include "internal.h"
#include "slab.h"
#include "shuffle.h"
@@ -587,6 +588,7 @@ void __meminit __init_single_page(struct page *page, unsigned long pfn,
atomic_set(&page->_mapcount, -1);
page_cpupid_reset_last(page);
page_kasan_tag_reset(page);
+ dept_ext_wgen_init(&page->pg_locked_wgen);
INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
--
2.17.1
Powered by blists - more mailing lists