Message-Id: <20250724084441.380404-2-link@vivo.com>
Date: Thu, 24 Jul 2025 16:44:29 +0800
From: Huan Yang <link@...o.com>
To: Andrew Morton <akpm@...ux-foundation.org>,
David Hildenbrand <david@...hat.com>,
Lorenzo Stoakes <lorenzo.stoakes@...cle.com>,
Rik van Riel <riel@...riel.com>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>,
Vlastimil Babka <vbabka@...e.cz>,
Harry Yoo <harry.yoo@...cle.com>,
Xu Xin <xu.xin16@....com.cn>,
Chengming Zhou <chengming.zhou@...ux.dev>,
Mike Rapoport <rppt@...nel.org>,
Suren Baghdasaryan <surenb@...gle.com>,
Michal Hocko <mhocko@...e.com>,
Zi Yan <ziy@...dia.com>,
Matthew Brost <matthew.brost@...el.com>,
Joshua Hahn <joshua.hahnjy@...il.com>,
Rakie Kim <rakie.kim@...com>,
Byungchul Park <byungchul@...com>,
Gregory Price <gourry@...rry.net>,
Ying Huang <ying.huang@...ux.alibaba.com>,
Alistair Popple <apopple@...dia.com>,
"Matthew Wilcox (Oracle)" <willy@...radead.org>,
Huan Yang <link@...o.com>,
Christian Brauner <brauner@...nel.org>,
Usama Arif <usamaarif642@...il.com>,
Yu Zhao <yuzhao@...gle.com>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
"Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
Subject: [RFC PATCH 1/9] mm: introduce PAGE_TYPE_SHIFT
The shift value used for page_type is currently hardcoded as 24. To avoid
this hardcoding, define the macros PAGE_TYPE_SHIFT and PAGE_TYPE_MASK and
use them instead.
No functional change.
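
For illustration only (not part of the patch): a minimal userspace sketch
of the layout the macros encode, assuming the type sits in the top byte of
page->page_type and the low 24 bits hold per-type data. The value 0xf0 below
is just a stand-in for a real PGTY_* constant.

	#include <stdio.h>

	#define PAGE_TYPE_SHIFT	24
	#define PAGE_TYPE_MASK	((1 << PAGE_TYPE_SHIFT) - 1)

	int main(void)
	{
		/* top byte: stand-in type value; low 24 bits: per-type data */
		unsigned int page_type = (0xf0u << PAGE_TYPE_SHIFT) | 0x123;

		printf("type bits: 0x%x\n", page_type >> PAGE_TYPE_SHIFT);
		printf("low bits:  0x%x\n", page_type & PAGE_TYPE_MASK);
		return 0;
	}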
Signed-off-by: Huan Yang <link@...o.com>
---
include/linux/page-flags.h | 17 ++++++++++++-----
1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 4fe5ee67535b..3c7103c2eee4 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -961,9 +961,12 @@ enum pagetype {
PGTY_mapcount_underflow = 0xff
};
+#define PAGE_TYPE_SHIFT 24
+#define PAGE_TYPE_MASK ((1 << PAGE_TYPE_SHIFT) - 1)
+
static inline bool page_type_has_type(int page_type)
{
- return page_type < (PGTY_mapcount_underflow << 24);
+ return page_type < (PGTY_mapcount_underflow << PAGE_TYPE_SHIFT);
}
/* This takes a mapcount which is one more than page->_mapcount */
@@ -980,7 +983,8 @@ static inline bool page_has_type(const struct page *page)
#define FOLIO_TYPE_OPS(lname, fname) \
static __always_inline bool folio_test_##fname(const struct folio *folio) \
{ \
- return data_race(folio->page.page_type >> 24) == PGTY_##lname; \
+ return data_race(folio->page.page_type >> PAGE_TYPE_SHIFT) \
+ == PGTY_##lname; \
} \
static __always_inline void __folio_set_##fname(struct folio *folio) \
{ \
@@ -988,7 +992,8 @@ static __always_inline void __folio_set_##fname(struct folio *folio) \
return; \
VM_BUG_ON_FOLIO(data_race(folio->page.page_type) != UINT_MAX, \
folio); \
- folio->page.page_type = (unsigned int)PGTY_##lname << 24; \
+ folio->page.page_type = (unsigned int)PGTY_##lname \
+ << PAGE_TYPE_SHIFT; \
} \
static __always_inline void __folio_clear_##fname(struct folio *folio) \
{ \
@@ -1002,14 +1007,16 @@ static __always_inline void __folio_clear_##fname(struct folio *folio) \
FOLIO_TYPE_OPS(lname, fname) \
static __always_inline int Page##uname(const struct page *page) \
{ \
- return data_race(page->page_type >> 24) == PGTY_##lname; \
+ return data_race(page->page_type >> PAGE_TYPE_SHIFT) \
+ == PGTY_##lname; \
} \
static __always_inline void __SetPage##uname(struct page *page) \
{ \
if (Page##uname(page)) \
return; \
VM_BUG_ON_PAGE(data_race(page->page_type) != UINT_MAX, page); \
- page->page_type = (unsigned int)PGTY_##lname << 24; \
+ page->page_type = (unsigned int)PGTY_##lname \
+ << PAGE_TYPE_SHIFT; \
} \
static __always_inline void __ClearPage##uname(struct page *page) \
{ \
--
2.34.1