Message-ID: <81e3c45f49bdac231e831ec7ba09ef42fbb77930.1766145604.git.gladyshev.ilya1@h-partners.com>
Date: Fri, 19 Dec 2025 12:46:39 +0000
From: Gladyshev Ilya <gladyshev.ilya1@...artners.com>
To: <patchwork@...wei.com>
CC: <guohanjun@...wei.com>, <wangkefeng.wang@...wei.com>,
<weiyongjun1@...wei.com>, <yusongping@...wei.com>, <leijitang@...wei.com>,
<artem.kuzin@...wei.com>, <stepanov.anatoly@...wei.com>,
<alexander.grubnikov@...wei.com>, <gorbunov.ivan@...artners.com>,
<akpm@...ux-foundation.org>, <david@...nel.org>,
<lorenzo.stoakes@...cle.com>, <Liam.Howlett@...cle.com>, <vbabka@...e.cz>,
<rppt@...nel.org>, <surenb@...gle.com>, <mhocko@...e.com>, <ziy@...dia.com>,
<harry.yoo@...cle.com>, <willy@...radead.org>,
<gladyshev.ilya1@...artners.com>, <yuzhao@...gle.com>,
<baolin.wang@...ux.alibaba.com>, <muchun.song@...ux.dev>,
<linux-mm@...ck.org>, <linux-kernel@...r.kernel.org>
Subject: [RFC PATCH 2/2] mm: implement page refcount locking via dedicated bit
The current atomic-based page refcount implementation treats a zero
counter as dead and requires a compare-and-swap loop in folio_try_get()
to prevent incrementing a dead refcount. This CAS loop acts as a
serialization point and can become a significant bottleneck during
high-frequency file read operations.
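
For context, a minimal sketch of the retry loop that
atomic_add_unless(&_refcount, nr, 0) boils down to today (illustrative
only, not the exact mainline source; the helper name is made up). Under
contention the cmpxchg fails and every caller re-reads and retries,
which is the serialization point described above:

	/*
	 * Illustrative only: roughly what the current
	 * atomic_add_unless(&_refcount, nr, 0) path does. A failed
	 * cmpxchg forces a re-read and retry, serializing concurrent
	 * folio_try_get() callers on the same cache line.
	 */
	static inline bool refcount_add_unless_zero_cas(atomic_t *ref, int nr)
	{
		int old = atomic_read(ref);

		do {
			if (old == 0)	/* zero means dead: never resurrect */
				return false;
		} while (!atomic_try_cmpxchg(ref, &old, old + nr));

		return true;
	}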
This patch introduces PAGEREF_LOCKED_BIT to distinguish between a
(temporary) zero refcount and a locked (dead/frozen) state. Because
incrementing the counter no longer affects its locked/unlocked state,
page_ref_add_unless_zero() can use an optimistic atomic_add_return()
that operates independently of the locked bit. The locked state is
handled after the increment attempt, eliminating the need for the CAS
loop.
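
For comparison, a condensed sketch of the new fast path (taken from the
page_ref_add_unless_zero() hunk below; the helper name is invented for
illustration): the increment is unconditional, and only the rare case
where the locked bit is observed needs a fixup.

	/*
	 * Illustrative only: condensed from the page_ref_add_unless_zero()
	 * hunk below. The add always goes through; if the locked
	 * (frozen/dead) bit is set in the result, the increment is undone
	 * by restoring the counter to just the locked bit.
	 */
	static inline bool refcount_add_unless_locked(atomic_t *ref, int nr)
	{
		int val = atomic_add_return(nr, ref);

		if (likely(!(val & PAGEREF_LOCKED_BIT)))
			return true;

		/* Raced with freeze/free: attempt to undo our increment. */
		atomic_cmpxchg_relaxed(ref, val, PAGEREF_LOCKED_BIT);
		return false;
	}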
Co-developed-by: Gorbunov Ivan <gorbunov.ivan@...artners.com>
Signed-off-by: Gorbunov Ivan <gorbunov.ivan@...artners.com>
Signed-off-by: Gladyshev Ilya <gladyshev.ilya1@...artners.com>
---
 include/linux/page-flags.h |  5 ++++-
 include/linux/page_ref.h   | 25 +++++++++++++++++++++----
 2 files changed, 25 insertions(+), 5 deletions(-)
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 7c2195baf4c1..f2a9302104eb 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -196,6 +196,9 @@ enum pageflags {
#define PAGEFLAGS_MASK ((1UL << NR_PAGEFLAGS) - 1)
+/* Most significant bit in page refcount */
+#define PAGEREF_LOCKED_BIT (1 << 31)
+
#ifndef __GENERATING_BOUNDS_H
#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
@@ -257,7 +260,7 @@ static __always_inline bool page_count_writable(const struct page *page)
* The refcount check also prevents modification attempts to other (r/o)
* tail pages that are not fake heads.
*/
- if (!atomic_read_acquire(&page->_refcount))
+ if (atomic_read_acquire(&page->_refcount) & PAGEREF_LOCKED_BIT)
return false;
return page_fixed_fake_head(page) == page;
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
index b0e3f4a4b4b8..98717fd25306 100644
--- a/include/linux/page_ref.h
+++ b/include/linux/page_ref.h
@@ -64,7 +64,12 @@ static inline void __page_ref_unfreeze(struct page *page, int v)
static inline int page_ref_count(const struct page *page)
{
- return atomic_read(&page->_refcount);
+ int val = atomic_read(&page->_refcount);
+
+ if (unlikely(val & PAGEREF_LOCKED_BIT))
+ return 0;
+
+ return val;
}
/**
@@ -176,6 +181,9 @@ static inline int page_ref_sub_and_test(struct page *page, int nr)
{
int ret = atomic_sub_and_test(nr, &page->_refcount);
+ if (ret)
+ ret = !atomic_cmpxchg_relaxed(&page->_refcount, 0, PAGEREF_LOCKED_BIT);
+
if (page_ref_tracepoint_active(page_ref_mod_and_test))
__page_ref_mod_and_test(page, -nr, ret);
return ret;
@@ -204,6 +212,9 @@ static inline int page_ref_dec_and_test(struct page *page)
{
int ret = atomic_dec_and_test(&page->_refcount);
+ if (ret)
+ ret = !atomic_cmpxchg_relaxed(&page->_refcount, 0, PAGEREF_LOCKED_BIT);
+
if (page_ref_tracepoint_active(page_ref_mod_and_test))
__page_ref_mod_and_test(page, -1, ret);
return ret;
@@ -231,11 +242,17 @@ static inline int folio_ref_dec_return(struct folio *folio)
static inline bool page_ref_add_unless_zero(struct page *page, int nr)
{
bool ret = false;
+ int val;
rcu_read_lock();
/* avoid writing to the vmemmap area being remapped */
- if (page_count_writable(page))
- ret = atomic_add_unless(&page->_refcount, nr, 0);
+ if (page_count_writable(page)) {
+ val = atomic_add_return(nr, &page->_refcount);
+ ret = !(val & PAGEREF_LOCKED_BIT);
+
+ if (unlikely(!ret))
+ atomic_cmpxchg_relaxed(&page->_refcount, val, PAGEREF_LOCKED_BIT);
+ }
rcu_read_unlock();
if (page_ref_tracepoint_active(page_ref_mod_unless))
@@ -271,7 +288,7 @@ static inline bool folio_ref_try_add(struct folio *folio, int count)
static inline int page_ref_freeze(struct page *page, int count)
{
- int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);
+ int ret = likely(atomic_cmpxchg(&page->_refcount, count, PAGEREF_LOCKED_BIT) == count);
if (page_ref_tracepoint_active(page_ref_freeze))
__page_ref_freeze(page, count, ret);
--
2.43.0