Message-Id: <20211117012059.141450-3-pasha.tatashin@soleen.com>
Date: Wed, 17 Nov 2021 01:20:51 +0000
From: Pasha Tatashin <pasha.tatashin@...een.com>
To: pasha.tatashin@...een.com, linux-kernel@...r.kernel.org,
linux-mm@...ck.org, linux-m68k@...ts.linux-m68k.org,
anshuman.khandual@....com, willy@...radead.org,
akpm@...ux-foundation.org, william.kucharski@...cle.com,
mike.kravetz@...cle.com, vbabka@...e.cz, geert@...ux-m68k.org,
schmitzmic@...il.com, rostedt@...dmis.org, mingo@...hat.com,
hannes@...xchg.org, guro@...com, songmuchun@...edance.com,
weixugc@...gle.com, gthelen@...gle.com, rientjes@...gle.com,
pjt@...gle.com
Subject: [RFC v2 02/10] mm: add overflow and underflow checks for page->_refcount
Problems with page->_refcount are hard to debug, because by the time they
are detected the damage usually occurred long before. Yet an invalid page
refcount can be catastrophic and lead to memory corruption.

Reduce the window between when an _refcount problem is introduced and when
it manifests by adding underflow and overflow checks to the functions that
modify _refcount.

Use the atomic_fetch_* functions to obtain the old value of _refcount, and
compare it with the new value to detect overflow/underflow.
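For illustration, the same fetch-then-compare pattern in a standalone
userspace sketch (not part of this patch; it uses C11 <stdatomic.h> in
place of the kernel's atomic_t, and ref_sub_checked() is a hypothetical
stand-in for page_ref_sub()):

  #include <stdatomic.h>
  #include <stdio.h>

  /*
   * atomic_fetch_sub() returns the value the counter held _before_ the
   * subtraction, so the old and new values can be compared to detect a
   * wrap without a second read of the counter.
   */
  static void ref_sub_checked(atomic_int *ref, int nr)
  {
          int old_val = atomic_fetch_sub(ref, nr);
          int new_val = old_val - nr;

          /* Mirrors the VM_BUG_ON_PAGE() underflow condition below. */
          if ((unsigned int)new_val > (unsigned int)old_val)
                  fprintf(stderr, "refcount underflow: %d -> %d\n",
                          old_val, new_val);
  }

  int main(void)
  {
          atomic_int ref = 1;

          ref_sub_checked(&ref, 1);       /* 1 -> 0: OK, silent */
          ref_sub_checked(&ref, 1);       /* 0 -> -1: underflow reported */
          return 0;
  }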
Signed-off-by: Pasha Tatashin <pasha.tatashin@...een.com>
---
include/linux/page_ref.h | 59 +++++++++++++++++++++++++++++-----------
1 file changed, 43 insertions(+), 16 deletions(-)
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
index 1903af5fb087..f3c61dc6344a 100644
--- a/include/linux/page_ref.h
+++ b/include/linux/page_ref.h
@@ -117,7 +117,10 @@ static inline void init_page_count(struct page *page)
 
 static inline void page_ref_add(struct page *page, int nr)
 {
-        atomic_add(nr, &page->_refcount);
+        int old_val = atomic_fetch_add(nr, &page->_refcount);
+        int new_val = old_val + nr;
+
+        VM_BUG_ON_PAGE((unsigned int)new_val < (unsigned int)old_val, page);
         if (page_ref_tracepoint_active(page_ref_mod))
                 __page_ref_mod(page, nr);
 }
@@ -129,7 +132,10 @@ static inline void folio_ref_add(struct folio *folio, int nr)
 
 static inline void page_ref_sub(struct page *page, int nr)
 {
-        atomic_sub(nr, &page->_refcount);
+        int old_val = atomic_fetch_sub(nr, &page->_refcount);
+        int new_val = old_val - nr;
+
+        VM_BUG_ON_PAGE((unsigned int)new_val > (unsigned int)old_val, page);
         if (page_ref_tracepoint_active(page_ref_mod))
                 __page_ref_mod(page, -nr);
 }
@@ -141,11 +147,13 @@ static inline void folio_ref_sub(struct folio *folio, int nr)
 
 static inline int page_ref_sub_return(struct page *page, int nr)
 {
-        int ret = atomic_sub_return(nr, &page->_refcount);
+        int old_val = atomic_fetch_sub(nr, &page->_refcount);
+        int new_val = old_val - nr;
 
+        VM_BUG_ON_PAGE((unsigned int)new_val > (unsigned int)old_val, page);
         if (page_ref_tracepoint_active(page_ref_mod_and_return))
-                __page_ref_mod_and_return(page, -nr, ret);
-        return ret;
+                __page_ref_mod_and_return(page, -nr, new_val);
+        return new_val;
 }
 
 static inline int folio_ref_sub_return(struct folio *folio, int nr)
@@ -155,7 +163,10 @@ static inline int folio_ref_sub_return(struct folio *folio, int nr)
 
 static inline void page_ref_inc(struct page *page)
 {
-        atomic_inc(&page->_refcount);
+        int old_val = atomic_fetch_inc(&page->_refcount);
+        int new_val = old_val + 1;
+
+        VM_BUG_ON_PAGE((unsigned int)new_val < (unsigned int)old_val, page);
         if (page_ref_tracepoint_active(page_ref_mod))
                 __page_ref_mod(page, 1);
 }
@@ -167,7 +178,10 @@ static inline void folio_ref_inc(struct folio *folio)
 
 static inline void page_ref_dec(struct page *page)
 {
-        atomic_dec(&page->_refcount);
+        int old_val = atomic_fetch_dec(&page->_refcount);
+        int new_val = old_val - 1;
+
+        VM_BUG_ON_PAGE((unsigned int)new_val > (unsigned int)old_val, page);
         if (page_ref_tracepoint_active(page_ref_mod))
                 __page_ref_mod(page, -1);
 }
@@ -179,8 +193,11 @@ static inline void folio_ref_dec(struct folio *folio)
 
 static inline int page_ref_sub_and_test(struct page *page, int nr)
 {
-        int ret = atomic_sub_and_test(nr, &page->_refcount);
+        int old_val = atomic_fetch_sub(nr, &page->_refcount);
+        int new_val = old_val - nr;
+        int ret = new_val == 0;
 
+        VM_BUG_ON_PAGE((unsigned int)new_val > (unsigned int)old_val, page);
         if (page_ref_tracepoint_active(page_ref_mod_and_test))
                 __page_ref_mod_and_test(page, -nr, ret);
         return ret;
@@ -193,11 +210,13 @@ static inline int folio_ref_sub_and_test(struct folio *folio, int nr)
 
 static inline int page_ref_inc_return(struct page *page)
 {
-        int ret = atomic_inc_return(&page->_refcount);
+        int old_val = atomic_fetch_inc(&page->_refcount);
+        int new_val = old_val + 1;
 
+        VM_BUG_ON_PAGE((unsigned int)new_val < (unsigned int)old_val, page);
         if (page_ref_tracepoint_active(page_ref_mod_and_return))
-                __page_ref_mod_and_return(page, 1, ret);
-        return ret;
+                __page_ref_mod_and_return(page, 1, new_val);
+        return new_val;
 }
 
 static inline int folio_ref_inc_return(struct folio *folio)
@@ -207,8 +226,11 @@ static inline int folio_ref_inc_return(struct folio *folio)
 
 static inline int page_ref_dec_and_test(struct page *page)
 {
-        int ret = atomic_dec_and_test(&page->_refcount);
+        int old_val = atomic_fetch_dec(&page->_refcount);
+        int new_val = old_val - 1;
+        int ret = new_val == 0;
 
+        VM_BUG_ON_PAGE((unsigned int)new_val > (unsigned int)old_val, page);
         if (page_ref_tracepoint_active(page_ref_mod_and_test))
                 __page_ref_mod_and_test(page, -1, ret);
         return ret;
@@ -221,11 +243,13 @@ static inline int folio_ref_dec_and_test(struct folio *folio)
 
 static inline int page_ref_dec_return(struct page *page)
 {
-        int ret = atomic_dec_return(&page->_refcount);
+        int old_val = atomic_fetch_dec(&page->_refcount);
+        int new_val = old_val - 1;
 
+        VM_BUG_ON_PAGE((unsigned int)new_val > (unsigned int)old_val, page);
         if (page_ref_tracepoint_active(page_ref_mod_and_return))
-                __page_ref_mod_and_return(page, -1, ret);
-        return ret;
+                __page_ref_mod_and_return(page, -1, new_val);
+        return new_val;
 }
 
 static inline int folio_ref_dec_return(struct folio *folio)
@@ -235,8 +259,11 @@ static inline int folio_ref_dec_return(struct folio *folio)
 
 static inline bool page_ref_add_unless(struct page *page, int nr, int u)
 {
-        bool ret = atomic_add_unless(&page->_refcount, nr, u);
+        int old_val = atomic_fetch_add_unless(&page->_refcount, nr, u);
+        int new_val = old_val + nr;
+        int ret = old_val != u;
 
+        VM_BUG_ON_PAGE(ret && (unsigned int)new_val < (unsigned int)old_val, page);
         if (page_ref_tracepoint_active(page_ref_add_unless))
                 __page_ref_add_unless(page, nr, u, ret);
         return ret;
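
A note on the semantics of the checks above (a reader's sketch, not part
of the patch): the unsigned comparisons fire when the 32-bit counter wraps
around, i.e. past 2^32 on addition or below zero on subtraction; a signed
crossing of INT_MAX alone does not trip them. The boundary cases can be
exercised in plain C:

  #include <assert.h>

  int main(void)
  {
          /* sub path: 0 - 1 wraps below zero; unsigned new > unsigned old */
          int old_val = 0;
          int new_val = old_val - 1;
          assert((unsigned int)new_val > (unsigned int)old_val);

          /* add path: 0xffffffff + 1 wraps past 2^32; unsigned new < unsigned old */
          old_val = -1;                   /* 0xffffffff viewed as unsigned */
          new_val = old_val + 1;          /* wraps to 0 */
          assert((unsigned int)new_val < (unsigned int)old_val);
          return 0;
  }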
--
2.34.0.rc1.387.gb447b232ab-goog