Message-ID: <20211009093724.10539-4-linyunsheng@huawei.com>
Date: Sat, 9 Oct 2021 17:37:23 +0800
From: Yunsheng Lin <linyunsheng@...wei.com>
To: <davem@...emloft.net>, <kuba@...nel.org>
CC: <netdev@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<linuxarm@...neuler.org>, <akpm@...ux-foundation.org>,
<hawk@...nel.org>, <ilias.apalodimas@...aro.org>,
<peterz@...radead.org>, <yuzhao@...gle.com>, <jhubbard@...dia.com>,
<will@...nel.org>, <willy@...radead.org>, <jgg@...pe.ca>,
<mcroce@...rosoft.com>, <willemb@...gle.com>,
<cong.wang@...edance.com>, <pabeni@...hat.com>,
<haokexin@...il.com>, <nogikh@...gle.com>, <elver@...gle.com>,
<memxor@...il.com>, <vvs@...tuozzo.com>, <linux-mm@...ck.org>,
<edumazet@...gle.com>, <alexander.duyck@...il.com>,
<dsahern@...il.com>
Subject: [PATCH net-next -v5 3/4] mm: introduce __get_page() and __put_page()
Introduce __get_page() and __put_page() to operate directly on a
base page or the head of a compound page, for callers that already
know the page is not a tail page and can therefore skip the
compound_head() lookup done by get_page()/put_page().
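As a rough illustration of the intent of the split (a minimal
userspace sketch, not kernel code: the struct page below is a toy
stand-in and the refcounting/free path is heavily simplified),
get_page()/put_page() still resolve compound_head() first, while
__get_page()/__put_page() assume the caller already holds a base
page or a compound head and skip that lookup:

	/* Toy model of the get_page()/__get_page() split. */
	#include <assert.h>
	#include <stdio.h>

	struct page {
		int refcount;		/* models page->_refcount */
		struct page *head;	/* NULL for base/head page, else the head */
	};

	/* Models compound_head(): tail pages redirect to their head. */
	static struct page *compound_head(struct page *page)
	{
		return page->head ? page->head : page;
	}

	/* Caller guarantees @page is a base page or a compound head. */
	static void __get_page(struct page *page)
	{
		assert(!page->head);
		page->refcount++;
	}

	/* Works on any page, including tails, at the cost of the head lookup. */
	static void get_page(struct page *page)
	{
		__get_page(compound_head(page));
	}

	static void __put_page(struct page *page)
	{
		assert(!page->head);
		if (--page->refcount == 0)
			printf("freeing page %p\n", (void *)page);
	}

	static void put_page(struct page *page)
	{
		__put_page(compound_head(page));
	}

	int main(void)
	{
		struct page head = { .refcount = 1, .head = NULL };
		struct page tail = { .refcount = 0, .head = &head };

		get_page(&tail);	/* tail page: must go through compound_head() */
		__get_page(&head);	/* known head: skip the lookup */
		__put_page(&head);
		put_page(&tail);
		put_page(&head);	/* drops the last reference */
		return 0;
	}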
Signed-off-by: Yunsheng Lin <linyunsheng@...wei.com>
---
include/linux/mm.h | 21 ++++++++++++++-------
mm/swap.c | 6 +++---
2 files changed, 17 insertions(+), 10 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 73a52aba448f..5683313c3e9d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -902,7 +902,7 @@ static inline struct page *virt_to_head_page(const void *x)
return compound_head(page);
}
-void __put_page(struct page *page);
+void __put_single_or_compound_page(struct page *page);
void put_pages_list(struct list_head *pages);
@@ -1203,9 +1203,8 @@ static inline bool is_pci_p2pdma_page(const struct page *page)
#define page_ref_zero_or_close_to_overflow(page) \
((unsigned int) page_ref_count(page) + 127u <= 127u)
-static inline void get_page(struct page *page)
+static inline void __get_page(struct page *page)
{
- page = compound_head(page);
/*
* Getting a normal page or the head of a compound page
* requires to already have an elevated page->_refcount.
@@ -1214,6 +1213,11 @@ static inline void get_page(struct page *page)
page_ref_inc(page);
}
+static inline void get_page(struct page *page)
+{
+ __get_page(compound_head(page));
+}
+
bool __must_check try_grab_page(struct page *page, unsigned int flags);
struct page *try_grab_compound_head(struct page *page, int refs,
unsigned int flags);
@@ -1228,10 +1232,8 @@ static inline __must_check bool try_get_page(struct page *page)
return true;
}
-static inline void put_page(struct page *page)
+static inline void __put_page(struct page *page)
{
- page = compound_head(page);
-
/*
* For devmap managed pages we need to catch refcount transition from
* 2 to 1, when refcount reach one it means the page is free and we
@@ -1244,7 +1246,12 @@ static inline void put_page(struct page *page)
}
if (put_page_testzero(page))
- __put_page(page);
+ __put_single_or_compound_page(page);
+}
+
+static inline void put_page(struct page *page)
+{
+ __put_page(compound_head(page));
}
/*
diff --git a/mm/swap.c b/mm/swap.c
index af3cad4e5378..565cbde1caea 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -111,7 +111,7 @@ static void __put_compound_page(struct page *page)
destroy_compound_page(page);
}
-void __put_page(struct page *page)
+void __put_single_or_compound_page(struct page *page)
{
if (is_zone_device_page(page)) {
put_dev_pagemap(page->pgmap);
@@ -128,7 +128,7 @@ void __put_page(struct page *page)
else
__put_single_page(page);
}
-EXPORT_SYMBOL(__put_page);
+EXPORT_SYMBOL(__put_single_or_compound_page);
/**
* put_pages_list() - release a list of pages
@@ -1153,7 +1153,7 @@ void put_devmap_managed_page(struct page *page)
if (count == 1)
free_devmap_managed_page(page);
else if (!count)
- __put_page(page);
+ __put_single_or_compound_page(page);
}
EXPORT_SYMBOL(put_devmap_managed_page);
#endif
--
2.33.0