[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20200915125947.26204-9-songmuchun@bytedance.com>
Date: Tue, 15 Sep 2020 20:59:31 +0800
From: Muchun Song <songmuchun@...edance.com>
To: corbet@....net, mike.kravetz@...cle.com, tglx@...utronix.de,
mingo@...hat.com, bp@...en8.de, x86@...nel.org, hpa@...or.com,
dave.hansen@...ux.intel.com, luto@...nel.org, peterz@...radead.org,
viro@...iv.linux.org.uk, akpm@...ux-foundation.org,
paulmck@...nel.org, mchehab+huawei@...nel.org,
pawan.kumar.gupta@...ux.intel.com, rdunlap@...radead.org,
oneukum@...e.com, anshuman.khandual@....com, jroedel@...e.de,
almasrymina@...gle.com, rientjes@...gle.com
Cc: linux-doc@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-mm@...ck.org, linux-fsdevel@...r.kernel.org,
Muchun Song <songmuchun@...edance.com>
Subject: [RFC PATCH 08/24] mm/bootmem_info: Introduce {free,prepare}_vmemmap_page()
In a later patch, we will use free_vmemmap_page() to free the
unused vmemmap pages, and prepare_vmemmap_page() to initialize a
page that is to be used as a vmemmap page.
Signed-off-by: Muchun Song <songmuchun@...edance.com>
---
include/linux/bootmem_info.h | 25 +++++++++++++++++++++++++
1 file changed, 25 insertions(+)
diff --git a/include/linux/bootmem_info.h b/include/linux/bootmem_info.h
index 4ed6dee1adc9..ce9d8c97369d 100644
--- a/include/linux/bootmem_info.h
+++ b/include/linux/bootmem_info.h
@@ -3,6 +3,7 @@
#define __LINUX_BOOTMEM_INFO_H
#include <linux/mmzone.h>
+#include <linux/mm.h>
/*
* Types for free bootmem stored in page->lru.next. These have to be in
@@ -22,6 +23,30 @@ void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
void get_page_bootmem(unsigned long info, struct page *page,
unsigned long type);
void put_page_bootmem(struct page *page);
+
+/*
+ * Free one vmemmap page that was allocated from bootmem.  Bootmem pages
+ * were marked PageReserved by reserve_bootmem_region() and carry a
+ * bootmem type magic in page->freelist; put_page_bootmem() drops the
+ * bootmem reference taken via get_page_bootmem().
+ *
+ * NOTE(review): a well-formed bootmem vmemmap page is expected to be
+ * reserved with a refcount of exactly 2 — warn otherwise.
+ */
+static inline void free_vmemmap_page(struct page *page)
+{
+ VM_WARN_ON(!PageReserved(page) || page_ref_count(page) != 2);
+
+ /* bootmem page has reserved flag in the reserve_bootmem_region */
+ if (PageReserved(page)) {
+ unsigned long magic = (unsigned long)page->freelist;
+
+ if (magic == SECTION_INFO || magic == MIX_SECTION_INFO)
+ put_page_bootmem(page);
+ else
+ WARN_ON(1); /* unexpected magic: not a section-info bootmem page */
+ }
+}
+
+/*
+ * Initialize a page so it can serve as a vmemmap page: take a bootmem
+ * reference tagged SECTION_INFO with the page's section number (the
+ * counterpart that free_vmemmap_page() later checks and releases), mark
+ * the page reserved, and drop it from the managed page count.
+ */
+static inline void prepare_vmemmap_page(struct page *page)
+{
+ unsigned long section_nr = pfn_to_section_nr(page_to_pfn(page));
+
+ get_page_bootmem(section_nr, page, SECTION_INFO);
+ __SetPageReserved(page);
+ /* page is no longer managed by the buddy allocator accounting */
+ adjust_managed_page_count(page, -1);
+}
#else
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
--
2.20.1
Powered by blists - more mailing lists