Message-Id: <20210607195430.48228-8-david@redhat.com>
Date: Mon, 7 Jun 2021 21:54:25 +0200
From: David Hildenbrand <david@...hat.com>
To: linux-kernel@...r.kernel.org
Cc: David Hildenbrand <david@...hat.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
"Michael S. Tsirkin" <mst@...hat.com>,
Jason Wang <jasowang@...hat.com>,
Marek Kedzierski <mkedzier@...hat.com>,
Hui Zhu <teawater@...il.com>,
Pankaj Gupta <pankaj.gupta.linux@...il.com>,
Wei Yang <richard.weiyang@...ux.alibaba.com>,
Oscar Salvador <osalvador@...e.de>,
Michal Hocko <mhocko@...nel.org>,
Dan Williams <dan.j.williams@...el.com>,
Anshuman Khandual <anshuman.khandual@....com>,
Dave Hansen <dave.hansen@...ux.intel.com>,
Vlastimil Babka <vbabka@...e.cz>,
Mike Rapoport <rppt@...nel.org>,
"Rafael J. Wysocki" <rjw@...ysocki.net>,
Len Brown <lenb@...nel.org>,
Pavel Tatashin <pasha.tatashin@...een.com>,
virtualization@...ts.linux-foundation.org, linux-mm@...ck.org,
linux-acpi@...r.kernel.org
Subject: [PATCH v1 07/12] mm/memory_hotplug: track present pages in memory groups
Let's track all present pages in each memory group. Especially, track
memory present in ZONE_MOVABLE and memory present in one of the kernel
zones (which really only is ZONE_NORMAL right now as memory groups only
apply to hotplugged memory) separately within a memory group, to prepare
for making smart auto-online decisions for individual memory blocks
within a memory group based on group statistics.
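To illustrate, the new accounting boils down to the following minimal
sketch of the logic this patch adds to adjust_present_page_count(); the
struct is reduced to the two new counters, and the helper name is
hypothetical, not part of the patch itself:
	struct memory_group {
		/* Present (online) pages outside ZONE_MOVABLE. */
		unsigned long present_kernel_pages;
		/* Present (online) pages in ZONE_MOVABLE. */
		unsigned long present_movable_pages;
	};
	/*
	 * Hypothetical helper mirroring the hunk in
	 * adjust_present_page_count(): called with a positive delta when
	 * onlining pages and a negative delta when offlining them.
	 */
	static void memory_group_account(struct memory_group *group,
					 bool movable, long nr_pages)
	{
		if (!group)
			return;
		if (movable)
			group->present_movable_pages += nr_pages;
		else
			group->present_kernel_pages += nr_pages;
	}
A later patch can then base per-block auto-online decisions on the
ratio of these two counters within a group.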
Signed-off-by: David Hildenbrand <david@...hat.com>
---
drivers/base/memory.c | 10 +++++-----
include/linux/memory.h | 4 ++++
include/linux/memory_hotplug.h | 13 +++++++++----
mm/memory_hotplug.c | 19 ++++++++++++++-----
4 files changed, 32 insertions(+), 14 deletions(-)
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 00c58a6632a6..d8ea448e5fb8 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -198,7 +198,7 @@ static int memory_block_online(struct memory_block *mem)
}
ret = online_pages(start_pfn + nr_vmemmap_pages,
- nr_pages - nr_vmemmap_pages, zone);
+ nr_pages - nr_vmemmap_pages, zone, mem->group);
if (ret) {
if (nr_vmemmap_pages)
mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
@@ -210,7 +210,7 @@ static int memory_block_online(struct memory_block *mem)
* now already properly populated.
*/
if (nr_vmemmap_pages)
- adjust_present_page_count(pfn_to_page(start_pfn),
+ adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
nr_vmemmap_pages);
return ret;
@@ -228,16 +228,16 @@ static int memory_block_offline(struct memory_block *mem)
* can properly be torn down in offline_pages().
*/
if (nr_vmemmap_pages)
- adjust_present_page_count(pfn_to_page(start_pfn),
+ adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
-nr_vmemmap_pages);
ret = offline_pages(start_pfn + nr_vmemmap_pages,
- nr_pages - nr_vmemmap_pages);
+ nr_pages - nr_vmemmap_pages, mem->group);
if (ret) {
/* offline_pages() failed. Account back. */
if (nr_vmemmap_pages)
adjust_present_page_count(pfn_to_page(start_pfn),
- nr_vmemmap_pages);
+ mem->group, nr_vmemmap_pages);
return ret;
}
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 6e20a6174fe5..0eceb8467d9a 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -28,6 +28,10 @@ struct memory_group {
int nid;
/* References from memory blocks + 1. */
refcount_t refcount;
+ /* Present (online) memory outside ZONE_MOVABLE of this memory group. */
+ unsigned long present_kernel_pages;
+ /* Present (online) memory in ZONE_MOVABLE of this memory group. */
+ unsigned long present_movable_pages;
/*
* Memory group type: static vs. dynamic.
*
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 5c910dc2526a..f607d6677873 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -12,6 +12,7 @@ struct zone;
struct pglist_data;
struct mem_section;
struct memory_block;
+struct memory_group;
struct resource;
struct vmem_altmap;
@@ -111,13 +112,15 @@ static inline void zone_seqlock_init(struct zone *zone)
extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
-extern void adjust_present_page_count(struct page *page, long nr_pages);
+extern void adjust_present_page_count(struct page *page,
+ struct memory_group *group,
+ long nr_pages);
/* VM interface that may be used by firmware interface */
extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
struct zone *zone);
extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages);
extern int online_pages(unsigned long pfn, unsigned long nr_pages,
- struct zone *zone);
+ struct zone *zone, struct memory_group *group);
extern struct zone *test_pages_in_a_zone(unsigned long start_pfn,
unsigned long end_pfn);
extern void __offline_isolated_pages(unsigned long start_pfn,
@@ -322,7 +325,8 @@ static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#ifdef CONFIG_MEMORY_HOTREMOVE
extern void try_offline_node(int nid);
-extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
+extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
+ struct memory_group *group);
extern int remove_memory(u64 start, u64 size);
extern void __remove_memory(u64 start, u64 size);
extern int offline_and_remove_memory(u64 start, u64 size);
@@ -330,7 +334,8 @@ extern int offline_and_remove_memory(u64 start, u64 size);
#else
static inline void try_offline_node(int nid) {}
-static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
+static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
+ struct memory_group *group)
{
return -EINVAL;
}
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 4e039c82e7b6..5dacb0ed2997 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1039,9 +1039,11 @@ struct zone *zone_for_pfn_range(int online_type, int nid,
* This function should only be called by memory_block_{online,offline},
* and {online,offline}_pages.
*/
-void adjust_present_page_count(struct page *page, long nr_pages)
+void adjust_present_page_count(struct page *page, struct memory_group *group,
+ long nr_pages)
{
struct zone *zone = page_zone(page);
+ const bool movable = zone_idx(zone) == ZONE_MOVABLE;
unsigned long flags;
/*
@@ -1054,6 +1056,11 @@ void adjust_present_page_count(struct page *page, long nr_pages)
pgdat_resize_lock(zone->zone_pgdat, &flags);
zone->zone_pgdat->node_present_pages += nr_pages;
pgdat_resize_unlock(zone->zone_pgdat, &flags);
+
+ if (group && movable)
+ group->present_movable_pages += nr_pages;
+ else if (group && !movable)
+ group->present_kernel_pages += nr_pages;
}
int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
@@ -1099,7 +1106,8 @@ void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages)
kasan_remove_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
}
-int __ref online_pages(unsigned long pfn, unsigned long nr_pages, struct zone *zone)
+int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
+ struct zone *zone, struct memory_group *group)
{
unsigned long flags;
int need_zonelists_rebuild = 0;
@@ -1152,7 +1160,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, struct zone *z
}
online_pages_range(pfn, nr_pages);
- adjust_present_page_count(pfn_to_page(pfn), nr_pages);
+ adjust_present_page_count(pfn_to_page(pfn), group, nr_pages);
node_states_set_node(nid, &arg);
if (need_zonelists_rebuild)
@@ -1896,7 +1904,8 @@ static int count_system_ram_pages_cb(unsigned long start_pfn,
return 0;
}
-int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
+int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
+ struct memory_group *group)
{
const unsigned long end_pfn = start_pfn + nr_pages;
unsigned long pfn, system_ram_pages = 0;
@@ -2032,7 +2041,7 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
/* removal success */
adjust_managed_page_count(pfn_to_page(start_pfn), -nr_pages);
- adjust_present_page_count(pfn_to_page(start_pfn), -nr_pages);
+ adjust_present_page_count(pfn_to_page(start_pfn), group, -nr_pages);
init_per_zone_wmark_min();
--
2.31.1