Message-Id: <20220427042841.678351-2-naoya.horiguchi@linux.dev>
Date: Wed, 27 Apr 2022 13:28:38 +0900
From: Naoya Horiguchi <naoya.horiguchi@...ux.dev>
To: linux-mm@...ck.org
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Miaohe Lin <linmiaohe@...wei.com>,
David Hildenbrand <david@...hat.com>,
Mike Kravetz <mike.kravetz@...cle.com>,
Yang Shi <shy828301@...il.com>,
Oscar Salvador <osalvador@...e.de>,
Muchun Song <songmuchun@...edance.com>,
Naoya Horiguchi <naoya.horiguchi@....com>,
linux-kernel@...r.kernel.org
Subject: [RFC PATCH v1 1/4] mm, hwpoison, hugetlb: introduce SUBPAGE_INDEX_HWPOISON to save raw error page
From: Naoya Horiguchi <naoya.horiguchi@....com>
When handling a memory error on a hugetlb page, the error handler tries
to dissolve it and turn it into 4kB pages. If the dissolution succeeds,
the PageHWPoison flag is moved to the raw error page, so that case is
fine. However, dissolution sometimes fails, and then the error page is
left as a hwpoisoned hugepage. It would be useful to retry the dissolve
later to save the healthy subpages, but that is not possible now because
the information about which subpage is the raw error page is lost.
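
For illustration, the flag transfer on a successful dissolve boils down
to the sketch below ("head" and "raw_error" are illustrative names here,
not the handler's exact code):

	/*
	 * Sketch: keep the poison on the single bad 4kB page and clear
	 * it from the former head page, so the remaining subpages can
	 * be freed as healthy pages.
	 */
	if (raw_error && raw_error != head) {
		SetPageHWPoison(raw_error);
		ClearPageHWPoison(head);
	}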
Use the private field of a tail page to keep that information. The code
path that shrinks the hugepage pool uses this info to attempt the
delayed dissolve.
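
Concretely, the raw error page pointer is stashed in page_private() of
the tail page at SUBPAGE_INDEX_HWPOISON; a minimal sketch of the
store/load pair added below:

	/* remember the raw error page in a tail page of the hugepage */
	set_page_private(hpage + SUBPAGE_INDEX_HWPOISON, (unsigned long)page);

	/* ... and look it up again when retrying the dissolve */
	raw_error = (struct page *)page_private(hpage + SUBPAGE_INDEX_HWPOISON);
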
Signed-off-by: Naoya Horiguchi <naoya.horiguchi@....com>
---
 include/linux/hugetlb.h | 24 ++++++++++++++++++++++++
 mm/hugetlb.c            |  9 +++++++++
 mm/memory-failure.c     |  2 ++
 3 files changed, 35 insertions(+)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index ac2a1d758a80..689e69cb556b 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -42,6 +42,9 @@ enum {
 	SUBPAGE_INDEX_CGROUP,		/* reuse page->private */
 	SUBPAGE_INDEX_CGROUP_RSVD,	/* reuse page->private */
 	__MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
+#endif
+#ifdef CONFIG_MEMORY_FAILURE
+	SUBPAGE_INDEX_HWPOISON,
 #endif
 	__NR_USED_SUBPAGE,
 };
@@ -784,6 +787,27 @@ extern int dissolve_free_huge_page(struct page *page);
 extern int dissolve_free_huge_pages(unsigned long start_pfn,
 				    unsigned long end_pfn);
 
+#ifdef CONFIG_MEMORY_FAILURE
+/*
+ * pointer to raw error page is located in hpage[SUBPAGE_INDEX_HWPOISON].private
+ */
+static inline struct page *hugetlb_page_hwpoison(struct page *hpage)
+{
+	return (void *)page_private(hpage + SUBPAGE_INDEX_HWPOISON);
+}
+
+static inline void hugetlb_set_page_hwpoison(struct page *hpage,
+					struct page *page)
+{
+	set_page_private(hpage + SUBPAGE_INDEX_HWPOISON, (unsigned long)page);
+}
+#else
+static inline struct page *hugetlb_page_hwpoison(struct page *hpage)
+{
+	return NULL;
+}
+#endif
+
 #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
 #ifndef arch_hugetlb_migration_supported
 static inline bool arch_hugetlb_migration_supported(struct hstate *h)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f8e048b939c7..6867ea8345d1 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1547,6 +1547,15 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
 		return;
 	}
 
+	if (unlikely(PageHWPoison(page))) {
+		struct page *raw_error = hugetlb_page_hwpoison(page);
+
+		if (raw_error && raw_error != page) {
+			SetPageHWPoison(raw_error);
+			ClearPageHWPoison(page);
+		}
+	}
+
 	for (i = 0; i < pages_per_huge_page(h);
 	     i++, subpage = mem_map_next(subpage, page, i)) {
 		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 3e36fc19c4d1..73948a00ad4a 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1535,6 +1535,8 @@ int __get_huge_page_for_hwpoison(unsigned long pfn, int flags)
 		goto out;
 	}
 
+	hugetlb_set_page_hwpoison(head, page);
+
 	return ret;
 out:
 	if (count_increased)
--
2.25.1