Message-Id: <20220429142206.294714-2-pizhenwei@bytedance.com>
Date: Fri, 29 Apr 2022 22:22:03 +0800
From: zhenwei pi <pizhenwei@...edance.com>
To: akpm@...ux-foundation.org, naoya.horiguchi@....com
Cc: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
zhenwei pi <pizhenwei@...edance.com>,
Wu Fengguang <fengguang.wu@...el.com>
Subject: [PATCH 1/4] mm/memory-failure.c: move clear_hwpoisoned_pages

clear_hwpoisoned_pages() clears the HWPoison flag and decreases the
number of poisoned pages; this is really part of the memory-failure
handling. Move the function from sparse.c to memory-failure.c, so that
sparse.c no longer carries any CONFIG_MEMORY_FAILURE-specific code.
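
For reference, the only caller is sparse_remove_section() in
mm/sparse.c; a simplified sketch of the call site (unchanged by this
patch, shown only for context) is:

	void sparse_remove_section(struct mem_section *ms, unsigned long pfn,
				   unsigned long nr_pages, unsigned long map_offset,
				   struct vmem_altmap *altmap)
	{
		/* Clear HWPoison marks before the section's memmap goes away. */
		clear_hwpoisoned_pages(pfn_to_page(pfn) + map_offset,
				       nr_pages - map_offset);
		section_deactivate(pfn, nr_pages, altmap);
	}
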
Cc: Wu Fengguang <fengguang.wu@...el.com>
Signed-off-by: zhenwei pi <pizhenwei@...edance.com>
---
 mm/internal.h       | 11 +++++++++++
 mm/memory-failure.c | 21 +++++++++++++++++++++
 mm/sparse.c         | 27 ---------------------------
 3 files changed, 32 insertions(+), 27 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
index cf16280ce132..e8add8df4e0f 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -758,4 +758,15 @@ struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags);
 
 DECLARE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
 
+/*
+ * mm/memory-failure.c
+ */
+#ifdef CONFIG_MEMORY_FAILURE
+void clear_hwpoisoned_pages(struct page *memmap, int nr_pages);
+#else
+static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
+{
+}
+#endif
+
 #endif /* __MM_INTERNAL_H */
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 27760c19bad7..46d9fb612dcc 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -2401,3 +2401,24 @@ int soft_offline_page(unsigned long pfn, int flags)
 
 	return ret;
 }
+
+void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
+{
+	int i;
+
+	/*
+	 * A further optimization is to have per section refcounted
+	 * num_poisoned_pages. But that would need more space per memmap, so
+	 * for now just do a quick global check to speed up this routine in the
+	 * absence of bad pages.
+	 */
+	if (atomic_long_read(&num_poisoned_pages) == 0)
+		return;
+
+	for (i = 0; i < nr_pages; i++) {
+		if (PageHWPoison(&memmap[i])) {
+			num_poisoned_pages_dec();
+			ClearPageHWPoison(&memmap[i]);
+		}
+	}
+}
diff --git a/mm/sparse.c b/mm/sparse.c
index 952f06d8f373..e983c38fac8f 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -916,33 +916,6 @@ int __meminit sparse_add_section(int nid, unsigned long start_pfn,
 	return 0;
 }
 
-#ifdef CONFIG_MEMORY_FAILURE
-static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
-{
-	int i;
-
-	/*
-	 * A further optimization is to have per section refcounted
-	 * num_poisoned_pages. But that would need more space per memmap, so
-	 * for now just do a quick global check to speed up this routine in the
-	 * absence of bad pages.
-	 */
-	if (atomic_long_read(&num_poisoned_pages) == 0)
-		return;
-
-	for (i = 0; i < nr_pages; i++) {
-		if (PageHWPoison(&memmap[i])) {
-			num_poisoned_pages_dec();
-			ClearPageHWPoison(&memmap[i]);
-		}
-	}
-}
-#else
-static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
-{
-}
-#endif
-
 void sparse_remove_section(struct mem_section *ms, unsigned long pfn,
 			   unsigned long nr_pages, unsigned long map_offset,
 			   struct vmem_altmap *altmap)
--
2.20.1