Message-ID: <20221213030557.143432-1-wangkefeng.wang@huawei.com>
Date: Tue, 13 Dec 2022 11:05:57 +0800
From: Kefeng Wang <wangkefeng.wang@...wei.com>
To: <naoya.horiguchi@....com>, <akpm@...ux-foundation.org>,
<linux-mm@...ck.org>
CC: <tony.luck@...el.com>, <linux-kernel@...r.kernel.org>,
<linmiaohe@...wei.com>, David Hildenbrand <david@...hat.com>,
Kefeng Wang <wangkefeng.wang@...wei.com>
Subject: [PATCH -next v3] mm: hwpoison: support recovery from ksm_might_need_to_copy()

When the kernel copies a page in ksm_might_need_to_copy() but runs into
an uncorrectable error, it will crash, since the poisoned page is
consumed by the kernel; this is similar to copy-on-write poison
recovery. When an error is detected during the page copy, return
VM_FAULT_HWPOISON in do_swap_page(), and install a hwpoison entry in
unuse_pte() when swapping off, which helps us to avoid a system crash.
Note that memory failure on a KSM page will be skipped, but
memory_failure_queue() is still called, to be consistent with the
general memory failure process.

Signed-off-by: Kefeng Wang <wangkefeng.wang@...wei.com>
---
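
Note for reviewers (illustration only, not part of the change): the
patch gives ksm_might_need_to_copy() a three-way return: a valid page,
NULL on allocation failure, or ERR_PTR(-EHWPOISON) when the copy
consumed poison, and every caller now has to decode it. Below is a
minimal userspace model of that convention. The ERR_PTR()/PTR_ERR()
stubs, the EHWPOISON value (133, as on x86), and the might_copy()
stand-in are assumptions made for this sketch, not kernel code.

#include <stdio.h>

#define EHWPOISON 133	/* value on x86; assumption for this demo */

/* Userspace stand-ins for the kernel's err.h helpers. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }

struct page { int pfn; };

/*
 * Hypothetical stand-in for ksm_might_need_to_copy():
 * outcome 0 = copy succeeded, 1 = allocation failed, 2 = poison hit.
 */
static struct page *might_copy(struct page *page, int outcome)
{
	static struct page copy;

	switch (outcome) {
	case 1:
		return NULL;			/* like a GFP allocation failure */
	case 2:
		return ERR_PTR(-EHWPOISON);	/* like copy_mc_user_highpage() failing */
	default:
		copy = *page;			/* like a successful copy */
		return &copy;
	}
}

int main(void)
{
	struct page src = { .pfn = 42 };

	for (int outcome = 0; outcome <= 2; outcome++) {
		struct page *page = might_copy(&src, outcome);

		/* Mirrors the checks this patch adds to do_swap_page()/unuse_pte(). */
		if (!page)
			puts("NULL -> VM_FAULT_OOM / -ENOMEM");
		else if (PTR_ERR(page) == -EHWPOISON)
			puts("ERR_PTR(-EHWPOISON) -> VM_FAULT_HWPOISON / hwpoison entry");
		else
			printf("valid page -> proceed with pfn %d\n", page->pfn);
	}
	return 0;
}

Since an error pointer occupies the same word as a valid pointer, the
new "else if" costs one extra comparison on the existing error path and
no extra storage.
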
 mm/ksm.c      |  8 ++++++--
 mm/memory.c   |  3 +++
 mm/swapfile.c | 19 +++++++++++++------
 3 files changed, 22 insertions(+), 8 deletions(-)

diff --git a/mm/ksm.c b/mm/ksm.c
index dd02780c387f..83e2f74ae7da 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2629,8 +2629,12 @@ struct page *ksm_might_need_to_copy(struct page *page,
 		new_page = NULL;
 	}
 	if (new_page) {
-		copy_user_highpage(new_page, page, address, vma);
-
+		if (copy_mc_user_highpage(new_page, page, address, vma)) {
+			put_page(new_page);
+			new_page = ERR_PTR(-EHWPOISON);
+			memory_failure_queue(page_to_pfn(page), 0);
+			return new_page;
+		}
 		SetPageDirty(new_page);
 		__SetPageUptodate(new_page);
 		__SetPageLocked(new_page);
diff --git a/mm/memory.c b/mm/memory.c
index aad226daf41b..5b2c137dfb2a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3840,6 +3840,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		if (unlikely(!page)) {
 			ret = VM_FAULT_OOM;
 			goto out_page;
+		} else if (unlikely(PTR_ERR(page) == -EHWPOISON)) {
+			ret = VM_FAULT_HWPOISON;
+			goto out_page;
 		}
 		folio = page_folio(page);
 
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 908a529bca12..06aaca111233 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1763,12 +1763,15 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	struct page *swapcache;
 	spinlock_t *ptl;
 	pte_t *pte, new_pte;
+	bool hwpoisoned = false;
 	int ret = 1;
 
 	swapcache = page;
 	page = ksm_might_need_to_copy(page, vma, addr);
 	if (unlikely(!page))
 		return -ENOMEM;
+	else if (unlikely(PTR_ERR(page) == -EHWPOISON))
+		hwpoisoned = true;
 
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	if (unlikely(!pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) {
@@ -1776,13 +1779,17 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 		goto out;
 	}
 
-	if (unlikely(!PageUptodate(page))) {
-		pte_t pteval;
+	if (hwpoisoned || !PageUptodate(page)) {
+		swp_entry_t swp_entry;
 
 		dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
-		pteval = swp_entry_to_pte(make_swapin_error_entry());
-		set_pte_at(vma->vm_mm, addr, pte, pteval);
-		swap_free(entry);
+		if (hwpoisoned) {
+			swp_entry = make_hwpoison_entry(swapcache);
+			page = swapcache;
+		} else {
+			swp_entry = make_swapin_error_entry();
+		}
+		new_pte = swp_entry_to_pte(swp_entry);
 		ret = 0;
 		goto out;
 	}
@@ -1816,9 +1823,9 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 		new_pte = pte_mksoft_dirty(new_pte);
 	if (pte_swp_uffd_wp(*pte))
 		new_pte = pte_mkuffd_wp(new_pte);
+out:
 	set_pte_at(vma->vm_mm, addr, pte, new_pte);
 	swap_free(entry);
-out:
 	pte_unmap_unlock(pte, ptl);
 	if (page != swapcache) {
 		unlock_page(page);
--
2.35.3