Message-ID: <29b87f61-7e10-449a-21fa-106b4ae3442b@huawei.com>
Date: Sat, 11 Oct 2025 11:25:03 +0800
From: Miaohe Lin <linmiaohe@...wei.com>
To: Longlong Xia <xialonglong2025@....com>
CC: <akpm@...ux-foundation.org>, <david@...hat.com>,
<wangkefeng.wang@...wei.com>, <xu.xin16@....com.cn>,
<linux-kernel@...r.kernel.org>, <linux-mm@...ck.org>, Longlong Xia
<xialonglong@...inos.cn>, Naoya Horiguchi <nao.horiguchi@...il.com>
Subject: Re: [PATCH RFC 1/1] mm/ksm: Add recovery mechanism for memory
failures
On 2025/10/9 15:00, Longlong Xia wrote:
> From: Longlong Xia <xialonglong@...inos.cn>
>
> When a hardware memory error occurs on a KSM page, the current
> behavior is to kill all processes mapping that page. This can
> be overly aggressive when KSM has multiple duplicate pages in
> a chain where other duplicates are still healthy.
>
> This patch introduces a recovery mechanism that attempts to migrate
> mappings from the failing KSM page to another healthy KSM page within
> the same chain before resorting to killing processes.
>
> The recovery process works as follows:
> 1. When a memory failure is detected on a KSM page, identify if the
>    failing node is part of a chain (has duplicates)
> 2. Search for another healthy KSM page within the same chain
> 3. For each process mapping the failing page:
>    - Update the PTE to point to the healthy KSM page
>    - Migrate the rmap_item to the new stable node
> 4. If all migrations succeed, remove the failing node from the chain
> 5. Only kill processes if recovery is impossible or fails
Thanks for your patch. This looks really interesting to me and
I think this might be a significant improvement. :)
>
> Signed-off-by: Longlong Xia <xialonglong@...inos.cn>
> ---
> mm/ksm.c | 183 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 183 insertions(+)
>
> diff --git a/mm/ksm.c b/mm/ksm.c
> index 160787bb121c..590d30cfe800 100644
> --- a/mm/ksm.c
> +++ b/mm/ksm.c
> @@ -3084,6 +3084,183 @@ void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
> }
>
> #ifdef CONFIG_MEMORY_FAILURE
> +static struct ksm_stable_node *find_chain_head(struct ksm_stable_node *dup_node)
> +{
> +        struct ksm_stable_node *stable_node, *dup;
> +        struct rb_node *node;
> +        int nid;
> +
> +        if (!is_stable_node_dup(dup_node))
> +                return NULL;
> +
> +        for (nid = 0; nid < ksm_nr_node_ids; nid++) {
> +                node = rb_first(root_stable_tree + nid);
> +                for (; node; node = rb_next(node)) {
> +                        stable_node = rb_entry(node,
> +                                               struct ksm_stable_node,
> +                                               node);
> +
> +                        if (!is_stable_node_chain(stable_node))
> +                                continue;
> +
> +                        hlist_for_each_entry(dup, &stable_node->hlist,
> +                                             hlist_dup) {
> +                                if (dup == dup_node)
> +                                        return stable_node;
> +                        }
> +                }
> +        }
> +
> +        return NULL;
> +}
> +
> +static struct folio *
> +find_target_folio(struct ksm_stable_node *failing_node, struct ksm_stable_node **target_dup)
> +{
> +        struct ksm_stable_node *chain_head, *dup;
> +        struct hlist_node *hlist_safe;
> +        struct folio *target_folio;
> +
> +        if (!is_stable_node_dup(failing_node))
> +                return NULL;
> +
> +        chain_head = find_chain_head(failing_node);
> +        if (!chain_head)
> +                return NULL;
> +
> +        hlist_for_each_entry_safe(dup, hlist_safe, &chain_head->hlist, hlist_dup) {
> +                if (dup == failing_node)
> +                        continue;
> +
> +                target_folio = ksm_get_folio(dup, KSM_GET_FOLIO_TRYLOCK);
> +                if (target_folio) {
> +                        *target_dup = dup;
> +                        return target_folio;
> +                }
> +        }
> +
> +        return NULL;
> +}
> +
> +static int replace_failing_page(struct vm_area_struct *vma, struct page *page,
> +                                struct page *kpage, unsigned long addr)
> +{
> +        struct folio *kfolio = page_folio(kpage);
> +        struct mm_struct *mm = vma->vm_mm;
> +        struct folio *folio = page_folio(page);
> +        pmd_t *pmd;
> +        pte_t *ptep;
> +        pte_t newpte;
> +        spinlock_t *ptl;
> +        int err = -EFAULT;
> +        struct mmu_notifier_range range;
> +
> +        pmd = mm_find_pmd(mm, addr);
> +        if (!pmd)
> +                goto out;
> +
> +        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
> +                                addr + PAGE_SIZE);
> +        mmu_notifier_invalidate_range_start(&range);
> +
> +        ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
> +        if (!ptep)
> +                goto out_mn;
> +
> +        if (!is_zero_pfn(page_to_pfn(kpage))) {
> +                folio_get(kfolio);
> +                folio_add_anon_rmap_pte(kfolio, kpage, vma, addr, RMAP_NONE);
> +                newpte = mk_pte(kpage, vma->vm_page_prot);
> +        } else {
> +                newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot)));
> +                ksm_map_zero_page(mm);
> +                dec_mm_counter(mm, MM_ANONPAGES);
> +        }
> +
> +        flush_cache_page(vma, addr, pte_pfn(ptep_get(ptep)));
> +        ptep_clear_flush(vma, addr, ptep);
> +        set_pte_at(mm, addr, ptep, newpte);
> +
> +        folio_remove_rmap_pte(folio, page, vma);
> +        if (!folio_mapped(folio))
> +                folio_free_swap(folio);
> +        folio_put(folio);
> +
> +        pte_unmap_unlock(ptep, ptl);
> +        err = 0;
> +out_mn:
> +        mmu_notifier_invalidate_range_end(&range);
> +out:
> +        return err;
> +}
> +
> +static bool ksm_recover_within_chain(struct ksm_stable_node *failing_node)
> +{
> +        struct ksm_rmap_item *rmap_item;
> +        struct hlist_node *hlist_safe;
> +        struct folio *failing_folio = NULL;
> +        struct folio *target_folio = NULL;
> +        struct ksm_stable_node *target_dup = NULL;
> +        int err;
> +
> +        if (!is_stable_node_dup(failing_node))
> +                return false;
> +
> +        failing_folio = ksm_get_folio(failing_node, KSM_GET_FOLIO_NOLOCK);
> +        if (!failing_folio)
> +                return false;
> +
> +        target_folio = find_target_folio(failing_node, &target_dup);
> +        if (!target_folio) {
> +                folio_put(failing_folio);
> +                return false;
> +        }
> +
> +        hlist_for_each_entry_safe(rmap_item, hlist_safe, &failing_node->hlist, hlist) {
> +                struct mm_struct *mm = rmap_item->mm;
> +                unsigned long addr = rmap_item->address & PAGE_MASK;
> +                struct vm_area_struct *vma;
> +
> +                mmap_read_lock(mm);
I briefly looked at the code, and I think the lock order might be broken here.
mm/filemap.c documents the lock order as below:

 *  ->mmap_lock
 *    ->invalidate_lock        (filemap_fault)
 *      ->lock_page            (filemap_fault, access_process_vm)

But here mmap_lock is taken after the target folio has already been
locked (via KSM_GET_FOLIO_TRYLOCK in find_target_folio), i.e. the
documented order is inverted. Is this a problem?
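One way I could imagine keeping the documented order -- just a rough,
untested sketch, assuming find_target_folio were changed to only take a
reference on the target folio without locking it, and guessing at how
the rest of the loop calls replace_failing_page since that part is not
quoted above -- would be to lock the folio per-mm, only after mmap_lock
is already held:

        target_folio = find_target_folio(failing_node, &target_dup);  /* ref only, unlocked */
        ...
        hlist_for_each_entry_safe(rmap_item, hlist_safe, &failing_node->hlist, hlist) {
                struct mm_struct *mm = rmap_item->mm;
                unsigned long addr = rmap_item->address & PAGE_MASK;
                struct vm_area_struct *vma;

                mmap_read_lock(mm);               /* mmap_lock first ... */
                folio_lock(target_folio);         /* ... then the folio lock */

                vma = vma_lookup(mm, addr);
                if (vma)
                        err = replace_failing_page(vma, &failing_folio->page,
                                                   &target_folio->page, addr);

                folio_unlock(target_folio);
                mmap_read_unlock(mm);
        }

I haven't thought through whether taking and dropping the folio lock
per-mm has other side effects, so please take this only as an
illustration of the ordering.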
Thanks.