Message-ID: <fl5lkxxvlsyeur7e7ls2tnsh4afzyhvotoaluxcv2ge2tm2dp3@7vf7xvbf7mkp>
Date: Fri, 19 Dec 2025 12:27:27 -0500
From: "Liam R. Howlett" <Liam.Howlett@...cle.com>
To: Jane Chu <jane.chu@...cle.com>
Cc: muchun.song@...ux.dev, osalvador@...e.de, david@...nel.org,
linmiaohe@...wei.com, jiaqiyan@...gle.com, william.roche@...cle.com,
rientjes@...gle.com, akpm@...ux-foundation.org,
lorenzo.stoakes@...cle.com, rppt@...nel.org, surenb@...gle.com,
mhocko@...e.com, linux-mm@...ck.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH] mm/memory-failure: teach kill_accessing_process to
accept hugetlb tail page pfn
* Jane Chu <jane.chu@...cle.com> [251219 01:28]:
> When a hugetlb folio is poisoned again, try_memory_failure_hugetlb()
> passes the head pfn to kill_accessing_process(), which is not right.
> The precise pfn of the poisoned page should be used in order to
> determine the precise vaddr for the SIGBUS payload.
>
> This issue has already been taken care of in the normal path, that is,
> hwpoison_user_mappings(); see [1][2].  Furthermore, for [3] to work
> correctly in the hugetlb repoisoning case, it's essential to inform
> the VM of the precise poisoned page, not the head page.
>
> [1] https://lkml.kernel.org/r/20231218135837.3310403-1-willy@infradead.org
> [2] https://lkml.kernel.org/r/20250224211445.2663312-1-jane.chu@oracle.com
> [3] https://lore.kernel.org/lkml/20251116013223.1557158-1-jiaqiyan@google.com/
>
> Cc: <stable@...r.kernel.org>
> Signed-off-by: Jane Chu <jane.chu@...cle.com>

I don't see stable in the Cc list; did you miss it?

Looks good, small nit below.

Reviewed-by: Liam R. Howlett <Liam.Howlett@...cle.com>

> ---
> mm/memory-failure.c | 22 ++++++++++++----------
> 1 file changed, 12 insertions(+), 10 deletions(-)
>
> diff --git a/mm/memory-failure.c b/mm/memory-failure.c
> index 3edebb0cda30..c9d87811b1ea 100644
> --- a/mm/memory-failure.c
> +++ b/mm/memory-failure.c
> @@ -681,9 +681,11 @@ static void set_to_kill(struct to_kill *tk, unsigned long addr, short shift)
> }
>
> static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift,
> - unsigned long poisoned_pfn, struct to_kill *tk)
> + unsigned long poisoned_pfn, struct to_kill *tk,
> + int pte_nr)
> {
> unsigned long pfn = 0;
> + unsigned long hwpoison_vaddr;
>
> if (pte_present(pte)) {
> pfn = pte_pfn(pte);
> @@ -694,10 +696,11 @@ static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift,
> pfn = swp_offset_pfn(swp);
> }
>
> - if (!pfn || pfn != poisoned_pfn)
> + if (!pfn || (pfn > poisoned_pfn || (pfn + pte_nr - 1) < poisoned_pfn))
> return 0;
>
> - set_to_kill(tk, addr, shift);
> + hwpoison_vaddr = addr + ((poisoned_pfn - pfn) << PAGE_SHIFT);
> + set_to_kill(tk, hwpoison_vaddr, shift);
> return 1;
> }
>
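
As a side note for anyone following along, the new range check plus the
offset math can be exercised in isolation.  A minimal userspace sketch
(hypothetical helper names; PAGE_SHIFT assumed to be 12, and a 2MB
hugepage, i.e. 512 base pages, used for the numbers):

#include <assert.h>
#include <stdbool.h>

#define PAGE_SHIFT 12UL

/* Same shape as the new check: does a mapping that starts at @pfn and
 * spans @pte_nr pages cover @poisoned_pfn? */
static bool covers_poisoned_pfn(unsigned long pfn, unsigned long poisoned_pfn,
				unsigned long pte_nr)
{
	return pfn && poisoned_pfn >= pfn && poisoned_pfn <= pfn + pte_nr - 1;
}

/* Same shape as hwpoison_vaddr = addr + ((poisoned_pfn - pfn) << PAGE_SHIFT) */
static unsigned long poisoned_vaddr(unsigned long addr, unsigned long pfn,
				    unsigned long poisoned_pfn)
{
	return addr + ((poisoned_pfn - pfn) << PAGE_SHIFT);
}

int main(void)
{
	/* mapping at 0x40000000, first pfn 0x1000, 512 base pages */
	assert(covers_poisoned_pfn(0x1000, 0x1025, 512));
	assert(!covers_poisoned_pfn(0x1000, 0x1200, 512));	/* past the folio */
	assert(poisoned_vaddr(0x40000000, 0x1000, 0x1025) ==
	       0x40000000 + (0x25UL << PAGE_SHIFT));
	return 0;
}

With pte_nr set to pages_per_huge_page(h) in the hugetlb walker below,
a poison hit on a tail page now resolves to the right vaddr inside the
huge mapping instead of collapsing to the head.
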
> @@ -749,7 +752,7 @@ static int hwpoison_pte_range(pmd_t *pmdp, unsigned long addr,
>
> for (; addr != end; ptep++, addr += PAGE_SIZE) {
> ret = check_hwpoisoned_entry(ptep_get(ptep), addr, PAGE_SHIFT,
> - hwp->pfn, &hwp->tk);
> + hwp->pfn, &hwp->tk, 1);
> if (ret == 1)
> break;
> }
> @@ -772,8 +775,8 @@ static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,
>
> ptl = huge_pte_lock(h, walk->mm, ptep);
> pte = huge_ptep_get(walk->mm, addr, ptep);
> - ret = check_hwpoisoned_entry(pte, addr, huge_page_shift(h),
> - hwp->pfn, &hwp->tk);
> + ret = check_hwpoisoned_entry(pte, addr, huge_page_shift(h), hwp->pfn,
> + &hwp->tk, pages_per_huge_page(h));
> spin_unlock(ptl);
> return ret;
> }
> @@ -2023,10 +2026,8 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb
> *hugetlb = 0;
> return 0;
> } else if (res == -EHWPOISON) {
> - if (flags & MF_ACTION_REQUIRED) {
> - folio = page_folio(p);
> - res = kill_accessing_process(current, folio_pfn(folio), flags);
> - }
> + if (flags & MF_ACTION_REQUIRED)
> + res = kill_accessing_process(current, pfn, flags);
> action_result(pfn, MF_MSG_ALREADY_POISONED, MF_FAILED);
> return res;
> } else if (res == -EBUSY) {
> @@ -2037,6 +2038,7 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb
> return action_result(pfn, MF_MSG_GET_HWPOISON, MF_IGNORED);
> }
>
> +

nit: extra whitespace added.

> folio = page_folio(p);
> folio_lock(folio);
>
> --
> 2.43.5
>
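
One more aside on the kill_accessing_process() change: the old code passed
folio_pfn(page_folio(p)), which always resolves to the head pfn, so the
tail offset was thrown away before the page walk even started.  A toy
model of that rounding (hypothetical helper; a naturally aligned order-9
folio, i.e. a 2MB hugepage, assumed):

#include <assert.h>

/* Rounds a pfn down to the head of its (naturally aligned) folio, which
 * is effectively what folio_pfn(page_folio(p)) handed to
 * kill_accessing_process() before this patch. */
static unsigned long head_pfn(unsigned long pfn, unsigned int order)
{
	return pfn & ~((1UL << order) - 1);
}

int main(void)
{
	unsigned long poisoned = 0x1025;	/* tail page of a 2MB hugepage */

	assert(head_pfn(poisoned, 9) == 0x1000);
	/* The walk then matched the head, so the SIGBUS vaddr pointed at
	 * the start of the huge mapping rather than the poisoned page. */
	return 0;
}

Passing the raw pfn, as the patch does, keeps that offset, so the sketch
after the check_hwpoisoned_entry() hunk above computes the exact vaddr.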