Message-ID: <6233DD66-9A2E-4885-875D-1E79179146D7@nvidia.com>
Date: Fri, 09 Aug 2024 09:28:09 -0400
From: Zi Yan <ziy@...dia.com>
To: "Huang, Ying" <ying.huang@...el.com>,
 Andrew Morton <akpm@...ux-foundation.org>
Cc: Kefeng Wang <wangkefeng.wang@...wei.com>,
 David Hildenbrand <david@...hat.com>,
 Baolin Wang <baolin.wang@...ux.alibaba.com>, linux-mm@...ck.org,
 linux-kernel@...r.kernel.org, stable@...r.kernel.org
Subject: Re: [PATCH 1/2] mm/numa: no task_numa_fault() call if page table is
 changed

On 9 Aug 2024, at 3:52, Huang, Ying wrote:

> Zi Yan <ziy@...dia.com> writes:
>
>> On 8 Aug 2024, at 21:25, Huang, Ying wrote:
>>
>>> Kefeng Wang <wangkefeng.wang@...wei.com> writes:
>>>
>>>> On 2024/8/8 22:21, Zi Yan wrote:
>>>>> On 8 Aug 2024, at 10:14, David Hildenbrand wrote:
>>>>>
>>>>>> On 08.08.24 16:13, Zi Yan wrote:
>>>>>>> On 8 Aug 2024, at 4:22, David Hildenbrand wrote:
>>>>>>>
>>>>>>>> On 08.08.24 05:19, Baolin Wang wrote:
>>>>>>>>>
>>>>>>>>>
>>>> ...
>>>>>>>> Agreed, maybe we should simply handle that right away and replace the "goto out;" users by "return 0;".
>>>>>>>>
>>>>>>>> Then, just copy the 3 LOC.
>>>>>>>>
>>>>>>>> For mm/memory.c that would be:
>>>>>>>>
>>>>>>>> diff --git a/mm/memory.c b/mm/memory.c
>>>>>>>> index 67496dc5064f..410ba50ca746 100644
>>>>>>>> --- a/mm/memory.c
>>>>>>>> +++ b/mm/memory.c
>>>>>>>> @@ -5461,7 +5461,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>>>>>>>>            if (unlikely(!pte_same(old_pte, vmf->orig_pte))) {
>>>>>>>>                   pte_unmap_unlock(vmf->pte, vmf->ptl);
>>>>>>>> -               goto out;
>>>>>>>> +               return 0;
>>>>>>>>           }
>>>>>>>>            pte = pte_modify(old_pte, vma->vm_page_prot);
>>>>>>>> @@ -5528,15 +5528,14 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>>>>>>>>                   vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
>>>>>>>>                                                  vmf->address, &vmf->ptl);
>>>>>>>>                   if (unlikely(!vmf->pte))
>>>>>>>> -                       goto out;
>>>>>>>> +                       return 0;
>>>>>>>>                   if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
>>>>>>>>                           pte_unmap_unlock(vmf->pte, vmf->ptl);
>>>>>>>> -                       goto out;
>>>>>>>> +                       return 0;
>>>>>>>>                   }
>>>>>>>>                   goto out_map;
>>>>>>>>           }
>>>>>>>>    -out:
>>>>>>>>           if (nid != NUMA_NO_NODE)
>>>>>>>>                   task_numa_fault(last_cpupid, nid, nr_pages, flags);
>>>>>>>>           return 0;
>>>>
>>>> Maybe drop this part too,
>>>>
>>>> diff --git a/mm/memory.c b/mm/memory.c
>>>> index 410ba50ca746..07343c1469e0 100644
>>>> --- a/mm/memory.c
>>>> +++ b/mm/memory.c
>>>> @@ -5523,6 +5523,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>>>>         if (!migrate_misplaced_folio(folio, vma, target_nid)) {
>>>>                 nid = target_nid;
>>>>                 flags |= TNF_MIGRATED;
>>>> +               goto out;
>>>>         } else {
>>>>                 flags |= TNF_MIGRATE_FAIL;
>>>>                 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
>>>> @@ -5533,12 +5534,8 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>>>>                         pte_unmap_unlock(vmf->pte, vmf->ptl);
>>>>                         return 0;
>>>>                 }
>>>> -               goto out_map;
>>>>         }
>>>>
>>>> -       if (nid != NUMA_NO_NODE)
>>>> -               task_numa_fault(last_cpupid, nid, nr_pages, flags);
>>>> -       return 0;
>>>>  out_map:
>>>>         /*
>>>>          * Make it present again, depending on how arch implements
>>>
>>> IMHO, migration success is the normal path, while migration failure is the
>>> error-processing path.  If so, it's better to use "goto" for the error
>>> path rather than for the normal path.
>>>
>>>> @@ -5551,6 +5548,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>>>>                 numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte,
>>>>                                             writable);
>>>>         pte_unmap_unlock(vmf->pte, vmf->ptl);
>>>> +out:
>>>>         if (nid != NUMA_NO_NODE)
>>>>                 task_numa_fault(last_cpupid, nid, nr_pages, flags);
>>>>         return 0;
>>>>
>>>>
>>
>> How about calling task_numa_fault() and returning in the successful migration path?
>> (target_nid cannot be NUMA_NO_NODE, so the "if" check is not needed there.)
>>
>> diff --git a/mm/memory.c b/mm/memory.c
>> index 3441f60d54ef..abdb73a68b80 100644
>> --- a/mm/memory.c
>> +++ b/mm/memory.c
>> @@ -5526,7 +5526,8 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>>         if (!migrate_misplaced_folio(folio, vma, target_nid)) {
>>                 nid = target_nid;
>>                 flags |= TNF_MIGRATED;
>> -               goto out;
>> +               task_numa_fault(last_cpupid, nid, nr_pages, flags);
>> +               return 0;
>>         } else {
>>                 flags |= TNF_MIGRATE_FAIL;
>>                 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
>> @@ -5550,7 +5551,6 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>>                 numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte,
>>                                             writable);
>>         pte_unmap_unlock(vmf->pte, vmf->ptl);
>> -out:
>>         if (nid != NUMA_NO_NODE)
>>                 task_numa_fault(last_cpupid, nid, nr_pages, flags);
>>         return 0;
>>
>
> This looks better to me, or we could change it further.

Thanks. Will put a fixup below.

>
>        if (migrate_misplaced_folio(folio, vma, target_nid))
>                goto out_map_pt;
>
>        nid = target_nid;
>        flags |= TNF_MIGRATED;
>        task_numa_fault(last_cpupid, nid, nr_pages, flags);
>
>        return 0;
>
> out_map_pt:
>        flags |= TNF_MIGRATE_FAIL;
>        vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
>        ...
>

I will send a separate patch for this refactoring, since this patch
is meant for fixing commit b99a342d4f11 ("NUMA balancing: reduce TLB flush via
delaying mapping on hint page fault").
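
For clarity, with the fixup folded in, the tail of do_numa_page() ends up
roughly as sketched below (do_huge_pmd_numa_page() is changed the same way).
This is only an illustration assembled from the fixup hunks; the "..." parts
are unchanged code that is elided here, so do not read it as a literal copy
of the function:

	if (!migrate_misplaced_folio(folio, vma, target_nid)) {
		nid = target_nid;
		flags |= TNF_MIGRATED;
		/* success: nid == target_nid, never NUMA_NO_NODE */
		task_numa_fault(last_cpupid, nid, nr_pages, flags);
		return 0;
	} else {
		flags |= TNF_MIGRATE_FAIL;
		/* retake the PTL; if the page table changed under us,
		 * bail out with "return 0" and skip the accounting */
		...
	}

	...
	pte_unmap_unlock(vmf->pte, vmf->ptl);

	/* the remaining paths can still reach here with nid == NUMA_NO_NODE */
	if (nid != NUMA_NO_NODE)
		task_numa_fault(last_cpupid, nid, nr_pages, flags);
	return 0;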


Hi Andrew,

Can you fold the fixup below into this patch? Thanks.


From 645fa83b2eed14494755ed67e5c52a55656287ac Mon Sep 17 00:00:00 2001
From: Zi Yan <ziy@...dia.com>
Date: Thu, 8 Aug 2024 22:06:41 -0400
Subject: [PATCH] fixup! fixup! mm/numa: no task_numa_fault() call if page
 table is changed

Based on a suggestion from Ying.

Link: https://lore.kernel.org/linux-mm/87cymizdvc.fsf@yhuang6-desk2.ccr.corp.intel.com/
Signed-off-by: Zi Yan <ziy@...dia.com>
---
 mm/huge_memory.c | 5 +++--
 mm/memory.c      | 5 +++--
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4e4364a17e6d..0e79ccaaf5e4 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1724,7 +1724,8 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 	if (!migrate_misplaced_folio(folio, vma, target_nid)) {
 		flags |= TNF_MIGRATED;
 		nid = target_nid;
-		goto out;
+		task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
+		return 0;
 	} else {
 		flags |= TNF_MIGRATE_FAIL;
 		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
@@ -1743,7 +1744,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
 	update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
 	spin_unlock(vmf->ptl);
-out:
+
 	if (nid != NUMA_NO_NODE)
 		task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
 	return 0;
diff --git a/mm/memory.c b/mm/memory.c
index d9b1dff9dc57..6c3aadf3e5ad 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5523,7 +5523,8 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
 	if (!migrate_misplaced_folio(folio, vma, target_nid)) {
 		nid = target_nid;
 		flags |= TNF_MIGRATED;
-		goto out;
+		task_numa_fault(last_cpupid, nid, nr_pages, flags);
+		return 0;
 	} else {
 		flags |= TNF_MIGRATE_FAIL;
 		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
@@ -5547,7 +5548,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
 		numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte,
 					    writable);
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
-out:
+
 	if (nid != NUMA_NO_NODE)
 		task_numa_fault(last_cpupid, nid, nr_pages, flags);
 	return 0;
-- 
2.43.0




Best Regards,
Yan, Zi

