Message-ID: <fa3c7352-f089-4a7b-8d4b-f6d371c236ce@linux.dev>
Date: Thu, 19 Jun 2025 16:28:53 +0800
From: Ye Liu <ye.liu@...ux.dev>
To: Dev Jain <dev.jain@....com>, Andrew Morton <akpm@...ux-foundation.org>,
 David Hildenbrand <david@...hat.com>,
 Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
Cc: Ye Liu <liuye@...inos.cn>, Xu Xin <xu.xin16@....com.cn>,
 Chengming Zhou <chengming.zhou@...ux.dev>, Rik van Riel <riel@...riel.com>,
 "Liam R. Howlett" <Liam.Howlett@...cle.com>, Vlastimil Babka
 <vbabka@...e.cz>, Harry Yoo <harry.yoo@...cle.com>, linux-mm@...ck.org,
 linux-kernel@...r.kernel.org
Subject: Re: [PATCH] mm/rmap: Add NULL checks for rmap_walk_control callbacks


Hi Lorenzo and Dev,

Thanks for your feedback. I understand all your points and will drop this patch.

Best regards,
Ye Liu

On 2025/6/19 16:17, Dev Jain wrote:
> 
> On 19/06/25 1:20 pm, Ye Liu wrote:
>> From: Ye Liu <liuye@...inos.cn>
>>
>> Add NULL pointer checks for the rmap_one callback in rmap_walk operations
>> to prevent potential NULL pointer dereferences. Also clean up the code
>> by removing redundant comments and caching folio_nr_pages().
>>
>> Signed-off-by: Ye Liu <liuye@...inos.cn>
>> ---
> 
> Don't really see the point of this patch. The rmap_one callback will
> always be there, as we need a way to define how to unmap/do the reverse
> map walk for one VMA at a time. And folio_nr_pages() will probably
> get cached by the compiler anyway.
> 
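(For context, callers of rmap_walk() are expected to install rmap_one explicitly when they build their rmap_walk_control. A minimal sketch of such a control block, loosely modelled on try_to_unmap() in mm/rmap.c; exact members vary by kernel version:

    struct rmap_walk_control rwc = {
        .rmap_one  = try_to_unmap_one,          /* per-VMA worker; not left NULL */
        .arg       = (void *)flags,             /* handed back to rmap_one/invalid_vma */
        .done      = folio_not_mapped,          /* optional: stop once the folio is unmapped */
        .anon_lock = folio_lock_anon_vma_read,  /* optional: custom anon_vma locking */
    };

    rmap_walk(folio, &rwc);

The optional callbacks such as invalid_vma and done are already guarded with NULL checks in the walkers; rmap_one is the one hook a caller cannot meaningfully omit.)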
>>   mm/ksm.c  |  2 +-
>>   mm/rmap.c | 14 +++++++-------
>>   2 files changed, 8 insertions(+), 8 deletions(-)
>>
>> diff --git a/mm/ksm.c b/mm/ksm.c
>> index 18b3690bb69a..22ad069d1860 100644
>> --- a/mm/ksm.c
>> +++ b/mm/ksm.c
>> @@ -3068,7 +3068,7 @@ void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
>>               if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
>>                   continue;
>>
>> -            if (!rwc->rmap_one(folio, vma, addr, rwc->arg)) {
>> +            if (rwc->rmap_one && !rwc->rmap_one(folio, vma, addr, rwc->arg)) {
>>                   anon_vma_unlock_read(anon_vma);
>>                   return;
>>               }
>> diff --git a/mm/rmap.c b/mm/rmap.c
>> index fb63d9256f09..17d43d104a0d 100644
>> --- a/mm/rmap.c
>> +++ b/mm/rmap.c
>> @@ -1202,8 +1202,7 @@ int mapping_wrprotect_range(struct address_space *mapping, pgoff_t pgoff,
>>       if (!mapping)
>>           return 0;
>>
>> -    __rmap_walk_file(/* folio = */NULL, mapping, pgoff, nr_pages, &rwc,
>> -             /* locked = */false);
>> +    __rmap_walk_file(NULL, mapping, pgoff, nr_pages, &rwc, false);
>>
>>       return state.cleaned;
>>   }
>> @@ -2806,6 +2805,7 @@ static void rmap_walk_anon(struct folio *folio,
>>       struct anon_vma *anon_vma;
>>       pgoff_t pgoff_start, pgoff_end;
>>       struct anon_vma_chain *avc;
>> +    unsigned long nr_pages;
>>
>>       if (locked) {
>>           anon_vma = folio_anon_vma(folio);
>> @@ -2817,13 +2817,13 @@ static void rmap_walk_anon(struct folio *folio,
>>       if (!anon_vma)
>>           return;
>>
>> +    nr_pages = folio_nr_pages(folio);
>>       pgoff_start = folio_pgoff(folio);
>> -    pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
>> +    pgoff_end = pgoff_start + nr_pages - 1;
>>       anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
>>               pgoff_start, pgoff_end) {
>>           struct vm_area_struct *vma = avc->vma;
>> -        unsigned long address = vma_address(vma, pgoff_start,
>> -                folio_nr_pages(folio));
>> +        unsigned long address = vma_address(vma, pgoff_start, nr_pages);
>>
>>           VM_BUG_ON_VMA(address == -EFAULT, vma);
>>           cond_resched();
>> @@ -2831,7 +2831,7 @@ static void rmap_walk_anon(struct folio *folio,
>>           if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
>>               continue;
>>
>> -        if (!rwc->rmap_one(folio, vma, address, rwc->arg))
>> +        if (rwc->rmap_one && !rwc->rmap_one(folio, vma, address, rwc->arg))
>>               break;
>>           if (rwc->done && rwc->done(folio))
>>               break;
>> @@ -2894,7 +2894,7 @@ static void __rmap_walk_file(struct folio *folio, struct address_space *mapping,
>>           if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
>>               continue;
>>
>> -        if (!rwc->rmap_one(folio, vma, address, rwc->arg))
>> +        if (rwc->rmap_one && !rwc->rmap_one(folio, vma, address, rwc->arg))
>>               goto done;
>>           if (rwc->done && rwc->done(folio))
>>               goto done;
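
For reference, the control structure under discussion is declared along these lines in include/linux/rmap.h (recent kernels; exact layout and comments may differ):

    struct rmap_walk_control {
        void *arg;
        bool try_lock;
        bool contended;
        /* return false to stop walking the remaining VMAs for this folio */
        bool (*rmap_one)(struct folio *folio, struct vm_area_struct *vma,
                         unsigned long addr, void *arg);
        /* return non-zero once the walk as a whole can finish early */
        int (*done)(struct folio *folio);
        struct anon_vma *(*anon_lock)(struct folio *folio,
                                      struct rmap_walk_control *rwc);
        bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
    };

As the walkers quoted above show, invalid_vma and done are NULL-checked before use, while rmap_one is assumed to be set; the dropped patch would have extended the NULL check to rmap_one as well.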
