Message-ID: <9454603f-c187-4386-8244-69f304197954@arm.com>
Date: Thu, 19 Jun 2025 13:47:25 +0530
From: Dev Jain <dev.jain@....com>
To: Ye Liu <ye.liu@...ux.dev>, Andrew Morton <akpm@...ux-foundation.org>,
David Hildenbrand <david@...hat.com>,
Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
Cc: Ye Liu <liuye@...inos.cn>, Xu Xin <xu.xin16@....com.cn>,
Chengming Zhou <chengming.zhou@...ux.dev>, Rik van Riel <riel@...riel.com>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>, Vlastimil Babka
<vbabka@...e.cz>, Harry Yoo <harry.yoo@...cle.com>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH] mm/rmap: Add NULL checks for rmap_walk_control callbacks
On 19/06/25 1:20 pm, Ye Liu wrote:
> From: Ye Liu <liuye@...inos.cn>
>
> Add NULL pointer checks for rmap_one callback in rmap_walk operations
> to prevent potential NULL pointer dereferences. Also clean up some
> code by removing redundant comments and caching folio_nr_pages().
>
> Signed-off-by: Ye Liu <liuye@...inos.cn>
> ---
I don't really see the point of this patch. The rmap_one callback will
always be set: it is how a caller defines what to do (unmap, or whatever
the reverse-map walk is for) on one VMA at a time. And folio_nr_pages()
will most likely get cached by the compiler anyway.
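
For illustration, every rmap_walk() user wires the walk up roughly like
this (a minimal sketch, not lifted from any in-tree caller; my_rmap_one
and walk_example are made-up names):

	/* Per-VMA work; returning false stops the walk early. */
	static bool my_rmap_one(struct folio *folio, struct vm_area_struct *vma,
				unsigned long addr, void *arg)
	{
		return true;
	}

	static void walk_example(struct folio *folio)
	{
		struct rmap_walk_control rwc = {
			.rmap_one = my_rmap_one,	/* never left NULL by callers */
		};

		rmap_walk(folio, &rwc);
	}

Since no caller passes a NULL ->rmap_one, the added checks only buy an
extra branch on each iteration of a fairly hot loop.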
> mm/ksm.c | 2 +-
> mm/rmap.c | 14 +++++++-------
> 2 files changed, 8 insertions(+), 8 deletions(-)
>
> diff --git a/mm/ksm.c b/mm/ksm.c
> index 18b3690bb69a..22ad069d1860 100644
> --- a/mm/ksm.c
> +++ b/mm/ksm.c
> @@ -3068,7 +3068,7 @@ void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
> if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
> continue;
>
> - if (!rwc->rmap_one(folio, vma, addr, rwc->arg)) {
> + if (rwc->rmap_one && !rwc->rmap_one(folio, vma, addr, rwc->arg)) {
> anon_vma_unlock_read(anon_vma);
> return;
> }
> diff --git a/mm/rmap.c b/mm/rmap.c
> index fb63d9256f09..17d43d104a0d 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -1202,8 +1202,7 @@ int mapping_wrprotect_range(struct address_space *mapping, pgoff_t pgoff,
> if (!mapping)
> return 0;
>
> - __rmap_walk_file(/* folio = */NULL, mapping, pgoff, nr_pages, &rwc,
> - /* locked = */false);
> + __rmap_walk_file(NULL, mapping, pgoff, nr_pages, &rwc, false);
>
> return state.cleaned;
> }
> @@ -2806,6 +2805,7 @@ static void rmap_walk_anon(struct folio *folio,
> struct anon_vma *anon_vma;
> pgoff_t pgoff_start, pgoff_end;
> struct anon_vma_chain *avc;
> + unsigned long nr_pages;
>
> if (locked) {
> anon_vma = folio_anon_vma(folio);
> @@ -2817,13 +2817,13 @@ static void rmap_walk_anon(struct folio *folio,
> if (!anon_vma)
> return;
>
> + nr_pages = folio_nr_pages(folio);
> pgoff_start = folio_pgoff(folio);
> - pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
> + pgoff_end = pgoff_start + nr_pages - 1;
> anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
> pgoff_start, pgoff_end) {
> struct vm_area_struct *vma = avc->vma;
> - unsigned long address = vma_address(vma, pgoff_start,
> - folio_nr_pages(folio));
> + unsigned long address = vma_address(vma, pgoff_start, nr_pages);
>
> VM_BUG_ON_VMA(address == -EFAULT, vma);
> cond_resched();
> @@ -2831,7 +2831,7 @@ static void rmap_walk_anon(struct folio *folio,
> if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
> continue;
>
> - if (!rwc->rmap_one(folio, vma, address, rwc->arg))
> + if (rwc->rmap_one && !rwc->rmap_one(folio, vma, address, rwc->arg))
> break;
> if (rwc->done && rwc->done(folio))
> break;
> @@ -2894,7 +2894,7 @@ static void __rmap_walk_file(struct folio *folio, struct address_space *mapping,
> if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
> continue;
>
> - if (!rwc->rmap_one(folio, vma, address, rwc->arg))
> + if (rwc->rmap_one && !rwc->rmap_one(folio, vma, address, rwc->arg))
> goto done;
> if (rwc->done && rwc->done(folio))
> goto done;