Message-ID: <75fa6c34-c972-4710-b37c-a03ae797465b@lucifer.local>
Date: Tue, 2 Sep 2025 07:17:06 +0100
From: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
To: Max Kellermann <max.kellermann@...os.com>
Cc: akpm@...ux-foundation.org, david@...hat.com, axelrasmussen@...gle.com,
        yuanchu@...gle.com, willy@...radead.org, hughd@...gle.com,
        mhocko@...e.com, linux-kernel@...r.kernel.org, linux-mm@...ck.org,
        Liam.Howlett@...cle.com, vbabka@...e.cz, rppt@...nel.org,
        surenb@...gle.com, vishal.moola@...il.com, linux@...linux.org.uk,
        James.Bottomley@...senpartnership.com, deller@....de,
        agordeev@...ux.ibm.com, gerald.schaefer@...ux.ibm.com,
        hca@...ux.ibm.com, gor@...ux.ibm.com, borntraeger@...ux.ibm.com,
        svens@...ux.ibm.com, davem@...emloft.net, andreas@...sler.com,
        dave.hansen@...ux.intel.com, luto@...nel.org, peterz@...radead.org,
        tglx@...utronix.de, mingo@...hat.com, bp@...en8.de, x86@...nel.org,
        hpa@...or.com, chris@...kel.net, jcmvbkbc@...il.com,
        viro@...iv.linux.org.uk, brauner@...nel.org, jack@...e.cz,
        weixugc@...gle.com, baolin.wang@...ux.alibaba.com, rientjes@...gle.com,
        shakeel.butt@...ux.dev, thuth@...hat.com, broonie@...nel.org,
        osalvador@...e.de, jfalempe@...hat.com, mpe@...erman.id.au,
        nysal@...ux.ibm.com, linux-arm-kernel@...ts.infradead.org,
        linux-parisc@...r.kernel.org, linux-s390@...r.kernel.org,
        sparclinux@...r.kernel.org, linux-fsdevel@...r.kernel.org
Subject: Re: [PATCH v6 11/12] mm: constify assert/test functions in mm.h

On Mon, Sep 01, 2025 at 10:50:20PM +0200, Max Kellermann wrote:
> For improved const-correctness.
>
> We select assert and test functions that invoke only each other,
> functions that are already const-ified, or no further functions at
> all.
>
> It is therefore relatively trivial to const-ify them, which in turn
> provides a basis for const-ifying their callers further up the call
> stack.
>
> Signed-off-by: Max Kellermann <max.kellermann@...os.com>

LGTM, so:

Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
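
For context on the "basis for further const-ification" point: once a
leaf helper accepts a const pointer, callers holding only a const
pointer can invoke it without a cast, so those callers can be
const-ified in turn. A minimal standalone sketch of the pattern
(hypothetical names and toy types, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

struct vma {				/* toy stand-in for vm_area_struct */
	unsigned long vm_start;
	unsigned long vm_end;
	unsigned long vm_flags;
};

#define VM_ACCESS_FLAGS 0x7UL

/* Leaf predicate: only reads *vma, so it can take a const pointer. */
static bool vma_is_accessible(const struct vma *vma)
{
	return vma->vm_flags & VM_ACCESS_FLAGS;
}

/*
 * Caller: because the leaf accepts const, this function can also take
 * const; with a non-const leaf, passing vma here would discard the
 * qualifier and the compiler would warn.
 */
static bool vma_is_usable_range(const struct vma *vma,
				unsigned long start, unsigned long end)
{
	return vma->vm_start <= start && end <= vma->vm_end &&
	       vma_is_accessible(vma);
}

int main(void)
{
	const struct vma v = {
		.vm_start = 0x1000, .vm_end = 0x2000, .vm_flags = 0x1,
	};

	printf("%d\n", vma_is_usable_range(&v, 0x1000, 0x1800));
	return 0;
}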

> ---
>  include/linux/mm.h | 40 ++++++++++++++++++++--------------------
>  1 file changed, 20 insertions(+), 20 deletions(-)
>
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 23864c3519d6..c3767688771c 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -703,7 +703,7 @@ static inline void release_fault_lock(struct vm_fault *vmf)
>  		mmap_read_unlock(vmf->vma->vm_mm);
>  }
>
> -static inline void assert_fault_locked(struct vm_fault *vmf)
> +static inline void assert_fault_locked(const struct vm_fault *vmf)
>  {
>  	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
>  		vma_assert_locked(vmf->vma);
> @@ -716,7 +716,7 @@ static inline void release_fault_lock(struct vm_fault *vmf)
>  	mmap_read_unlock(vmf->vma->vm_mm);
>  }
>
> -static inline void assert_fault_locked(struct vm_fault *vmf)
> +static inline void assert_fault_locked(const struct vm_fault *vmf)
>  {
>  	mmap_assert_locked(vmf->vma->vm_mm);
>  }
> @@ -859,7 +859,7 @@ static inline bool vma_is_initial_stack(const struct vm_area_struct *vma)
>  		vma->vm_end >= vma->vm_mm->start_stack;
>  }
>
> -static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
> +static inline bool vma_is_temporary_stack(const struct vm_area_struct *vma)
>  {
>  	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
>
> @@ -873,7 +873,7 @@ static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
>  	return false;
>  }
>
> -static inline bool vma_is_foreign(struct vm_area_struct *vma)
> +static inline bool vma_is_foreign(const struct vm_area_struct *vma)
>  {
>  	if (!current->mm)
>  		return true;
> @@ -884,7 +884,7 @@ static inline bool vma_is_foreign(struct vm_area_struct *vma)
>  	return false;
>  }
>
> -static inline bool vma_is_accessible(struct vm_area_struct *vma)
> +static inline bool vma_is_accessible(const struct vm_area_struct *vma)
>  {
>  	return vma->vm_flags & VM_ACCESS_FLAGS;
>  }
> @@ -895,7 +895,7 @@ static inline bool is_shared_maywrite(vm_flags_t vm_flags)
>  		(VM_SHARED | VM_MAYWRITE);
>  }
>
> -static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
> +static inline bool vma_is_shared_maywrite(const struct vm_area_struct *vma)
>  {
>  	return is_shared_maywrite(vma->vm_flags);
>  }
> @@ -1839,7 +1839,7 @@ static inline struct folio *pfn_folio(unsigned long pfn)
>  }
>
>  #ifdef CONFIG_MMU
> -static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
> +static inline pte_t mk_pte(const struct page *page, pgprot_t pgprot)
>  {
>  	return pfn_pte(page_to_pfn(page), pgprot);
>  }
> @@ -1854,7 +1854,7 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
>   *
>   * Return: A page table entry suitable for mapping this folio.
>   */
> -static inline pte_t folio_mk_pte(struct folio *folio, pgprot_t pgprot)
> +static inline pte_t folio_mk_pte(const struct folio *folio, pgprot_t pgprot)
>  {
>  	return pfn_pte(folio_pfn(folio), pgprot);
>  }
> @@ -1870,7 +1870,7 @@ static inline pte_t folio_mk_pte(struct folio *folio, pgprot_t pgprot)
>   *
>   * Return: A page table entry suitable for mapping this folio.
>   */
> -static inline pmd_t folio_mk_pmd(struct folio *folio, pgprot_t pgprot)
> +static inline pmd_t folio_mk_pmd(const struct folio *folio, pgprot_t pgprot)
>  {
>  	return pmd_mkhuge(pfn_pmd(folio_pfn(folio), pgprot));
>  }
> @@ -1886,7 +1886,7 @@ static inline pmd_t folio_mk_pmd(struct folio *folio, pgprot_t pgprot)
>   *
>   * Return: A page table entry suitable for mapping this folio.
>   */
> -static inline pud_t folio_mk_pud(struct folio *folio, pgprot_t pgprot)
> +static inline pud_t folio_mk_pud(const struct folio *folio, pgprot_t pgprot)
>  {
>  	return pud_mkhuge(pfn_pud(folio_pfn(folio), pgprot));
>  }
> @@ -3488,7 +3488,7 @@ struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
>  	return mtree_load(&mm->mm_mt, addr);
>  }
>
> -static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
> +static inline unsigned long stack_guard_start_gap(const struct vm_area_struct *vma)
>  {
>  	if (vma->vm_flags & VM_GROWSDOWN)
>  		return stack_guard_gap;
> @@ -3500,7 +3500,7 @@ static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
>  	return 0;
>  }
>
> -static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
> +static inline unsigned long vm_start_gap(const struct vm_area_struct *vma)
>  {
>  	unsigned long gap = stack_guard_start_gap(vma);
>  	unsigned long vm_start = vma->vm_start;
> @@ -3511,7 +3511,7 @@ static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
>  	return vm_start;
>  }
>
> -static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
> +static inline unsigned long vm_end_gap(const struct vm_area_struct *vma)
>  {
>  	unsigned long vm_end = vma->vm_end;
>
> @@ -3523,7 +3523,7 @@ static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
>  	return vm_end;
>  }
>
> -static inline unsigned long vma_pages(struct vm_area_struct *vma)
> +static inline unsigned long vma_pages(const struct vm_area_struct *vma)
>  {
>  	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
>  }
> @@ -3540,7 +3540,7 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
>  	return vma;
>  }
>
> -static inline bool range_in_vma(struct vm_area_struct *vma,
> +static inline bool range_in_vma(const struct vm_area_struct *vma,
>  				unsigned long start, unsigned long end)
>  {
>  	return (vma && vma->vm_start <= start && end <= vma->vm_end);
> @@ -3656,7 +3656,7 @@ static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
>   * Indicates whether GUP can follow a PROT_NONE mapped page, or whether
>   * a (NUMA hinting) fault is required.
>   */
> -static inline bool gup_can_follow_protnone(struct vm_area_struct *vma,
> +static inline bool gup_can_follow_protnone(const struct vm_area_struct *vma,
>  					   unsigned int flags)
>  {
>  	/*
> @@ -3786,7 +3786,7 @@ static inline bool debug_guardpage_enabled(void)
>  	return static_branch_unlikely(&_debug_guardpage_enabled);
>  }
>
> -static inline bool page_is_guard(struct page *page)
> +static inline bool page_is_guard(const struct page *page)
>  {
>  	if (!debug_guardpage_enabled())
>  		return false;
> @@ -3817,7 +3817,7 @@ static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
>  static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
>  static inline unsigned int debug_guardpage_minorder(void) { return 0; }
>  static inline bool debug_guardpage_enabled(void) { return false; }
> -static inline bool page_is_guard(struct page *page) { return false; }
> +static inline bool page_is_guard(const struct page *page) { return false; }
>  static inline bool set_page_guard(struct zone *zone, struct page *page,
>  			unsigned int order) { return false; }
>  static inline void clear_page_guard(struct zone *zone, struct page *page,
> @@ -3899,7 +3899,7 @@ void vmemmap_free(unsigned long start, unsigned long end,
>  #endif
>
>  #ifdef CONFIG_SPARSEMEM_VMEMMAP
> -static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
> +static inline unsigned long vmem_altmap_offset(const struct vmem_altmap *altmap)
>  {
>  	/* number of pfns from base where pfn_to_page() is valid */
>  	if (altmap)
> @@ -3913,7 +3913,7 @@ static inline void vmem_altmap_free(struct vmem_altmap *altmap,
>  	altmap->alloc -= nr_pfns;
>  }
>  #else
> -static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
> +static inline unsigned long vmem_altmap_offset(const struct vmem_altmap *altmap)
>  {
>  	return 0;
>  }
> --
> 2.47.2
>
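
The warning this kind of change avoids is easy to reproduce in
isolation: with the old non-const signatures, any caller holding only
a const pointer would trip gcc's -Wdiscarded-qualifiers. A hypothetical
standalone reproduction (toy types, not kernel code):

#include <stdbool.h>

struct page { unsigned long flags; };	/* toy stand-in */

static bool page_is_guard(struct page *page)	/* old, non-const */
{
	return page->flags & 1UL;
}

bool check(const struct page *page)
{
	/*
	 * gcc: warning: passing argument 1 of 'page_is_guard'
	 * discards 'const' qualifier from pointer target type
	 * [-Wdiscarded-qualifiers]
	 */
	return page_is_guard(page);
}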
