Message-ID: <26cb47bb-df98-4bda-a101-3c27298e4452@lucifer.local>
Date: Mon, 1 Sep 2025 15:24:54 +0100
From: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
To: Max Kellermann <max.kellermann@...os.com>
Cc: akpm@...ux-foundation.org, david@...hat.com, axelrasmussen@...gle.com,
        yuanchu@...gle.com, willy@...radead.org, hughd@...gle.com,
        mhocko@...e.com, linux-kernel@...r.kernel.org, linux-mm@...ck.org,
        Liam.Howlett@...cle.com, vbabka@...e.cz, rppt@...nel.org,
        surenb@...gle.com, vishal.moola@...il.com, linux@...linux.org.uk,
        James.Bottomley@...senpartnership.com, deller@....de,
        agordeev@...ux.ibm.com, gerald.schaefer@...ux.ibm.com,
        hca@...ux.ibm.com, gor@...ux.ibm.com, borntraeger@...ux.ibm.com,
        svens@...ux.ibm.com, davem@...emloft.net, andreas@...sler.com,
        dave.hansen@...ux.intel.com, luto@...nel.org, peterz@...radead.org,
        tglx@...utronix.de, mingo@...hat.com, bp@...en8.de, x86@...nel.org,
        hpa@...or.com, chris@...kel.net, jcmvbkbc@...il.com,
        viro@...iv.linux.org.uk, brauner@...nel.org, jack@...e.cz,
        weixugc@...gle.com, baolin.wang@...ux.alibaba.com, rientjes@...gle.com,
        shakeel.butt@...ux.dev, thuth@...hat.com, broonie@...nel.org,
        osalvador@...e.de, jfalempe@...hat.com, mpe@...erman.id.au,
        nysal@...ux.ibm.com, linux-arm-kernel@...ts.infradead.org,
        linux-parisc@...r.kernel.org, linux-s390@...r.kernel.org,
        sparclinux@...r.kernel.org, linux-fsdevel@...r.kernel.org
Subject: Re: [PATCH v5 02/12] mm: constify pagemap related test functions for
 improved const-correctness

On Mon, Sep 01, 2025 at 02:30:18PM +0200, Max Kellermann wrote:
> We select certain test functions which invoke either each other,
> functions that are already const-ified, or no further functions.
>
> It is therefore relatively trivial to const-ify them, which provides
> a basis for const-ifying more functions further up the call stack.
>
> Signed-off-by: Max Kellermann <max.kellermann@...os.com>
> Reviewed-by: Vishal Moola (Oracle) <vishal.moola@...il.com>
> ---
>  include/linux/pagemap.h | 57 +++++++++++++++++++++--------------------
>  1 file changed, 29 insertions(+), 28 deletions(-)
>
> diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
> index a3e16d74792f..1d35f9e1416e 100644
> --- a/include/linux/pagemap.h
> +++ b/include/linux/pagemap.h
> @@ -140,7 +140,7 @@ static inline int inode_drain_writes(struct inode *inode)
>  	return filemap_write_and_wait(inode->i_mapping);
>  }
>
> -static inline bool mapping_empty(struct address_space *mapping)
> +static inline bool mapping_empty(const struct address_space *const mapping)

Generally - I'm not sure how useful this 'double' const-ification is.

const struct <type> *const <val>
  ^                    ^
  |                    |
  |                    |
  1                    2

Means:

1. (most useful) Pointer to const (const <type> *<param>) means that the
   dereferenced value is const, so *<param> = <val> and <param>-><field> = <val>
   are prohibited.

2. (less useful) Const pointer (*const <param>) means that the pointer value
   itself can't be modified, so e.g. <param> = <new param> is prohibited.

I mean, it's kinda nice to guarantee that this won't happen, but I'm not sure
we're getting much value for the noise?
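
To illustrate with a quick sketch (a hypothetical struct foo, not code from
this patch):

	struct foo { int bar; };

	static int frob(const struct foo *const p)
	{
		/* p->bar = 1;	won't compile: (1) the pointee is const */
		/* p = NULL;	won't compile: (2) the pointer itself is const */
		return p->bar;	/* reading through the pointer is still fine */
	}

Note the outer const is purely local to the function body: top-level
qualifiers on parameters don't change the function's type, so callers see
no difference either way.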

We also never mention that we're doing this in any commit message or the cover
letter.

>  {
>  	return xa_empty(&mapping->i_pages);
>  }
> @@ -166,7 +166,7 @@ static inline bool mapping_empty(struct address_space *mapping)
>   * refcount and the referenced bit, which will be elevated or set in
>   * the process of adding new cache pages to an inode.
>   */
> -static inline bool mapping_shrinkable(struct address_space *mapping)
> +static inline bool mapping_shrinkable(const struct address_space *const mapping)
>  {
>  	void *head;
>
> @@ -267,7 +267,7 @@ static inline void mapping_clear_unevictable(struct address_space *mapping)
>  	clear_bit(AS_UNEVICTABLE, &mapping->flags);
>  }
>
> -static inline bool mapping_unevictable(struct address_space *mapping)
> +static inline bool mapping_unevictable(const struct address_space *const mapping)
>  {
>  	return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
>  }
> @@ -277,7 +277,7 @@ static inline void mapping_set_exiting(struct address_space *mapping)
>  	set_bit(AS_EXITING, &mapping->flags);
>  }
>
> -static inline int mapping_exiting(struct address_space *mapping)
> +static inline int mapping_exiting(const struct address_space *const mapping)
>  {
>  	return test_bit(AS_EXITING, &mapping->flags);
>  }
> @@ -287,7 +287,7 @@ static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
>  	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
>  }
>
> -static inline int mapping_use_writeback_tags(struct address_space *mapping)
> +static inline int mapping_use_writeback_tags(const struct address_space *const mapping)
>  {
>  	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
>  }
> @@ -333,7 +333,7 @@ static inline void mapping_set_inaccessible(struct address_space *mapping)
>  	set_bit(AS_INACCESSIBLE, &mapping->flags);
>  }
>
> -static inline bool mapping_inaccessible(struct address_space *mapping)
> +static inline bool mapping_inaccessible(const struct address_space *const mapping)
>  {
>  	return test_bit(AS_INACCESSIBLE, &mapping->flags);
>  }
> @@ -343,18 +343,18 @@ static inline void mapping_set_writeback_may_deadlock_on_reclaim(struct address_
>  	set_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags);
>  }
>
> -static inline bool mapping_writeback_may_deadlock_on_reclaim(struct address_space *mapping)
> +static inline bool mapping_writeback_may_deadlock_on_reclaim(const struct address_space *const mapping)
>  {
>  	return test_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags);
>  }
>
> -static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
> +static inline gfp_t mapping_gfp_mask(const struct address_space *const mapping)
>  {
>  	return mapping->gfp_mask;
>  }
>
>  /* Restricts the given gfp_mask to what the mapping allows. */
> -static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
> +static inline gfp_t mapping_gfp_constraint(const struct address_space *mapping,
>  		gfp_t gfp_mask)
>  {
>  	return mapping_gfp_mask(mapping) & gfp_mask;
> @@ -477,13 +477,13 @@ mapping_min_folio_order(const struct address_space *mapping)
>  }
>
>  static inline unsigned long
> -mapping_min_folio_nrpages(struct address_space *mapping)
> +mapping_min_folio_nrpages(const struct address_space *const mapping)
>  {
>  	return 1UL << mapping_min_folio_order(mapping);
>  }
>
>  static inline unsigned long
> -mapping_min_folio_nrbytes(struct address_space *mapping)
> +mapping_min_folio_nrbytes(const struct address_space *const mapping)
>  {
>  	return mapping_min_folio_nrpages(mapping) << PAGE_SHIFT;
>  }
> @@ -497,7 +497,7 @@ mapping_min_folio_nrbytes(struct address_space *mapping)
>   * new folio to the page cache and need to know what index to give it,
>   * call this function.
>   */
> -static inline pgoff_t mapping_align_index(struct address_space *mapping,
> +static inline pgoff_t mapping_align_index(const struct address_space *const mapping,
>  					  pgoff_t index)
>  {
>  	return round_down(index, mapping_min_folio_nrpages(mapping));
> @@ -507,7 +507,7 @@ static inline pgoff_t mapping_align_index(struct address_space *mapping,
>   * Large folio support currently depends on THP.  These dependencies are
>   * being worked on but are not yet fixed.
>   */
> -static inline bool mapping_large_folio_support(struct address_space *mapping)
> +static inline bool mapping_large_folio_support(const struct address_space *mapping)
>  {
>  	/* AS_FOLIO_ORDER is only reasonable for pagecache folios */
>  	VM_WARN_ONCE((unsigned long)mapping & FOLIO_MAPPING_ANON,
> @@ -522,7 +522,7 @@ static inline size_t mapping_max_folio_size(const struct address_space *mapping)
>  	return PAGE_SIZE << mapping_max_folio_order(mapping);
>  }
>
> -static inline int filemap_nr_thps(struct address_space *mapping)
> +static inline int filemap_nr_thps(const struct address_space *const mapping)
>  {
>  #ifdef CONFIG_READ_ONLY_THP_FOR_FS
>  	return atomic_read(&mapping->nr_thps);
> @@ -936,7 +936,7 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
>   *
>   * Return: The index of the folio which follows this folio in the file.
>   */
> -static inline pgoff_t folio_next_index(struct folio *folio)
> +static inline pgoff_t folio_next_index(const struct folio *const folio)
>  {
>  	return folio->index + folio_nr_pages(folio);
>  }
> @@ -965,7 +965,7 @@ static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
>   * e.g., shmem did not move this folio to the swap cache.
>   * Return: true or false.
>   */
> -static inline bool folio_contains(struct folio *folio, pgoff_t index)
> +static inline bool folio_contains(const struct folio *const folio, pgoff_t index)
>  {
>  	VM_WARN_ON_ONCE_FOLIO(folio_test_swapcache(folio), folio);
>  	return index - folio->index < folio_nr_pages(folio);
> @@ -1042,13 +1042,13 @@ static inline loff_t page_offset(struct page *page)
>  /*
>   * Get the offset in PAGE_SIZE (even for hugetlb folios).
>   */
> -static inline pgoff_t folio_pgoff(struct folio *folio)
> +static inline pgoff_t folio_pgoff(const struct folio *const folio)
>  {
>  	return folio->index;
>  }
>
> -static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
> -					unsigned long address)
> +static inline pgoff_t linear_page_index(const struct vm_area_struct *const vma,
> +					const unsigned long address)
>  {
>  	pgoff_t pgoff;
>  	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
> @@ -1468,7 +1468,7 @@ static inline unsigned int __readahead_batch(struct readahead_control *rac,
>   * readahead_pos - The byte offset into the file of this readahead request.
>   * @rac: The readahead request.
>   */
> -static inline loff_t readahead_pos(struct readahead_control *rac)
> +static inline loff_t readahead_pos(const struct readahead_control *const rac)
>  {
>  	return (loff_t)rac->_index * PAGE_SIZE;
>  }
> @@ -1477,7 +1477,7 @@ static inline loff_t readahead_pos(struct readahead_control *rac)
>   * readahead_length - The number of bytes in this readahead request.
>   * @rac: The readahead request.
>   */
> -static inline size_t readahead_length(struct readahead_control *rac)
> +static inline size_t readahead_length(const struct readahead_control *const rac)
>  {
>  	return rac->_nr_pages * PAGE_SIZE;
>  }
> @@ -1486,7 +1486,7 @@ static inline size_t readahead_length(struct readahead_control *rac)
>   * readahead_index - The index of the first page in this readahead request.
>   * @rac: The readahead request.
>   */
> -static inline pgoff_t readahead_index(struct readahead_control *rac)
> +static inline pgoff_t readahead_index(const struct readahead_control *const rac)
>  {
>  	return rac->_index;
>  }
> @@ -1495,7 +1495,7 @@ static inline pgoff_t readahead_index(struct readahead_control *rac)
>   * readahead_count - The number of pages in this readahead request.
>   * @rac: The readahead request.
>   */
> -static inline unsigned int readahead_count(struct readahead_control *rac)
> +static inline unsigned int readahead_count(const struct readahead_control *const rac)
>  {
>  	return rac->_nr_pages;
>  }
> @@ -1504,12 +1504,12 @@ static inline unsigned int readahead_count(struct readahead_control *rac)
>   * readahead_batch_length - The number of bytes in the current batch.
>   * @rac: The readahead request.
>   */
> -static inline size_t readahead_batch_length(struct readahead_control *rac)
> +static inline size_t readahead_batch_length(const struct readahead_control *const rac)
>  {
>  	return rac->_batch_count * PAGE_SIZE;
>  }
>
> -static inline unsigned long dir_pages(struct inode *inode)
> +static inline unsigned long dir_pages(const struct inode *const inode)
>  {
>  	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
>  			       PAGE_SHIFT;
> @@ -1523,8 +1523,8 @@ static inline unsigned long dir_pages(struct inode *inode)
>   * Return: the number of bytes in the folio up to EOF,
>   * or -EFAULT if the folio was truncated.
>   */
> -static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
> -					      struct inode *inode)
> +static inline ssize_t folio_mkwrite_check_truncate(const struct folio *const folio,
> +						   const struct inode *const inode)
>  {
>  	loff_t size = i_size_read(inode);
>  	pgoff_t index = size >> PAGE_SHIFT;
> @@ -1555,7 +1555,8 @@ static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
>   * Return: The number of filesystem blocks covered by this folio.
>   */
>  static inline
> -unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio)
> +unsigned int i_blocks_per_folio(const struct inode *const inode,
> +				const struct folio *const folio)
>  {
>  	return folio_size(folio) >> inode->i_blkbits;
>  }
> --
> 2.47.2
>
