Message-ID: <20250901123028.3383461-11-max.kellermann@ionos.com>
Date: Mon, 1 Sep 2025 14:30:26 +0200
From: Max Kellermann <max.kellermann@...os.com>
To: akpm@...ux-foundation.org,
david@...hat.com,
axelrasmussen@...gle.com,
yuanchu@...gle.com,
willy@...radead.org,
hughd@...gle.com,
mhocko@...e.com,
linux-kernel@...r.kernel.org,
linux-mm@...ck.org,
lorenzo.stoakes@...cle.com,
Liam.Howlett@...cle.com,
vbabka@...e.cz,
rppt@...nel.org,
surenb@...gle.com,
vishal.moola@...il.com,
linux@...linux.org.uk,
James.Bottomley@...senPartnership.com,
deller@....de,
agordeev@...ux.ibm.com,
gerald.schaefer@...ux.ibm.com,
hca@...ux.ibm.com,
gor@...ux.ibm.com,
borntraeger@...ux.ibm.com,
svens@...ux.ibm.com,
davem@...emloft.net,
andreas@...sler.com,
dave.hansen@...ux.intel.com,
luto@...nel.org,
peterz@...radead.org,
tglx@...utronix.de,
mingo@...hat.com,
bp@...en8.de,
x86@...nel.org,
hpa@...or.com,
chris@...kel.net,
jcmvbkbc@...il.com,
viro@...iv.linux.org.uk,
brauner@...nel.org,
jack@...e.cz,
weixugc@...gle.com,
baolin.wang@...ux.alibaba.com,
rientjes@...gle.com,
shakeel.butt@...ux.dev,
max.kellermann@...os.com,
thuth@...hat.com,
broonie@...nel.org,
osalvador@...e.de,
jfalempe@...hat.com,
mpe@...erman.id.au,
nysal@...ux.ibm.com,
linux-arm-kernel@...ts.infradead.org,
linux-parisc@...r.kernel.org,
linux-s390@...r.kernel.org,
sparclinux@...r.kernel.org,
linux-fsdevel@...r.kernel.org
Subject: [PATCH v5 10/12] mm: constify various inline test functions for improved const-correctness
We select certain test functions from mm_inline.h which either invoke
each other, invoke functions that are already const-ified, or invoke
no further functions.
Const-ifying them is therefore trivial, and it provides a basis for
const-ifying their callers further up the call stack.
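For illustration, a minimal userspace sketch of that propagation (the
stand-in type and all names here are hypothetical, not kernel code):
once a leaf test helper accepts a const pointer, each caller that only
reads can be const-ified in turn.

  #include <stdbool.h>

  /* Hypothetical stand-in for struct folio; not kernel code. */
  struct folio_sketch { unsigned long flags; };

  /* Leaf helper: reads a flag bit, never writes, so it takes const. */
  static inline bool test_swapbacked_sketch(const struct folio_sketch *folio)
  {
          return folio->flags & 1UL;
  }

  /* Invokes only const-taking helpers, so it can be const-ified too. */
  static inline int is_file_lru_sketch(const struct folio_sketch *folio)
  {
          return !test_swapbacked_sketch(folio);
  }

  /* ...which in turn lets read-only callers higher up accept const. */
  static inline bool caller_sketch(const struct folio_sketch *folio)
  {
          return is_file_lru_sketch(folio);
  }

  int main(void)
  {
          const struct folio_sketch f = { .flags = 0 };

          return caller_sketch(&f) ? 1 : 0;
  }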
One exception is folio_migrate_refs(), which writes to the "new"
folio; there, only the "old" folio pointer is const-ified, because its
"flags" field is only read and never written.
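A minimal sketch of that asymmetric signature (again hypothetical,
reusing the stand-in type from the sketch above): the destination
stays mutable because it is written, while the source can be const
because it is only read.

  /* "old" is only read, hence const; "new" is written, hence mutable. */
  static inline void migrate_refs_sketch(struct folio_sketch *new,
                                         const struct folio_sketch *old)
  {
          new->flags |= old->flags & 0x4UL;
  }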
Signed-off-by: Max Kellermann <max.kellermann@...os.com>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@...il.com>
---
include/linux/mm_inline.h | 26 ++++++++++++++------------
1 file changed, 14 insertions(+), 12 deletions(-)
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 150302b4a905..8c4f6f95ba9f 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -25,7 +25,7 @@
* 0 if @folio is a normal anonymous folio, a tmpfs folio or otherwise
* ram or swap backed folio.
*/
-static inline int folio_is_file_lru(struct folio *folio)
+static inline int folio_is_file_lru(const struct folio *const folio)
{
return !folio_test_swapbacked(folio);
}
@@ -84,7 +84,7 @@ static __always_inline void __folio_clear_lru_flags(struct folio *folio)
* Return: The LRU list a folio should be on, as an index
* into the array of LRU lists.
*/
-static __always_inline enum lru_list folio_lru_list(struct folio *folio)
+static __always_inline enum lru_list folio_lru_list(const struct folio *const folio)
{
enum lru_list lru;
@@ -141,7 +141,7 @@ static inline int lru_tier_from_refs(int refs, bool workingset)
return workingset ? MAX_NR_TIERS - 1 : order_base_2(refs);
}
-static inline int folio_lru_refs(struct folio *folio)
+static inline int folio_lru_refs(const struct folio *const folio)
{
unsigned long flags = READ_ONCE(folio->flags.f);
@@ -154,14 +154,14 @@ static inline int folio_lru_refs(struct folio *folio)
return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + 1;
}
-static inline int folio_lru_gen(struct folio *folio)
+static inline int folio_lru_gen(const struct folio *folio)
{
unsigned long flags = READ_ONCE(folio->flags.f);
return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
}
-static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen)
+static inline bool lru_gen_is_active(const struct lruvec *const lruvec, const int gen)
{
unsigned long max_seq = lruvec->lrugen.max_seq;
@@ -217,12 +217,13 @@ static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *foli
VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
}
-static inline unsigned long lru_gen_folio_seq(struct lruvec *lruvec, struct folio *folio,
+static inline unsigned long lru_gen_folio_seq(const struct lruvec *const lruvec,
+ const struct folio *const folio,
bool reclaiming)
{
int gen;
int type = folio_is_file_lru(folio);
- struct lru_gen_folio *lrugen = &lruvec->lrugen;
+ const struct lru_gen_folio *lrugen = &lruvec->lrugen;
/*
* +-----------------------------------+-----------------------------------+
@@ -302,7 +303,8 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
return true;
}
-static inline void folio_migrate_refs(struct folio *new, struct folio *old)
+static inline void folio_migrate_refs(struct folio *const new,
+ const struct folio *const old)
{
unsigned long refs = READ_ONCE(old->flags.f) & LRU_REFS_MASK;
@@ -330,7 +332,7 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
return false;
}
-static inline void folio_migrate_refs(struct folio *new, struct folio *old)
+static inline void folio_migrate_refs(struct folio *new, const struct folio *old)
{
}
@@ -508,7 +510,7 @@ static inline void dec_tlb_flush_pending(struct mm_struct *mm)
atomic_dec(&mm->tlb_flush_pending);
}
-static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+static inline bool mm_tlb_flush_pending(const struct mm_struct *const mm)
{
/*
* Must be called after having acquired the PTL; orders against that
@@ -521,7 +523,7 @@ static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
return atomic_read(&mm->tlb_flush_pending);
}
-static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
+static inline bool mm_tlb_flush_nested(const struct mm_struct *const mm)
{
/*
* Similar to mm_tlb_flush_pending(), we must have acquired the PTL
@@ -605,7 +607,7 @@ pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
return false;
}
-static inline bool vma_has_recency(struct vm_area_struct *vma)
+static inline bool vma_has_recency(const struct vm_area_struct *const vma)
{
if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))
return false;
--
2.47.2