Message-Id: <20251119224140.8616-40-david.laight.linux@gmail.com>
Date: Wed, 19 Nov 2025 22:41:35 +0000
From: david.laight.linux@...il.com
To: linux-kernel@...r.kernel.org,
linux-fsdevel@...r.kernel.org,
linux-mm@...ck.org
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Axel Rasmussen <axelrasmussen@...gle.com>,
Christoph Lameter <cl@...two.org>,
David Hildenbrand <david@...hat.com>,
Dennis Zhou <dennis@...nel.org>,
Johannes Weiner <hannes@...xchg.org>,
"Matthew Wilcox (Oracle)" <willy@...radead.org>,
Mike Rapoport <rppt@...nel.org>,
Tejun Heo <tj@...nel.org>,
Yuanchu Xie <yuanchu@...gle.com>,
David Laight <david.laight.linux@...il.com>
Subject: [PATCH 39/44] mm: use min() instead of min_t()
From: David Laight <david.laight.linux@...il.com>
min_t(unsigned int, a, b) casts an 'unsigned long' argument to 'unsigned int'.
Use min(a, b) instead: it promotes an 'unsigned int' operand to 'unsigned long'
and so cannot discard significant bits.
In these cases the 'unsigned long' values are small enough that the existing
code still gives the correct result, so this is a cleanup rather than a bug fix.
(Similarly for clamp_t(). Two of the sites are converted to umin() instead of min().)
Detected by an extra check added to min_t().
Signed-off-by: David Laight <david.laight.linux@...il.com>
---
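For reference, and not part of the patch itself: a minimal sketch of the failure
mode that min_t() can introduce and that min() avoids.  The function and variable
names below are invented for this illustration; min()/min_t() are the usual
helpers from <linux/minmax.h>, and the example assumes a 64-bit kernel.

	#include <linux/minmax.h>

	/* Illustration only: 'demo_min' and its locals are made-up names. */
	static unsigned long demo_min(void)
	{
		unsigned long nbytes = 4UL << 30;	/* 4 GiB, exceeds UINT_MAX */
		unsigned int limit = 64;

		/*
		 * min_t(unsigned int, ...) casts 'nbytes' down to 'unsigned int',
		 * truncating it to 0, so the "minimum" comes out as 0.
		 */
		unsigned long bad = min_t(unsigned int, nbytes, limit);

		/*
		 * min() instead promotes 'limit' to 'unsigned long', so no bits
		 * are discarded and the result is 64 as expected.
		 */
		unsigned long good = min(nbytes, limit);

		return bad + good;	/* 0 + 64 */
	}

The sites changed below never see values that large, which is why this is a
cleanup rather than a fix.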
mm/gup.c | 4 ++--
mm/memblock.c | 2 +-
mm/memory.c | 2 +-
mm/percpu.c | 2 +-
mm/truncate.c | 3 +--
mm/vmscan.c | 2 +-
6 files changed, 7 insertions(+), 8 deletions(-)
diff --git a/mm/gup.c b/mm/gup.c
index a8ba5112e4d0..55435b90dcc3 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -237,8 +237,8 @@ static inline struct folio *gup_folio_range_next(struct page *start,
unsigned int nr = 1;
if (folio_test_large(folio))
- nr = min_t(unsigned int, npages - i,
- folio_nr_pages(folio) - folio_page_idx(folio, next));
+ nr = min(npages - i,
+ folio_nr_pages(folio) - folio_page_idx(folio, next));
*ntails = nr;
return folio;
diff --git a/mm/memblock.c b/mm/memblock.c
index e23e16618e9b..19b491d39002 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -2208,7 +2208,7 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
* the case.
*/
if (start)
- order = min_t(int, MAX_PAGE_ORDER, __ffs(start));
+ order = min(MAX_PAGE_ORDER, __ffs(start));
else
order = MAX_PAGE_ORDER;
diff --git a/mm/memory.c b/mm/memory.c
index 74b45e258323..72f7bd71d65f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2375,7 +2375,7 @@ static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
while (pages_to_write_in_pmd) {
int pte_idx = 0;
- const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
+ const int batch_size = min(pages_to_write_in_pmd, 8);
start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
if (!start_pte) {
diff --git a/mm/percpu.c b/mm/percpu.c
index 81462ce5866e..cad59221d298 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1228,7 +1228,7 @@ static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
/*
* Search to find a fit.
*/
- end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS,
+ end = umin(start + alloc_bits + PCPU_BITMAP_BLOCK_BITS,
pcpu_chunk_map_bits(chunk));
bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits,
align_mask, &area_off, &area_bits);
diff --git a/mm/truncate.c b/mm/truncate.c
index 91eb92a5ce4f..7a56372d39a3 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -849,8 +849,7 @@ void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
unsigned int offset, end;
offset = from - folio_pos(folio);
- end = min_t(unsigned int, to - folio_pos(folio),
- folio_size(folio));
+ end = umin(to - folio_pos(folio), folio_size(folio));
folio_zero_segment(folio, offset, end);
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b2fc8b626d3d..82cd99a5d843 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3489,7 +3489,7 @@ static struct folio *get_pfn_folio(unsigned long pfn, struct mem_cgroup *memcg,
static bool suitable_to_scan(int total, int young)
{
- int n = clamp_t(int, cache_line_size() / sizeof(pte_t), 2, 8);
+ int n = clamp(cache_line_size() / sizeof(pte_t), 2, 8);
/* suitable if the average number of young PTEs per cacheline is >=1 */
return young * n >= total;
--
2.39.5