Message-ID: <20250402201841.3245371-3-dwmw2@infradead.org>
Date: Wed, 2 Apr 2025 21:18:41 +0100
From: David Woodhouse <dwmw2@...radead.org>
To: Mike Rapoport <rppt@...nel.org>
Cc: Andrew Morton <akpm@...ux-foundation.org>,
"Sauerwein, David" <dssauerw@...zon.de>,
Anshuman Khandual <anshuman.khandual@....com>,
Ard Biesheuvel <ardb@...nel.org>,
Catalin Marinas <catalin.marinas@....com>,
David Hildenbrand <david@...hat.com>,
Marc Zyngier <maz@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Mike Rapoport <rppt@...ux.ibm.com>,
Will Deacon <will@...nel.org>,
kvmarm@...ts.cs.columbia.edu,
linux-arm-kernel@...ts.infradead.org,
linux-kernel@...r.kernel.org,
linux-mm@...ck.org
Subject: [RFC PATCH 3/3] mm: Implement for_each_valid_pfn() for CONFIG_SPARSEMEM
From: David Woodhouse <dwmw@...zon.co.uk>
Introduce a first_valid_pfn() helper which takes a pointer to a PFN and
updates it to point to the first valid PFN at or after that point,
returning true if a valid PFN was found.
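
For example (an illustrative sketch, not taken from this series;
start_pfn and end_pfn stand for an arbitrary PFN range), a caller can
then visit only the valid PFNs in a range with the for_each_valid_pfn()
iterator added at the end of this patch:

	unsigned long pfn;

	/* Walk only the PFNs in [start_pfn, end_pfn) which are valid */
	for_each_valid_pfn(pfn, start_pfn, end_pfn) {
		struct page *page = pfn_to_page(pfn);

		/* ... operate on each valid struct page ... */
	}
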
The new helper largely mirrors pfn_valid(), calling into a
pfn_section_first_valid() helper which is trivial in the
!CONFIG_SPARSEMEM_VMEMMAP case and, in the VMEMMAP case, skips forward
to the next populated subsection as needed.
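
To illustrate the VMEMMAP case (again a hypothetical sketch: "ms" and
"nr" are assumed to name a present, non-early section whose
subsection_map has only bit 5 set):

	/* Start from a PFN that falls in a hole in subsection 2 */
	unsigned long pfn = section_nr_to_pfn(nr) + 2 * PAGES_PER_SUBSECTION;

	if (pfn_section_first_valid(ms, &pfn)) {
		/* pfn has been advanced to the start of subsection 5 */
		WARN_ON(pfn != section_nr_to_pfn(nr) + 5 * PAGES_PER_SUBSECTION);
	}
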
Signed-off-by: David Woodhouse <dwmw@...zon.co.uk>
---
include/linux/mmzone.h | 65 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 65 insertions(+)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 32ecb5cadbaf..a389d1857b85 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -2074,11 +2074,37 @@ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
 
 	return usage ? test_bit(idx, usage->subsection_map) : 0;
 }
+
+static inline bool pfn_section_first_valid(struct mem_section *ms, unsigned long *pfn)
+{
+	struct mem_section_usage *usage = READ_ONCE(ms->usage);
+	int idx = subsection_map_index(*pfn);
+	unsigned long bit;
+
+	if (!usage)
+		return false;
+
+	if (test_bit(idx, usage->subsection_map))
+		return true;
+
+	/* Find the next subsection that exists */
+	bit = find_next_bit(usage->subsection_map, SUBSECTIONS_PER_SECTION, idx);
+	if (bit == SUBSECTIONS_PER_SECTION)
+		return false;
+
+	*pfn = (*pfn & PAGE_SECTION_MASK) + (bit * PAGES_PER_SUBSECTION);
+	return true;
+}
 #else
 static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
 {
 	return 1;
 }
+
+static inline bool pfn_section_first_valid(struct mem_section *ms, unsigned long *pfn)
+{
+	return true;
+}
 #endif
 
 void sparse_init_early_section(int nid, struct page *map, unsigned long pnum,
@@ -2127,6 +2153,45 @@ static inline int pfn_valid(unsigned long pfn)
 
 	return ret;
 }
+
+static inline bool first_valid_pfn(unsigned long *p_pfn)
+{
+	unsigned long pfn = *p_pfn;
+	unsigned long nr = pfn_to_section_nr(pfn);
+	struct mem_section *ms;
+	bool ret = false;
+
+	ms = __pfn_to_section(pfn);
+
+	rcu_read_lock_sched();
+
+	while (!ret && nr <= __highest_present_section_nr) {
+		if (valid_section(ms) &&
+		    (early_section(ms) || pfn_section_first_valid(ms, &pfn))) {
+			ret = true;
+			break;
+		}
+
+		nr++;
+		if (nr > __highest_present_section_nr)
+			break;
+
+		pfn = section_nr_to_pfn(nr);
+		ms = __pfn_to_section(pfn);
+	}
+
+	rcu_read_unlock_sched();
+
+	*p_pfn = pfn;
+
+	return ret;
+}
+
+#define for_each_valid_pfn(_pfn, _start_pfn, _end_pfn)			\
+	for ((_pfn) = (_start_pfn);					\
+	     first_valid_pfn(&(_pfn)) && (_pfn) < (_end_pfn);		\
+	     (_pfn)++)
+
 #endif
 
 static inline int pfn_in_present_section(unsigned long pfn)
--
2.49.0