Message-ID: <20241014105912.3207374-48-ryan.roberts@arm.com>
Date: Mon, 14 Oct 2024 11:58:55 +0100
From: Ryan Roberts <ryan.roberts@....com>
To: Andrew Morton <akpm@...ux-foundation.org>,
	Anshuman Khandual <anshuman.khandual@....com>,
	Ard Biesheuvel <ardb@...nel.org>,
	Catalin Marinas <catalin.marinas@....com>,
	David Hildenbrand <david@...hat.com>,
	Greg Marsden <greg.marsden@...cle.com>,
	Ivan Ivanov <ivan.ivanov@...e.com>,
	Kalesh Singh <kaleshsingh@...gle.com>,
	Marc Zyngier <maz@...nel.org>,
	Mark Rutland <mark.rutland@....com>,
	Matthias Brugger <mbrugger@...e.com>,
	Miroslav Benes <mbenes@...e.cz>,
	Oliver Upton <oliver.upton@...ux.dev>,
	Will Deacon <will@...nel.org>
Cc: Ryan Roberts <ryan.roberts@....com>,
	kvmarm@...ts.linux.dev,
	linux-arm-kernel@...ts.infradead.org,
	linux-kernel@...r.kernel.org,
	linux-mm@...ck.org
Subject: [RFC PATCH v1 48/57] arm64: Convert switch to if for non-const comparison values

When boot-time page size is enabled, some page-table geometry macros
(e.g. PUD_SHIFT, PMD_SIZE, CONT_PMD_SIZE) are no longer compile-time
constants. Where these macros are used as case labels in switch
statements, the code no longer compiles, since C requires case labels
to be integer constant expressions.

Let's convert these switch statements to if/else blocks, which can
handle runtime values.

Signed-off-by: Ryan Roberts <ryan.roberts@....com>
---

***NOTE***
Any confused maintainers may want to read the cover note here for context:
https://lore.kernel.org/all/20241014105514.3206191-1-ryan.roberts@arm.com/
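
For illustration only (not part of the patch): a minimal, standalone
sketch of the constraint being worked around. The names pmd_shift,
pud_shift and classify() below are hypothetical stand-ins, not kernel
code; they model what happens once a macro's value is only known at
boot time.

	/*
	 * Sketch: when a value is resolved at runtime rather than at
	 * compile time, it can no longer appear as a switch case label,
	 * because C requires case labels to be integer constant
	 * expressions. The same comparisons written as if/else compile
	 * fine.
	 */
	#include <stdio.h>

	/* Hypothetical runtime stand-ins for PMD_SHIFT / PUD_SHIFT. */
	static unsigned int pmd_shift;
	static unsigned int pud_shift;

	static const char *classify(unsigned int shift)
	{
		/*
		 * The switch form would not compile here:
		 *
		 *	switch (shift) {
		 *	case pmd_shift:	// error: case label is not constant
		 *		...
		 *	}
		 */
		if (shift == pud_shift)
			return "PUD";
		if (shift == pmd_shift)
			return "PMD";
		return "unknown";
	}

	int main(void)
	{
		/* Pretend these were discovered at boot time. */
		pmd_shift = 21;
		pud_shift = 30;

		printf("%s\n", classify(21));	/* prints "PMD" */
		return 0;
	}
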

 arch/arm64/kvm/mmu.c        | 32 +++++++++++++++-----------------
 arch/arm64/mm/hugetlbpage.c | 34 +++++++++++-----------------------
 2 files changed, 26 insertions(+), 40 deletions(-)

diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index a509b63bd4dd5..248a2d7ad6dbb 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1487,29 +1487,27 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		vma_shift = get_vma_page_shift(vma, hva);
 	}
 
-	switch (vma_shift) {
 #ifndef __PAGETABLE_PMD_FOLDED
-	case PUD_SHIFT:
-		if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
-			break;
-		fallthrough;
+	if (vma_shift == PUD_SHIFT) {
+		if (!fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
+			vma_shift = PMD_SHIFT;
+	}
 #endif
-	case CONT_PMD_SHIFT:
+	if (vma_shift == CONT_PMD_SHIFT) {
 		vma_shift = PMD_SHIFT;
-		fallthrough;
-	case PMD_SHIFT:
-		if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE))
-			break;
-		fallthrough;
-	case CONT_PTE_SHIFT:
+	}
+	if (vma_shift == PMD_SHIFT) {
+		if (!fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE))
+			vma_shift = PAGE_SHIFT;
+	}
+	if (vma_shift == CONT_PTE_SHIFT) {
 		vma_shift = PAGE_SHIFT;
 		force_pte = true;
-		fallthrough;
-	case PAGE_SHIFT:
-		break;
-	default:
-		WARN_ONCE(1, "Unknown vma_shift %d", vma_shift);
 	}
+	if (vma_shift != PUD_SHIFT &&
+	    vma_shift != PMD_SHIFT &&
+	    vma_shift != PAGE_SHIFT)
+		WARN_ONCE(1, "Unknown vma_shift %d", vma_shift);
 
 	vma_pagesize = 1UL << vma_shift;
 
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 5f1e2103888b7..bc98c20655bba 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -51,16 +51,12 @@ void __init arm64_hugetlb_cma_reserve(void)
 
 static bool __hugetlb_valid_size(unsigned long size)
 {
-	switch (size) {
 #ifndef __PAGETABLE_PMD_FOLDED
-	case PUD_SIZE:
+	if (size == PUD_SIZE)
 		return pud_sect_supported();
 #endif
-	case CONT_PMD_SIZE:
-	case PMD_SIZE:
-	case CONT_PTE_SIZE:
+	if (size == CONT_PMD_SIZE || size == PMD_SIZE || size == CONT_PTE_SIZE)
 		return true;
-	}
 
 	return false;
 }
@@ -104,24 +100,20 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
 
 	*pgsize = size;
 
-	switch (size) {
 #ifndef __PAGETABLE_PMD_FOLDED
-	case PUD_SIZE:
+	if (size == PUD_SIZE) {
 		if (pud_sect_supported())
 			contig_ptes = 1;
-		break;
+	} else
 #endif
-	case PMD_SIZE:
+	if (size == PMD_SIZE) {
 		contig_ptes = 1;
-		break;
-	case CONT_PMD_SIZE:
+	} else if (size == CONT_PMD_SIZE) {
 		*pgsize = PMD_SIZE;
 		contig_ptes = CONT_PMDS;
-		break;
-	case CONT_PTE_SIZE:
+	} else if (size == CONT_PTE_SIZE) {
 		*pgsize = PAGE_SIZE;
 		contig_ptes = CONT_PTES;
-		break;
 	}
 
 	return contig_ptes;
@@ -339,20 +331,16 @@ unsigned long hugetlb_mask_last_page(struct hstate *h)
 {
 	unsigned long hp_size = huge_page_size(h);
 
-	switch (hp_size) {
 #ifndef __PAGETABLE_PMD_FOLDED
-	case PUD_SIZE:
+	if (hp_size == PUD_SIZE)
 		return PGDIR_SIZE - PUD_SIZE;
 #endif
-	case CONT_PMD_SIZE:
+	if (hp_size == CONT_PMD_SIZE)
 		return PUD_SIZE - CONT_PMD_SIZE;
-	case PMD_SIZE:
+	if (hp_size == PMD_SIZE)
 		return PUD_SIZE - PMD_SIZE;
-	case CONT_PTE_SIZE:
+	if (hp_size == CONT_PTE_SIZE)
 		return PMD_SIZE - CONT_PTE_SIZE;
-	default:
-		break;
-	}
 
 	return 0UL;
 }
-- 
2.43.0

