Message-Id: <20220819041156.873873-3-rppt@kernel.org>
Date: Fri, 19 Aug 2022 07:11:53 +0300
From: Mike Rapoport <rppt@...nel.org>
To: linux-arm-kernel@...ts.infradead.org
Cc: Ard Biesheuvel <ardb@...nel.org>,
Catalin Marinas <catalin.marinas@....com>,
Guanghui Feng <guanghuifeng@...ux.alibaba.com>,
Mark Rutland <mark.rutland@....com>,
Mike Rapoport <rppt@...nel.org>,
Mike Rapoport <rppt@...ux.ibm.com>,
Will Deacon <will@...nel.org>, linux-kernel@...r.kernel.org,
linux-mm@...ck.org
Subject: [PATCH 2/5] arm64/mmu: drop _hotplug from unmap_hotplug_* function names
From: Mike Rapoport <rppt@...ux.ibm.com>

so that they can be used for remapping the crash kernel.

Signed-off-by: Mike Rapoport <rppt@...ux.ibm.com>
---
 arch/arm64/mm/mmu.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index bf303f1dea25..ea81e40a25cd 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -911,7 +911,7 @@ static bool pgtable_range_aligned(unsigned long start, unsigned long end,
 	return true;
 }
 
-static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr,
+static void unmap_pte_range(pmd_t *pmdp, unsigned long addr,
 				    unsigned long end, bool free_mapped,
 				    struct vmem_altmap *altmap)
 {
@@ -932,7 +932,7 @@ static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr,
 	} while (addr += PAGE_SIZE, addr < end);
 }
 
-static void unmap_hotplug_pmd_range(pud_t *pudp, unsigned long addr,
+static void unmap_pmd_range(pud_t *pudp, unsigned long addr,
 				    unsigned long end, bool free_mapped,
 				    struct vmem_altmap *altmap)
 {
@@ -961,11 +961,11 @@ static void unmap_hotplug_pmd_range(pud_t *pudp, unsigned long addr,
 			continue;
 		}
 		WARN_ON(!pmd_table(pmd));
-		unmap_hotplug_pte_range(pmdp, addr, next, free_mapped, altmap);
+		unmap_pte_range(pmdp, addr, next, free_mapped, altmap);
 	} while (addr = next, addr < end);
 }
 
-static void unmap_hotplug_pud_range(p4d_t *p4dp, unsigned long addr,
+static void unmap_pud_range(p4d_t *p4dp, unsigned long addr,
 				    unsigned long end, bool free_mapped,
 				    struct vmem_altmap *altmap)
 {
@@ -994,11 +994,11 @@ static void unmap_hotplug_pud_range(p4d_t *p4dp, unsigned long addr,
 			continue;
 		}
 		WARN_ON(!pud_table(pud));
-		unmap_hotplug_pmd_range(pudp, addr, next, free_mapped, altmap);
+		unmap_pmd_range(pudp, addr, next, free_mapped, altmap);
 	} while (addr = next, addr < end);
 }
 
-static void unmap_hotplug_p4d_range(pgd_t *pgdp, unsigned long addr,
+static void unmap_p4d_range(pgd_t *pgdp, unsigned long addr,
 				    unsigned long end, bool free_mapped,
 				    struct vmem_altmap *altmap)
 {
@@ -1013,11 +1013,11 @@ static void unmap_hotplug_p4d_range(pgd_t *pgdp, unsigned long addr,
 			continue;
 
 		WARN_ON(!p4d_present(p4d));
-		unmap_hotplug_pud_range(p4dp, addr, next, free_mapped, altmap);
+		unmap_pud_range(p4dp, addr, next, free_mapped, altmap);
 	} while (addr = next, addr < end);
 }
 
-static void unmap_hotplug_range(unsigned long addr, unsigned long end,
+static void unmap_range(unsigned long addr, unsigned long end,
 				bool free_mapped, struct vmem_altmap *altmap)
 {
 	unsigned long next;
@@ -1039,7 +1039,7 @@ static void unmap_hotplug_range(unsigned long addr, unsigned long end,
 			continue;
 
 		WARN_ON(!pgd_present(pgd));
-		unmap_hotplug_p4d_range(pgdp, addr, next, free_mapped, altmap);
+		unmap_p4d_range(pgdp, addr, next, free_mapped, altmap);
 	} while (addr = next, addr < end);
 }
 
@@ -1258,7 +1258,7 @@ void vmemmap_free(unsigned long start, unsigned long end,
 {
 	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
 
-	unmap_hotplug_range(start, end, true, altmap);
+	unmap_range(start, end, true, altmap);
 	free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END);
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
@@ -1522,7 +1522,7 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
 	WARN_ON(pgdir != init_mm.pgd);
 	WARN_ON((start < PAGE_OFFSET) || (end > PAGE_END));
 
-	unmap_hotplug_range(start, end, false, NULL);
+	unmap_range(start, end, false, NULL);
 	free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);
 }
 
--
2.35.3
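
[Editorial illustration, not part of this patch or series: a minimal sketch of how the renamed unmap_range() could be used to tear down the linear-map alias of the crash kernel region before a later step remaps it with base pages. The helper name remap_crashkernel_sketch() and the crash_base/crash_size parameters are assumptions made up for this example.]

/*
 * Illustrative sketch only: unmap the (possibly block-mapped) linear-map
 * alias of the crash kernel range without freeing the backing pages, so
 * a later step can recreate the mapping with base pages.  crash_base and
 * crash_size stand in for however the caller obtained the reserved range.
 */
static void __init remap_crashkernel_sketch(phys_addr_t crash_base,
					    phys_addr_t crash_size)
{
	unsigned long start = (unsigned long)__va(crash_base);
	unsigned long end = start + crash_size;

	/* free_mapped == false: keep the pages, only tear down the tables */
	unmap_range(start, end, false, NULL);
	free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);

	/* ...recreate the mapping at PAGE_SIZE granularity here... */
}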