Message-Id: <20210430143607.135005-4-leobras.c@gmail.com>
Date: Fri, 30 Apr 2021 11:36:10 -0300
From: Leonardo Bras <leobras.c@...il.com>
To: Michael Ellerman <mpe@...erman.id.au>,
Benjamin Herrenschmidt <benh@...nel.crashing.org>,
Paul Mackerras <paulus@...ba.org>,
Leonardo Bras <leobras.c@...il.com>,
Sandipan Das <sandipan@...ux.ibm.com>,
Mike Rapoport <rppt@...nel.org>,
Andrew Morton <akpm@...ux-foundation.org>,
"Aneesh Kumar K.V" <aneesh.kumar@...ux.ibm.com>,
Nicholas Piggin <npiggin@...il.com>,
Nathan Lynch <nathanl@...ux.ibm.com>,
David Hildenbrand <david@...hat.com>,
Scott Cheloha <cheloha@...ux.ibm.com>,
Laurent Dufour <ldufour@...ux.ibm.com>
Cc: linuxppc-dev@...ts.ozlabs.org, linux-kernel@...r.kernel.org
Subject: [PATCH v2 3/3] powerpc/mm/hash: Avoid multiple HPT resize-downs on memory hotunplug

During memory hotunplug, after each LMB is removed, the HPT may be
resized down if it has become at least 4 times larger than needed for
the remaining memory (a 2-shift gap, due to the hysteresis).
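
To make the hysteresis concrete, here is a minimal userspace sketch.
It is illustrative only: approx_htab_shift() roughly mimics
htab_shift_for_mem_size() for 4K pages and is not the kernel
implementation.

  #include <stdbool.h>
  #include <stdio.h>

  /*
   * Rough stand-in for htab_shift_for_mem_size() with 4K pages:
   * the HPT is ~1/64th of memory, with a minimum shift of 18.
   * Illustrative only, not the kernel helper.
   */
  static unsigned int approx_htab_shift(unsigned long mem_size)
  {
          unsigned int shift = 18;

          while ((1UL << (shift + 6)) < mem_size)
                  shift++;
          return shift;
  }

  /*
   * Shrink-side hysteresis: only resize down once the target shift
   * is at least 2 below the current one, i.e. once the HPT could
   * map at least 4x the remaining memory.
   */
  static bool hpt_should_shrink(unsigned int cur_shift,
                                unsigned long new_mem)
  {
          return approx_htab_shift(new_mem) < cur_shift - 1;
  }

  int main(void)
  {
          unsigned long gib = 1UL << 30;
          unsigned int cur = approx_htab_shift(385 * gib); /* 33 */

          /* 1 shift below current: no shrink yet */
          printf("256GB left: %d\n", hpt_should_shrink(cur, 256 * gib));
          /* 2 shifts below current: shrink */
          printf(" 96GB left: %d\n", hpt_should_shrink(cur, 96 * gib));
          return 0;
  }

With these illustrative numbers, going from 385GB to 256GB leaves the
target shift only 1 below the current one and no shrink is attempted,
while going down to 96GB crosses the 2-shift gap and triggers one.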

The resize-down itself is usually not an issue, but it can take a lot
of time when it fails: the failure tends to repeat at each following
LMB removal, until no bolted entries conflict anymore, which can take
a while to happen.

This can be avoided by doing a single HPT resize at the end of memory
hotunplug, after all requested entries are removed. To make this
happen, temporarily disable all HPT resize-downs before hotunplug,
re-enable them after hotunplug ends, and then resize the HPT down to
match the remaining memory size.
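
The serialization scheme is simple: the per-LMB resize path takes the
mutex with a trylock and silently skips the resize while a batch holds
it. A minimal userspace analogue with POSIX threads (all names below
are made up for illustration):

  #include <pthread.h>
  #include <stdio.h>

  static pthread_mutex_t resize_down_lock = PTHREAD_MUTEX_INITIALIZER;

  /* Per-item path: skip the resize while a batch is in progress. */
  static void try_resize_down(void)
  {
          if (pthread_mutex_trylock(&resize_down_lock) != 0)
                  return;                  /* batch running: no-op */
          printf("resize down\n");         /* stand-in for resize_hpt() */
          pthread_mutex_unlock(&resize_down_lock);
  }

  /* Batch path: hold off resize-downs for the whole batch. */
  static void batch_shrink_begin(void)
  {
          pthread_mutex_lock(&resize_down_lock);
  }

  /* Release first, then do the one deferred resize. */
  static void batch_shrink_end(void)
  {
          pthread_mutex_unlock(&resize_down_lock);
          printf("single final resize\n");
  }

  int main(void)
  {
          batch_shrink_begin();
          for (int i = 0; i < 4; i++)
                  try_resize_down();       /* all skipped */
          batch_shrink_end();              /* one resize at the end */
          return 0;
  }

Note the ordering in batch_shrink_end(): the mutex must be released
before the deferred resize runs, since the resize path itself uses the
trylock and would otherwise skip its own final resize. The
hash_batch_shrink_end() hunk below orders it the same way.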

As an example, hotunplugging 256GB from a 385GB guest took 621s without
this patch, and 100s with it applied.

Signed-off-by: Leonardo Bras <leobras.c@...il.com>
---
arch/powerpc/include/asm/book3s/64/hash.h | 2 +
arch/powerpc/mm/book3s64/hash_utils.c | 45 +++++++++++++++++--
.../platforms/pseries/hotplug-memory.c | 26 +++++++++++
3 files changed, 70 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index fad4af8b8543..6cd66e7e98c9 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -256,6 +256,8 @@ int hash__create_section_mapping(unsigned long start, unsigned long end,
int hash__remove_section_mapping(unsigned long start, unsigned long end);
void hash_batch_expand_prepare(unsigned long newsize);
+void hash_batch_shrink_begin(void);
+void hash_batch_shrink_end(void);
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 3fa395b3fe57..73ecd0f61acd 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -795,6 +795,9 @@ static unsigned long __init htab_get_table_size(void)
}
#ifdef CONFIG_MEMORY_HOTPLUG
+
+static DEFINE_MUTEX(hpt_resize_down_lock);
+
static int resize_hpt_for_hotplug(unsigned long new_mem_size, bool shrinking)
{
unsigned target_hpt_shift;
@@ -805,7 +808,7 @@ static int resize_hpt_for_hotplug(unsigned long new_mem_size, bool shrinking)
target_hpt_shift = htab_shift_for_mem_size(new_mem_size);
if (shrinking) {
-
+ int ret;
/*
* To avoid lots of HPT resizes if memory size is fluctuating
* across a boundary, we deliberately have some hysterisis
@@ -818,10 +821,20 @@ static int resize_hpt_for_hotplug(unsigned long new_mem_size, bool shrinking)
if (target_hpt_shift >= ppc64_pft_size - 1)
return 0;
- } else if (target_hpt_shift <= ppc64_pft_size) {
- return 0;
+ /* During a batch remove, skip: the HPT is resized once at the end of the batch. */
+
+ if (!mutex_trylock(&hpt_resize_down_lock))
+ return 0;
+
+ ret = mmu_hash_ops.resize_hpt(target_hpt_shift);
+
+ mutex_unlock(&hpt_resize_down_lock);
+ return ret;
}
+ if (target_hpt_shift <= ppc64_pft_size)
+ return 0;
+
return mmu_hash_ops.resize_hpt(target_hpt_shift);
}
@@ -879,6 +892,32 @@ void hash_batch_expand_prepare(unsigned long newsize)
break;
}
}
+
+void hash_batch_shrink_begin(void)
+{
+ /* Disable HPT resize-down during hot-unplug */
+ mutex_lock(&hpt_resize_down_lock);
+}
+
+void hash_batch_shrink_end(void)
+{
+ const u64 starting_shift = ppc64_pft_size;
+ unsigned long newsize;
+
+ /* Re-enable HPT resize-down, then do the single deferred resize */
+ mutex_unlock(&hpt_resize_down_lock);
+
+ newsize = memblock_phys_mem_size();
+ /* Resize to the smallest shift possible */
+ while (resize_hpt_for_hotplug(newsize, true) == -ENOSPC) {
+ newsize *= 2;
+ pr_warn("Hash collision while resizing HPT\n");
+
+ /* Do not try to resize to the starting shift or bigger */
+ if (htab_shift_for_mem_size(newsize) >= starting_shift)
+ break;
+ }
+}
#endif /* CONFIG_MEMORY_HOTPLUG */
static void __init hash_init_partition_table(phys_addr_t hash_table,
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 48b2cfe4ce69..44bc50d72353 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -426,6 +426,9 @@ static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
return -EINVAL;
}
+ if (!radix_enabled())
+ hash_batch_shrink_begin();
+
for_each_drmem_lmb(lmb) {
rc = dlpar_remove_lmb(lmb);
if (rc)
@@ -471,6 +474,9 @@ static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
rc = 0;
}
+ if (!radix_enabled())
+ hash_batch_shrink_end();
+
return rc;
}
@@ -533,6 +539,9 @@ static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
if (lmbs_available < lmbs_to_remove)
return -EINVAL;
+ if (!radix_enabled())
+ hash_batch_shrink_begin();
+
for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
continue;
@@ -573,6 +582,9 @@ static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
}
}
+ if (!radix_enabled())
+ hash_batch_shrink_end();
+
return rc;
}
@@ -703,6 +715,9 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add)
if (lmbs_added != lmbs_to_add) {
pr_err("Memory hot-add failed, removing any added LMBs\n");
+ if (!radix_enabled())
+ hash_batch_shrink_begin();
+
for_each_drmem_lmb(lmb) {
if (!drmem_lmb_reserved(lmb))
continue;
@@ -716,6 +731,10 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add)
drmem_remove_lmb_reservation(lmb);
}
+
+ if (!radix_enabled())
+ hash_batch_shrink_end();
+
rc = -EINVAL;
} else {
for_each_drmem_lmb(lmb) {
@@ -817,6 +836,9 @@ static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index)
if (rc) {
pr_err("Memory indexed-count-add failed, removing any added LMBs\n");
+ if (!radix_enabled())
+ hash_batch_shrink_begin();
+
for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
if (!drmem_lmb_reserved(lmb))
continue;
@@ -830,6 +852,10 @@ static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index)
drmem_remove_lmb_reservation(lmb);
}
+
+ if (!radix_enabled())
+ hash_batch_shrink_end();
+
rc = -EINVAL;
} else {
for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
--
2.30.2