Message-Id: <20191108174801.381317731@linuxfoundation.org>
Date: Fri, 8 Nov 2019 19:50:47 +0100
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org,
"Aneesh Kumar K.V" <aneesh.kumar@...ux.ibm.com>,
Michael Ellerman <mpe@...erman.id.au>,
Sandipan Das <sandipan@...ux.ibm.com>
Subject: [PATCH 4.14 59/62] powerpc/book3s64/radix: Rename CPU_FTR_P9_TLBIE_BUG feature flag
From: "Aneesh Kumar K.V" <aneesh.kumar@...ux.ibm.com>
commit 09ce98cacd51fcd0fa0af2f79d1e1d3192f4cbb0 upstream.
Rename the #define to indicate that it is related to the store vs tlbie
ordering issue. In the next patch, we will be adding another feature
flag that is used to handle the ERAT flush vs tlbie ordering issue.
Cc: stable@...r.kernel.org # v4.14
Fixes: a5d4b5891c2f ("powerpc/mm: Fixup tlbie vs store ordering issue on POWER9")
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@...ux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@...erman.id.au>
Link: https://lore.kernel.org/r/20190924035254.24612-2-aneesh.kumar@linux.ibm.com
[sandipan: Backported to v4.14]
Signed-off-by: Sandipan Das <sandipan@...ux.ibm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
arch/powerpc/include/asm/cputable.h | 4 ++--
arch/powerpc/kernel/dt_cpu_ftrs.c | 6 +++---
arch/powerpc/kvm/book3s_64_mmu_radix.c | 2 +-
arch/powerpc/kvm/book3s_hv_rm_mmu.c | 2 +-
arch/powerpc/mm/hash_native_64.c | 2 +-
arch/powerpc/mm/tlb-radix.c | 2 +-
6 files changed, 9 insertions(+), 9 deletions(-)
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -215,7 +215,7 @@ enum {
#define CPU_FTR_DAWR LONG_ASM_CONST(0x0400000000000000)
#define CPU_FTR_DABRX LONG_ASM_CONST(0x0800000000000000)
#define CPU_FTR_PMAO_BUG LONG_ASM_CONST(0x1000000000000000)
-#define CPU_FTR_P9_TLBIE_BUG LONG_ASM_CONST(0x2000000000000000)
+#define CPU_FTR_P9_TLBIE_STQ_BUG LONG_ASM_CONST(0x0000400000000000)
#define CPU_FTR_POWER9_DD1 LONG_ASM_CONST(0x4000000000000000)
#ifndef __ASSEMBLY__
@@ -477,7 +477,7 @@ enum {
CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | \
- CPU_FTR_P9_TLBIE_BUG)
+ CPU_FTR_P9_TLBIE_STQ_BUG)
#define CPU_FTRS_POWER9_DD1 ((CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD1) & \
(~CPU_FTR_SAO))
#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -747,14 +747,14 @@ static __init void update_tlbie_feature_
if ((pvr & 0xe000) == 0) {
/* Nimbus */
if ((pvr & 0xfff) < 0x203)
- cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_BUG;
+ cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
} else if ((pvr & 0xc000) == 0) {
/* Cumulus */
if ((pvr & 0xfff) < 0x103)
- cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_BUG;
+ cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
} else {
WARN_ONCE(1, "Unknown PVR");
- cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_BUG;
+ cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
}
}
}
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -160,7 +160,7 @@ static void kvmppc_radix_tlbie_page(stru
asm volatile("ptesync": : :"memory");
asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1)
: : "r" (addr), "r" (kvm->arch.lpid) : "memory");
- if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG))
+ if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG))
asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1)
: : "r" (addr), "r" (kvm->arch.lpid) : "memory");
asm volatile("ptesync": : :"memory");
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -449,7 +449,7 @@ static void do_tlbies(struct kvm *kvm, u
"r" (rbvalues[i]), "r" (kvm->arch.lpid));
}
- if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
+ if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
/*
* Need the extra ptesync to make sure we don't
* re-order the tlbie
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -106,7 +106,7 @@ static inline unsigned long ___tlbie(un
static inline void fixup_tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
- if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
+ if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
/* Need the extra ptesync to ensure we don't reorder tlbie*/
asm volatile("ptesync": : :"memory");
___tlbie(vpn, psize, apsize, ssize);
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -44,7 +44,7 @@ static inline void fixup_tlbie(void)
unsigned long pid = 0;
unsigned long va = ((1UL << 52) - 1);
- if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
+ if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
asm volatile("ptesync": : :"memory");
__tlbie_va(va, pid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
}
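The dt_cpu_ftrs.c hunk above wires the renamed flag into the POWER9 PVR check:
the DD revision in the low 12 bits decides whether the store vs tlbie
workaround is required for the Nimbus and Cumulus sub-families. Purely as an
illustration (not part of the patch), here is a minimal standalone C sketch of
that decision logic; needs_tlbie_stq_workaround() and the sample PVR values in
main() are hypothetical names/data chosen only to exercise each branch.

/* stq_bug_check.c - standalone sketch of the PVR check shown in the
 * dt_cpu_ftrs.c hunk above; builds with any C compiler, no kernel headers. */
#include <stdbool.h>
#include <stdio.h>

static bool needs_tlbie_stq_workaround(unsigned long pvr)
{
	/* The DD revision lives in the low 12 bits of the PVR; the 0xe000 and
	 * 0xc000 masks distinguish the Nimbus and Cumulus sub-families. */
	if ((pvr & 0xe000) == 0)		/* Nimbus: affected before DD2.3 */
		return (pvr & 0xfff) < 0x203;
	if ((pvr & 0xc000) == 0)		/* Cumulus: affected before DD1.3 */
		return (pvr & 0xfff) < 0x103;
	return true;				/* unknown part: assume it is affected */
}

int main(void)
{
	/* Hypothetical PVR low halfwords, one per branch of the check above. */
	unsigned long samples[] = { 0x0202, 0x0203, 0x2102, 0x2103, 0x8001 };

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("pvr 0x%04lx -> %s\n", samples[i],
		       needs_tlbie_stq_workaround(samples[i])
		       ? "set CPU_FTR_P9_TLBIE_STQ_BUG" : "no workaround");
	return 0;
}

In the kernel itself this decision is made once at boot and recorded in
cur_cpu_spec->cpu_features, so the hot TLB-flush paths only pay for a
cpu_has_feature() test, as the remaining hunks show.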