Message-Id: <20180123122809.16269-16-suzuki.poulose@arm.com>
Date:   Tue, 23 Jan 2018 12:28:08 +0000
From:   Suzuki K Poulose <suzuki.poulose@....com>
To:     linux-arm-kernel@...ts.infradead.org
Cc:     linux-kernel@...r.kernel.org, ard.biesheuvel@...aro.org,
        will.deacon@....com, mark.rutland@....com, marc.zyngier@....com,
        catalin.marinas@....com, ckadabi@...eaurora.org,
        jnair@...iumnetworks.com, Suzuki K Poulose <suzuki.poulose@....com>
Subject: [PATCH 15/16] arm64: Delay enabling hardware DBM feature

We enable the hardware DBM bit on a capable CPU very early in the
boot, via __cpu_setup. This doesn't give us the flexibility to
optionally disable the feature later, as clearing the bit is costly:
the TLB can cache the setting. Instead, delay enabling the feature
until the CPU is brought up into the kernel, and use the feature
capability mechanism to handle it.
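
For reference, a minimal C sketch (not part of this patch; the helper
name is purely illustrative) of what the per-CPU enable boils down to,
using the same TCR_EL1.HD control bit and ID_AA64MMFR1_EL1.HADBS field
that the hunks below deal with:

	#include <asm/cpufeature.h>
	#include <asm/pgtable-hwdef.h>
	#include <asm/sysreg.h>

	/*
	 * Sketch only: turn on hardware dirty bit management on the
	 * calling CPU if its ID_AA64MMFR1_EL1.HADBS field reports
	 * support for hardware dirty flag updates (value >= 2).
	 */
	static void maybe_enable_hw_dbm_on_this_cpu(void)
	{
		u64 mmfr1 = read_sysreg(id_aa64mmfr1_el1);
		unsigned int hadbs;

		hadbs = cpuid_feature_extract_unsigned_field(mmfr1,
						ID_AA64MMFR1_HADBS_SHIFT);
		if (hadbs >= 2) {
			write_sysreg(read_sysreg(tcr_el1) | TCR_HD, tcr_el1);
			isb();
		}
	}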

Hardware DBM is a non-conflicting feature, i.e. the kernel can safely
run with a mix of CPUs, some using the feature and others not. So it
is safe for a late CPU to have this capability and enable it, even if
the already-active CPUs don't.

To get this handled properly by the infrastructure, unconditionally
set the capability and only enable it on CPUs which really have the
feature. Add a new type of capability to the infrastructure which
ignores such a conflict on a late CPU.
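
Conceptually (a rough sketch only; the real checks live in the
capability infrastructure reworked earlier in this series, and the
helper below is purely illustrative), a late CPU passes the conflict
check for this type whether or not it has the feature:

	/* Assumed verification logic, for illustration only. */
	static bool late_cpu_acceptable(const struct arm64_cpu_capabilities *cap,
					bool system_has_cap, bool cpu_has_cap)
	{
		if (cpu_has_cap && !system_has_cap)
			return cap->type & ARM64_CPUCAP_LATE_CPU_SAFE_TO_HAVE;
		if (!cpu_has_cap && system_has_cap)
			return cap->type & ARM64_CPUCAP_LATE_CPU_SAFE_TO_MISS;
		return true;
	}

Since ARM64_CPUCAP_WEAK_CPU_LOCAL_FEATURE sets both the SAFE_TO_MISS
and SAFE_TO_HAVE bits, neither case is treated as a conflict.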

Signed-off-by: Suzuki K Poulose <suzuki.poulose@....com>
---
 arch/arm64/include/asm/cpucaps.h    |  3 ++-
 arch/arm64/include/asm/cpufeature.h |  8 +++++++
 arch/arm64/kernel/cpufeature.c      | 42 +++++++++++++++++++++++++++++++++++++
 arch/arm64/mm/proc.S                |  5 +----
 4 files changed, 53 insertions(+), 5 deletions(-)

diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index bb263820de13..8df80cc828ac 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -45,7 +45,8 @@
 #define ARM64_HARDEN_BRANCH_PREDICTOR		24
 #define ARM64_HARDEN_BP_POST_GUEST_EXIT		25
 #define ARM64_HAS_RAS_EXTN			26
+#define ARM64_HW_DBM				27
 
-#define ARM64_NCAPS				27
+#define ARM64_NCAPS				28
 
 #endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 70712de687c7..243ec7c77c79 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -126,6 +126,14 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
  */
 #define ARM64_CPUCAP_STRICT_CPU_LOCAL_FEATURE	\
 	(ARM64_CPUCAP_SCOPE_LOCAL_CPU | ARM64_CPUCAP_LATE_CPU_SAFE_TO_MISS)
+/*
+ * CPU feature detected on each local CPU. It is safe for a late CPU to
+ * either have it or not.
+ */
+#define ARM64_CPUCAP_WEAK_CPU_LOCAL_FEATURE	 \
+	(ARM64_CPUCAP_SCOPE_LOCAL_CPU		|\
+	 ARM64_CPUCAP_LATE_CPU_SAFE_TO_MISS	|\
+	 ARM64_CPUCAP_LATE_CPU_SAFE_TO_HAVE)
 
 struct arm64_cpu_capabilities {
 	const char *desc;
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 2627a836e99d..8af755b8219d 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -894,6 +894,35 @@ static int __init parse_kpti(char *str)
 __setup("kpti=", parse_kpti);
 #endif	/* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
+#ifdef CONFIG_ARM64_HW_AFDBM
+static bool has_hw_dbm(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	/*
+	 * DBM is a non-conflicting feature, i.e. the kernel can safely run
+	 * a mix of CPUs with and without the feature. So we unconditionally
+	 * set the capability to allow any late CPU to use the feature. The
+	 * control bits are only enabled on a CPU that actually supports it.
+	 */
+	return true;
+}
+
+static inline void __cpu_enable_hw_dbm(void)
+{
+	u64 tcr = read_sysreg(tcr_el1) | TCR_HD;
+
+	write_sysreg(tcr, tcr_el1);
+	isb();
+}
+
+static int cpu_enable_hw_dbm(struct arm64_cpu_capabilities const *cap)
+{
+	if (has_cpuid_feature(cap, SCOPE_LOCAL_CPU))
+		__cpu_enable_hw_dbm();
+
+	return 0;
+}
+#endif
+
 static int cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
 {
 	/*
@@ -1052,6 +1081,19 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.enable = cpu_clear_disr,
 	},
 #endif /* CONFIG_ARM64_RAS_EXTN */
+#ifdef CONFIG_ARM64_HW_AFDBM
+	{
+		.desc = "Hardware pagetable Dirty Bit Management",
+		.type = ARM64_CPUCAP_WEAK_CPU_LOCAL_FEATURE,
+		.capability = ARM64_HW_DBM,
+		.sys_reg = SYS_ID_AA64MMFR1_EL1,
+		.sign = FTR_UNSIGNED,
+		.field_pos = ID_AA64MMFR1_HADBS_SHIFT,
+		.min_field_value = 2,
+		.matches = has_hw_dbm,
+		.enable = cpu_enable_hw_dbm,
+	},
+#endif
 	{},
 };
 
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index c6a12073ef46..73897dd2e555 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -254,10 +254,7 @@ ENTRY(__cpu_setup)
 	mrs	x9, ID_AA64MMFR1_EL1
 	and	x9, x9, #0xf
 	cbz	x9, 2f
-	cmp	x9, #2
-	b.lt	1f
-	orr	x10, x10, #TCR_HD		// hardware Dirty flag update
-1:	orr	x10, x10, #TCR_HA		// hardware Access flag update
+	orr	x10, x10, #TCR_HA		// hardware Access flag update
 2:
 #endif	/* CONFIG_ARM64_HW_AFDBM */
 	msr	tcr_el1, x10
-- 
2.13.6
