Message-ID: <20240409012344.3194724-7-liaochang1@huawei.com>
Date: Tue, 9 Apr 2024 01:23:41 +0000
From: Liao Chang <liaochang1@...wei.com>
To: <catalin.marinas@....com>, <will@...nel.org>, <maz@...nel.org>,
	<oliver.upton@...ux.dev>, <james.morse@....com>, <suzuki.poulose@....com>,
	<yuzenghui@...wei.com>, <tglx@...utronix.de>, <mark.rutland@....com>,
	<ardb@...nel.org>, <broonie@...nel.org>, <liaochang1@...wei.com>,
	<anshuman.khandual@....com>, <miguel.luis@...cle.com>, <joey.gouly@....com>,
	<ryan.roberts@....com>, <jeremy.linton@....com>, <liwei391@...wei.com>,
	<daniel.thompson@...aro.org>, <sumit.garg@...aro.org>,
	<kristina.martsenko@....com>, <jpoimboe@...nel.org>, <ericchancf@...gle.com>,
	<robh@...nel.org>, <scott@...amperecomputing.com>,
	<songshuaishuai@...ylab.org>, <shijie@...amperecomputing.com>,
	<bhe@...hat.com>, <akpm@...ux-foundation.org>, <thunder.leizhen@...wei.com>,
	<horms@...nel.org>, <rmk+kernel@...linux.org.uk>, <takakura@...inux.co.jp>,
	<dianders@...omium.org>, <swboyd@...omium.org>, <frederic@...nel.org>,
	<reijiw@...gle.com>, <akihiko.odaki@...nix.com>, <ruanjinjie@...wei.com>
CC: <linux-arm-kernel@...ts.infradead.org>, <linux-kernel@...r.kernel.org>,
	<kvmarm@...ts.linux.dev>
Subject: [PATCH 6/9] arm64: daifflags: Add logical exception masks covering DAIF + PMR + ALLINT

In Mark Brown's FEAT_NMI support patchset [1], Mark Rutland suggested
refactoring the DAIF management code by adding new "logical exception
mask" helpers that treat DAIF + PMR + ALLINT as separate elements.

This patch adds a series of new exception mask helpers, prefixed with
"local_allint_", whose interfaces mirror the existing counterparts.
The usage and behavior of the new helpers are expected to align with
the old ones; any divergence would lead to unexpected results.
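
For example, callers are expected to pair the new helpers the same way
as the existing local_daif_save()/local_daif_restore() pair. A minimal
sketch (illustrative only, not part of this patch):

	arch_irqflags_t flags;

	flags = local_allint_save();	/* mask DAIF/PMR/ALLINT */
	/* ...section that must not be interrupted, even by NMIs... */
	local_allint_restore(flags);	/* restore the saved masks */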

[1] https://lore.kernel.org/linux-arm-kernel/Y4sH5qX5bK9xfEBp@lpieralisi/

Signed-off-by: Liao Chang <liaochang1@...wei.com>
---
 arch/arm64/include/asm/daifflags.h   | 240 +++++++++++++++++++++++++++
 arch/arm64/include/uapi/asm/ptrace.h |   1 +
 2 files changed, 241 insertions(+)

diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
index 55f57dfa8e2f..df4c4989babd 100644
--- a/arch/arm64/include/asm/daifflags.h
+++ b/arch/arm64/include/asm/daifflags.h
@@ -11,6 +11,7 @@
 #include <asm/barrier.h>
 #include <asm/cpufeature.h>
 #include <asm/ptrace.h>
+#include <asm/nmi.h>
 
 #define DAIF_PROCCTX		0
 #define DAIF_PROCCTX_NOIRQ	(PSR_I_BIT | PSR_F_BIT)
@@ -141,4 +142,243 @@ static inline void local_daif_inherit(struct pt_regs *regs)
 	 */
 	write_sysreg(flags, daif);
 }
+
+/*
+ * On arm64, the kernel supports three irqflags management schemes; the
+ * one in use depends on the configuration, as described below:
+ *
+ * 1. When neither CONFIG_ARM64_PSEUDO_NMI nor CONFIG_ARM64_NMI is 'y',
+ *    the kernel does not support handling NMIs.
+ *
+ * 2. When CONFIG_ARM64_PSEUDO_NMI=y and irqchip.gicv3_pseudo_nmi=1, the
+ *    kernel uses the CPU interface PMR and the GIC priority feature to
+ *    support handling NMIs.
+ *
+ * 3. When CONFIG_ARM64_NMI=y and irqchip.gicv3_pseudo_nmi is not enabled,
+ *    the kernel uses the FEAT_NMI extension introduced in Armv8.8 to
+ *    support handling NMIs.
+ */
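+
+/*
+ * At runtime these configurations select the helpers defined below;
+ * see local_allint_mask() for the dispatch:
+ *   config 2: system_uses_irq_prio_masking() -> __pmr_* variants
+ *   config 3: system_uses_nmi()              -> __nmi_* variants
+ *   config 1: neither                        -> plain DAIF accesses
+ */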
+union arch_irqflags {
+	unsigned long flags;
+	struct {
+		unsigned long pmr : 8;     // raw SYS_ICC_PMR_EL1 value
+		unsigned long daif : 10;   // raw DAIF, PSTATE.DAIF at bits [9:6]
+		unsigned long allint : 14; // raw ALLINT, PSTATE.ALLINT at bit 13
+	} fields;
+};
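+
+/*
+ * The field widths are chosen so that raw register reads fit without
+ * shifting: a raw DAIF read keeps PSTATE.DAIF at bits [9:6] inside
+ * the 10-bit daif field, and a raw ALLINT read keeps PSTATE.ALLINT
+ * at bit 13 inside the 14-bit allint field.
+ */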
+
+typedef union arch_irqflags arch_irqflags_t;
+
+static inline void __pmr_local_allint_mask(void)
+{
+	WARN_ON(system_has_prio_mask_debugging() &&
+		(read_sysreg_s(SYS_ICC_PMR_EL1) ==
+		 (GIC_PRIO_IRQOFF | GIC_PRIO_PSR_I_SET)));
+	/*
+	 * Don't really care for a dsb here, we don't intend to enable
+	 * IRQs.
+	 */
+	gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
+}
+
+static inline void __nmi_local_allint_mask(void)
+{
+	_allint_set();
+}
+
+static inline void local_allint_mask(void)
+{
+	asm volatile(
+		"msr	daifset, #0xf		// local_allint_mask\n"
+		:
+		:
+		: "memory");
+
+	if (system_uses_irq_prio_masking())
+		__pmr_local_allint_mask();
+	else if (system_uses_nmi())
+		__nmi_local_allint_mask();
+
+	trace_hardirqs_off();
+}
+
+static inline arch_irqflags_t __pmr_local_allint_save_flags(void)
+{
+	arch_irqflags_t irqflags;
+
+	irqflags.fields.pmr = read_sysreg_s(SYS_ICC_PMR_EL1);
+	irqflags.fields.daif = read_sysreg(daif);
+	irqflags.fields.allint = 0;
+	/*
+	 * If IRQs are masked with PMR, reflect it in the daif field of
+	 * irqflags. If NMIs and IRQs are masked with PMR, reflect it in
+	 * the daif and allint fields; this avoids the need to check
+	 * PSTATE.A in local_allint_restore() to determine whether NMIs
+	 * are masked.
+	 */
+	switch (irqflags.fields.pmr) {
+	case GIC_PRIO_IRQON:
+		break;
+
+	case __GIC_PRIO_IRQOFF:
+	case __GIC_PRIO_IRQOFF_NS:
+		irqflags.fields.daif |= PSR_I_BIT | PSR_F_BIT;
+		break;
+
+	case GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET:
+		irqflags.fields.allint = 1;
+		break;
+
+	default:
+		WARN_ON(1);
+	}
+
+	return irqflags;
+}
+
+static inline arch_irqflags_t __nmi_local_allint_save_flags(void)
+{
+	arch_irqflags_t irqflags;
+
+	irqflags.fields.daif = read_sysreg(daif);
+	irqflags.fields.allint = read_sysreg_s(SYS_ALLINT);
+
+	return irqflags;
+}
+
+static inline arch_irqflags_t local_allint_save_flags(void)
+{
+	arch_irqflags_t irqflags = { .flags = 0UL };
+
+	if (system_uses_irq_prio_masking())
+		return __pmr_local_allint_save_flags();
+	else if (system_uses_nmi())
+		return __nmi_local_allint_save_flags();
+
+	irqflags.fields.daif = read_sysreg(daif);
+	return irqflags;
+}
+
+static inline arch_irqflags_t local_allint_save(void)
+{
+	arch_irqflags_t irqflags;
+
+	irqflags = local_allint_save_flags();
+
+	local_allint_mask();
+
+	return irqflags;
+}
+
+static inline void gic_pmr_prio_check(void)
+{
+	WARN_ON(system_has_prio_mask_debugging() &&
+		(read_sysreg(daif) & (PSR_I_BIT | PSR_F_BIT)) !=
+		(PSR_I_BIT | PSR_F_BIT));
+}
+
+static inline void __pmr_local_allint_restore(arch_irqflags_t irqflags)
+{
+	unsigned long pmr = irqflags.fields.pmr;
+	unsigned long daif = irqflags.fields.daif;
+	unsigned long allint = irqflags.fields.allint;
+
+	gic_pmr_prio_check();
+
+	gic_write_pmr(pmr);
+
+	if (!(daif & PSR_I_BIT)) {
+		pmr_sync();
+	} else if (!allint) {
+		/*
+		 * Use irqflags.fields.allint to indicate whether we can
+		 * take NMIs, instead of the old hack of checking PSTATE.A.
+		 *
+		 * There has been concern that the write to daif
+		 * might be reordered before this write to PMR.
+		 * From the ARM ARM DDI 0487D.a, section D1.7.1
+		 * "Accessing PSTATE fields":
+		 *   Writes to the PSTATE fields have side-effects on
+		 *   various aspects of the PE operation. All of these
+		 *   side-effects are guaranteed:
+		 *     - Not to be visible to earlier instructions in
+		 *       the execution stream.
+		 *     - To be visible to later instructions in the
+		 *       execution stream.
+		 *
+		 * Also, writes to PMR are self-synchronizing, so no
+		 * interrupt with a priority lower than PMR is signaled
+		 * to the PE after the write.
+		 *
+		 * So we don't need additional synchronization here.
+		 */
+		daif &= ~(PSR_I_BIT | PSR_F_BIT);
+	}
+	write_sysreg(daif, daif);
+}
+
+static inline void __nmi_local_allint_restore(arch_irqflags_t irqflags)
+{
+	if (irqflags.fields.allint)
+		_allint_set();
+	else
+		_allint_clear();
+
+	write_sysreg(irqflags.fields.daif, daif);
+}
+
+static inline int local_allint_disabled(arch_irqflags_t irqflags)
+{
+	return irqflags.fields.allint || (irqflags.fields.daif & PSR_I_BIT);
+}
+
+/*
+ * This must take the kernel configuration and boot parameters into
+ * account and use the corresponding operations to restore the interrupt
+ * masks properly: the kernel may have NMI support disabled, use priority
+ * masking (pseudo-NMI) to support NMIs, or use the FEAT_NMI extension.
+ */
+static inline void local_allint_restore(arch_irqflags_t irqflags)
+{
+	int irq_disabled = local_allint_disabled(irqflags);
+
+	if (!irq_disabled)
+		trace_hardirqs_on();
+
+	if (system_uses_irq_prio_masking())
+		__pmr_local_allint_restore(irqflags);
+	else if (system_uses_nmi())
+		__nmi_local_allint_restore(irqflags);
+	else
+		write_sysreg(irqflags.fields.daif, daif);
+
+	if (irq_disabled)
+		trace_hardirqs_off();
+}
+
+/*
+ * Called by synchronous exception handlers to restore the DAIF bits that were
+ * modified by taking an exception.
+ */
+static inline void local_allint_inherit(struct pt_regs *regs)
+{
+	if (interrupts_enabled(regs))
+		trace_hardirqs_on();
+
+	if (system_uses_irq_prio_masking())
+		gic_write_pmr(regs->pmr_save);
+
+	/*
+	 * We can't use local_daif_restore(regs->pstate) here as
+	 * system_has_prio_mask_debugging() won't restore the I bit if it can
+	 * use the pmr instead.
+	 */
+	write_sysreg(regs->pstate & DAIF_MASK, daif);
+
+	if (system_uses_nmi()) {
+		if (regs->pstate & PSR_ALLINT_BIT)
+			_allint_set();
+		else
+			_allint_clear();
+	}
+}
 #endif
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index 7fa2f7036aa7..8a125a1986be 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -48,6 +48,7 @@
 #define PSR_D_BIT	0x00000200
 #define PSR_BTYPE_MASK	0x00000c00
 #define PSR_SSBS_BIT	0x00001000
+#define PSR_ALLINT_BIT	0x00002000
 #define PSR_PAN_BIT	0x00400000
 #define PSR_UAO_BIT	0x00800000
 #define PSR_DIT_BIT	0x01000000
-- 
2.34.1

