Message-Id: <1548946743-38979-13-git-send-email-julien.thierry@arm.com>
Date: Thu, 31 Jan 2019 14:58:50 +0000
From: Julien Thierry <julien.thierry@....com>
To: linux-arm-kernel@...ts.infradead.org
Cc: linux-kernel@...r.kernel.org, daniel.thompson@...aro.org,
joel@...lfernandes.org, marc.zyngier@....com,
christoffer.dall@....com, james.morse@....com,
catalin.marinas@....com, will.deacon@....com, mark.rutland@....com,
Julien Thierry <julien.thierry@....com>,
Ard Biesheuvel <ard.biesheuvel@...aro.org>,
Oleg Nesterov <oleg@...hat.com>
Subject: [PATCH v10 12/25] arm64: irqflags: Use ICC_PMR_EL1 for interrupt masking
Instead of disabling interrupts by setting the PSR.I bit, use a priority
higher than the one used for interrupts to mask them via PMR.
When using PMR to disable interrupts, the value of PMR will be used
instead of PSR.[DAIF] for the irqflags.
Signed-off-by: Julien Thierry <julien.thierry@....com>
Suggested-by: Daniel Thompson <daniel.thompson@...aro.org>
Acked-by: Ard Biesheuvel <ard.biesheuvel@...aro.org>
Reviewed-by: Catalin Marinas <catalin.marinas@....com>
Cc: Catalin Marinas <catalin.marinas@....com>
Cc: Will Deacon <will.deacon@....com>
Cc: Ard Biesheuvel <ard.biesheuvel@...aro.org>
Cc: Oleg Nesterov <oleg@...hat.com>
---
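As a rough illustration of the idea above (not part of the patch, and assuming
the GIC_PRIO_IRQON/GIC_PRIO_IRQOFF values and the sysreg accessors introduced
earlier in this series), the save/disable behaviour with priority masking
enabled is roughly equivalent to the C below; the function names are
illustrative only, and the real code in the diff uses ALTERNATIVE() so the
DAIF-based sequences remain the default when ARM64_HAS_IRQ_PRIO_MASKING is
not set:

/* Sketch only, not the patch code. */
static inline unsigned long pmr_local_save_flags(void)
{
	unsigned long daif_bits = read_sysreg(daif);

	/*
	 * If PSR.I is set (e.g. on exception entry), interrupts are masked
	 * no matter what PMR holds, so report them as disabled.
	 */
	if (daif_bits & PSR_I_BIT)
		return GIC_PRIO_IRQOFF;

	return read_sysreg_s(SYS_ICC_PMR_EL1);
}

static inline unsigned long pmr_local_irq_save(void)
{
	unsigned long flags = pmr_local_save_flags();

	/* Mask IRQs by raising the priority mask rather than setting PSR.I */
	write_sysreg_s(GIC_PRIO_IRQOFF, SYS_ICC_PMR_EL1);

	return flags;
}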
arch/arm64/include/asm/efi.h | 11 +++++
arch/arm64/include/asm/irqflags.h | 100 +++++++++++++++++++++++++++-----------
2 files changed, 83 insertions(+), 28 deletions(-)
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 7ed3208..c9e9a69 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -44,6 +44,17 @@
#define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
+/*
+ * Even when Linux uses IRQ priorities for IRQ disabling, EFI does not.
+ * And EFI shouldn't really play around with priority masking as it is not aware
+ * which priorities the OS has assigned to its interrupts.
+ */
+#define arch_efi_save_flags(state_flags) \
+ ((void)((state_flags) = read_sysreg(daif)))
+
+#define arch_efi_restore_flags(state_flags) write_sysreg(state_flags, daif)
+
+
/* arch specific definitions used by the stub code */
/*
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index 24692ed..d4597b2 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -18,7 +18,9 @@
#ifdef __KERNEL__
+#include <asm/alternative.h>
#include <asm/ptrace.h>
+#include <asm/sysreg.h>
/*
* Aarch64 has flags for masking: Debug, Asynchronous (serror), Interrupts and
@@ -36,33 +38,27 @@
/*
* CPU interrupt mask handling.
*/
-static inline unsigned long arch_local_irq_save(void)
-{
- unsigned long flags;
- asm volatile(
- "mrs %0, daif // arch_local_irq_save\n"
- "msr daifset, #2"
- : "=r" (flags)
- :
- : "memory");
- return flags;
-}
-
static inline void arch_local_irq_enable(void)
{
- asm volatile(
- "msr daifclr, #2 // arch_local_irq_enable"
- :
+ asm volatile(ALTERNATIVE(
+ "msr daifclr, #2 // arch_local_irq_enable\n"
+ "nop",
+ "msr_s " __stringify(SYS_ICC_PMR_EL1) ",%0\n"
+ "dsb sy",
+ ARM64_HAS_IRQ_PRIO_MASKING)
:
+ : "r" (GIC_PRIO_IRQON)
: "memory");
}
static inline void arch_local_irq_disable(void)
{
- asm volatile(
- "msr daifset, #2 // arch_local_irq_disable"
- :
+ asm volatile(ALTERNATIVE(
+ "msr daifset, #2 // arch_local_irq_disable",
+ "msr_s " __stringify(SYS_ICC_PMR_EL1) ", %0",
+ ARM64_HAS_IRQ_PRIO_MASKING)
:
+ : "r" (GIC_PRIO_IRQOFF)
: "memory");
}
@@ -71,12 +67,44 @@ static inline void arch_local_irq_disable(void)
*/
static inline unsigned long arch_local_save_flags(void)
{
+ unsigned long daif_bits;
unsigned long flags;
- asm volatile(
- "mrs %0, daif // arch_local_save_flags"
- : "=r" (flags)
- :
+
+ daif_bits = read_sysreg(daif);
+
+ /*
+ * The asm is logically equivalent to:
+ *
+ * if (system_uses_irq_prio_masking())
+ * flags = (daif_bits & PSR_I_BIT) ?
+ * GIC_PRIO_IRQOFF :
+ * read_sysreg_s(SYS_ICC_PMR_EL1);
+ * else
+ * flags = daif_bits;
+ */
+ asm volatile(ALTERNATIVE(
+ "mov %0, %1\n"
+ "nop\n"
+ "nop",
+ "mrs_s %0, " __stringify(SYS_ICC_PMR_EL1) "\n"
+ "ands %1, %1, " __stringify(PSR_I_BIT) "\n"
+ "csel %0, %0, %2, eq",
+ ARM64_HAS_IRQ_PRIO_MASKING)
+ : "=&r" (flags), "+r" (daif_bits)
+ : "r" (GIC_PRIO_IRQOFF)
: "memory");
+
+ return flags;
+}
+
+static inline unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags;
+
+ flags = arch_local_save_flags();
+
+ arch_local_irq_disable();
+
return flags;
}
@@ -85,16 +113,32 @@ static inline unsigned long arch_local_save_flags(void)
*/
static inline void arch_local_irq_restore(unsigned long flags)
{
- asm volatile(
- "msr daif, %0 // arch_local_irq_restore"
- :
- : "r" (flags)
- : "memory");
+ asm volatile(ALTERNATIVE(
+ "msr daif, %0\n"
+ "nop",
+ "msr_s " __stringify(SYS_ICC_PMR_EL1) ", %0\n"
+ "dsb sy",
+ ARM64_HAS_IRQ_PRIO_MASKING)
+ : "+r" (flags)
+ :
+ : "memory");
}
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
- return flags & PSR_I_BIT;
+ int res;
+
+ asm volatile(ALTERNATIVE(
+ "and %w0, %w1, #" __stringify(PSR_I_BIT) "\n"
+ "nop",
+ "cmp %w1, #" __stringify(GIC_PRIO_IRQOFF) "\n"
+ "cset %w0, ls",
+ ARM64_HAS_IRQ_PRIO_MASKING)
+ : "=&r" (res)
+ : "r" ((int) flags)
+ : "memory");
+
+ return res;
}
#endif
#endif
--
1.9.1