Message-Id: <1548946743-38979-9-git-send-email-julien.thierry@arm.com>
Date: Thu, 31 Jan 2019 14:58:46 +0000
From: Julien Thierry <julien.thierry@....com>
To: linux-arm-kernel@...ts.infradead.org
Cc: linux-kernel@...r.kernel.org, daniel.thompson@...aro.org,
joel@...lfernandes.org, marc.zyngier@....com,
christoffer.dall@....com, james.morse@....com,
catalin.marinas@....com, will.deacon@....com, mark.rutland@....com,
Julien Thierry <julien.thierry@....com>,
Oleg Nesterov <oleg@...hat.com>,
Dave Martin <Dave.Martin@....com>
Subject: [PATCH v10 08/25] arm64: Make PMR part of task context
In order to replace PSR.I interrupt disabling/enabling with ICC_PMR_EL1
interrupt masking, ICC_PMR_EL1 needs to be saved/restored when
taking/returning from an exception. This mimics the way hardware saves
and restores the PSR.I bit in spsr_el1 for exceptions and ERET.
Add PMR to the set of registers saved in the pt_regs struct on kernel entry,
and restore it before ERET. Also, initialize it to a sane value when
creating new tasks.
Signed-off-by: Julien Thierry <julien.thierry@....com>
Reviewed-by: Catalin Marinas <catalin.marinas@....com>
Reviewed-by: Marc Zyngier <marc.zyngier@....com>
Cc: Catalin Marinas <catalin.marinas@....com>
Cc: Will Deacon <will.deacon@....com>
Cc: Oleg Nesterov <oleg@...hat.com>
Cc: Dave Martin <Dave.Martin@....com>
---
arch/arm64/include/asm/processor.h | 3 +++
arch/arm64/include/asm/ptrace.h | 14 +++++++++++---
arch/arm64/kernel/asm-offsets.c | 1 +
arch/arm64/kernel/entry.S | 14 ++++++++++++++
arch/arm64/kernel/process.c | 6 ++++++
5 files changed, 35 insertions(+), 3 deletions(-)
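
Note (not part of the patch to apply): below is a minimal, self-contained C
sketch of the semantics the updated interrupts_enabled() macro implements,
i.e. a saved context only counts as having interrupts enabled if PSR.I is
clear and, when priority masking is in use, the saved PMR equals
GIC_PRIO_IRQON. The fake_* names, the struct layout, the hard-coded
GIC_PRIO_IRQON value and the boolean stand-in for the cpucap check are
simplified illustrations, not the kernel definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PSR_I_BIT      (1UL << 7)  /* PSTATE.I: IRQs masked when set */
#define GIC_PRIO_IRQON 0xf0        /* illustrative "fully unmasked" PMR value */

struct fake_pt_regs {
	uint64_t pstate;
	uint64_t pmr_save;         /* only meaningful with priority masking */
};

/* stand-in for the system_uses_irq_prio_masking() capability check */
static bool uses_irq_prio_masking = true;

static bool fake_interrupts_enabled(const struct fake_pt_regs *regs)
{
	bool prio_unmasked = uses_irq_prio_masking ?
			     regs->pmr_save == GIC_PRIO_IRQON : true;

	/* enabled only if PSR.I is clear *and* PMR lets IRQs through */
	return !(regs->pstate & PSR_I_BIT) && prio_unmasked;
}

int main(void)
{
	struct fake_pt_regs regs = { .pstate = 0, .pmr_save = GIC_PRIO_IRQON };

	printf("enabled: %d\n", fake_interrupts_enabled(&regs)); /* prints 1 */

	regs.pmr_save = 0x60;      /* some masking priority */
	printf("enabled: %d\n", fake_interrupts_enabled(&regs)); /* prints 0 */
	return 0;
}
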
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index f1a7ab1..5d9ce62 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -191,6 +191,9 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
memset(regs, 0, sizeof(*regs));
forget_syscall(regs);
regs->pc = pc;
+
+ if (system_uses_irq_prio_masking())
+ regs->pmr_save = GIC_PRIO_IRQON;
}
static inline void start_thread(struct pt_regs *regs, unsigned long pc,
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 8b131bc..ec60174 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -19,6 +19,8 @@
#ifndef __ASM_PTRACE_H
#define __ASM_PTRACE_H
+#include <asm/cpufeature.h>
+
#include <uapi/asm/ptrace.h>
/* Current Exception Level values, as contained in CurrentEL */
@@ -179,7 +181,8 @@ struct pt_regs {
#endif
u64 orig_addr_limit;
- u64 unused; // maintain 16 byte alignment
+ /* Only valid when ARM64_HAS_IRQ_PRIO_MASKING is enabled. */
+ u64 pmr_save;
u64 stackframe[2];
};
@@ -214,8 +217,13 @@ static inline void forget_syscall(struct pt_regs *regs)
#define processor_mode(regs) \
((regs)->pstate & PSR_MODE_MASK)
-#define interrupts_enabled(regs) \
- (!((regs)->pstate & PSR_I_BIT))
+#define irqs_priority_unmasked(regs) \
+ (system_uses_irq_prio_masking() ? \
+ (regs)->pmr_save == GIC_PRIO_IRQON : \
+ true)
+
+#define interrupts_enabled(regs) \
+ (!((regs)->pstate & PSR_I_BIT) && irqs_priority_unmasked(regs))
#define fast_interrupts_enabled(regs) \
(!((regs)->pstate & PSR_F_BIT))
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 65b8afc..90ab2cf 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -81,6 +81,7 @@ int main(void)
DEFINE(S_ORIG_X0, offsetof(struct pt_regs, orig_x0));
DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno));
DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit));
+ DEFINE(S_PMR_SAVE, offsetof(struct pt_regs, pmr_save));
DEFINE(S_STACKFRAME, offsetof(struct pt_regs, stackframe));
DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
BLANK();
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 0ec0c46..35a47f6 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -249,6 +249,12 @@ alternative_else_nop_endif
msr sp_el0, tsk
.endif
+ /* Save pmr */
+alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+ mrs_s x20, SYS_ICC_PMR_EL1
+ str x20, [sp, #S_PMR_SAVE]
+alternative_else_nop_endif
+
/*
* Registers that may be useful after this macro is invoked:
*
@@ -269,6 +275,14 @@ alternative_else_nop_endif
/* No need to restore UAO, it will be restored from SPSR_EL1 */
.endif
+ /* Restore pmr */
+alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+ ldr x20, [sp, #S_PMR_SAVE]
+ msr_s SYS_ICC_PMR_EL1, x20
+ /* Ensure priority change is seen by redistributor */
+ dsb sy
+alternative_else_nop_endif
+
ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
.if \el == 0
ct_user_enter
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index a0f985a..6d410fc 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -232,6 +232,9 @@ void __show_regs(struct pt_regs *regs)
printk("sp : %016llx\n", sp);
+ if (system_uses_irq_prio_masking())
+ printk("pmr_save: %08llx\n", regs->pmr_save);
+
i = top_reg;
while (i >= 0) {
@@ -363,6 +366,9 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
childregs->pstate |= PSR_SSBS_BIT;
+ if (system_uses_irq_prio_masking())
+ childregs->pmr_save = GIC_PRIO_IRQON;
+
p->thread.cpu_context.x19 = stack_start;
p->thread.cpu_context.x20 = stk_sz;
}
--
1.9.1