Message-Id: <1437208216-15729-13-git-send-email-jcmvbkbc@gmail.com>
Date: Sat, 18 Jul 2015 11:30:15 +0300
From: Max Filippov <jcmvbkbc@...il.com>
To: linux-xtensa@...ux-xtensa.org, linux-kernel@...r.kernel.org
Cc: Chris Zankel <chris@...kel.net>, Marc Gauthier <marc@...ence.com>,
Max Filippov <jcmvbkbc@...il.com>,
Peter Zijlstra <a.p.zijlstra@...llo.nl>
Subject: [PATCH v2 12/13] xtensa: implement fake NMI
In case the perf IRQ is the highest of the medium-level IRQs and is alone
on its level, it may be treated as an NMI:
- LOCKLEVEL is defined to be one level less than the EXCM level,
- IRQ masking never lowers the current IRQ level,
- a new fake exception cause code, EXCCAUSE_MAPPED_NMI, is assigned to
  that IRQ; a new second-level exception handler, do_nmi, is assigned to
  it and handles it as an NMI,
- atomic operations in configurations without s32c1i still need to mask
  all interrupts.
Cc: Peter Zijlstra <a.p.zijlstra@...llo.nl>
Signed-off-by: Max Filippov <jcmvbkbc@...il.com>
---
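A note for reviewers: the sketch below illustrates the resulting masking
behaviour; the *_sketch names are illustrative only and not part of the
patch. With XTENSA_FAKE_NMI, local_irq_save() raises PS.INTLEVEL only to
LOCKLEVEL, one below TOPLEVEL (== XCHAL_EXCM_LEVEL), so the perf IRQ,
sitting alone on TOPLEVEL, stays unmasked and effectively behaves as an
NMI.

/* Minimal sketch, assuming an XEA2 core whose XCHAL_* values come from
 * the variant headers; it mirrors the irqflags.h change below.
 */
#define TOPLEVEL_SKETCH		XCHAL_EXCM_LEVEL	/* perf IRQ level */
#define LOCKLEVEL_SKETCH	(XCHAL_EXCM_LEVEL - 1)	/* all other IRQs */

static inline unsigned long irq_save_sketch(void)
{
	unsigned long flags;

	/* OR the lock level into PS.INTLEVEL instead of writing it with
	 * rsil: the level can only be raised, never lowered, so a fake
	 * NMI that preempts this sequence is never unmasked by accident.
	 */
	asm volatile("rsr %0, ps\n"
		     "or %0, %0, %1\n"
		     "xsr %0, ps\n"
		     "rsync"
		     : "=&a" (flags)
		     : "a" (LOCKLEVEL_SKETCH)
		     : "memory");
	return flags;
}

Whether LOCKLEVEL may actually drop below TOPLEVEL is decided at compile
time in the processor.h hunk below: the profiling interrupt must exist,
sit alone on XCHAL_EXCM_LEVEL, and that level must be greater than 1.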
arch/xtensa/include/asm/atomic.h | 10 ++--
arch/xtensa/include/asm/cmpxchg.h | 4 +-
arch/xtensa/include/asm/irqflags.h | 22 ++++++++-
arch/xtensa/include/asm/processor.h | 31 ++++++++++++-
arch/xtensa/kernel/entry.S | 93 +++++++++++++++++++++++++++++++------
arch/xtensa/kernel/irq.c | 8 ++++
arch/xtensa/kernel/perf_event.c | 6 ++-
arch/xtensa/kernel/traps.c | 26 +++++++++++
arch/xtensa/kernel/vectors.S | 10 +++-
9 files changed, 183 insertions(+), 27 deletions(-)
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index 00b7d46..ebcd1f6 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -29,7 +29,7 @@
*
* Locking interrupts looks like this:
*
- * rsil a15, LOCKLEVEL
+ * rsil a15, TOPLEVEL
* <code>
* wsr a15, PS
* rsync
@@ -106,7 +106,7 @@ static inline void atomic_##op(int i, atomic_t * v) \
unsigned int vval; \
\
__asm__ __volatile__( \
- " rsil a15, "__stringify(LOCKLEVEL)"\n"\
+ " rsil a15, "__stringify(TOPLEVEL)"\n"\
" l32i %0, %2, 0\n" \
" " #op " %0, %0, %1\n" \
" s32i %0, %2, 0\n" \
@@ -124,7 +124,7 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
unsigned int vval; \
\
__asm__ __volatile__( \
- " rsil a15,"__stringify(LOCKLEVEL)"\n" \
+ " rsil a15,"__stringify(TOPLEVEL)"\n" \
" l32i %0, %2, 0\n" \
" " #op " %0, %0, %1\n" \
" s32i %0, %2, 0\n" \
@@ -272,7 +272,7 @@ static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
unsigned int vval;
__asm__ __volatile__(
- " rsil a15,"__stringify(LOCKLEVEL)"\n"
+ " rsil a15,"__stringify(TOPLEVEL)"\n"
" l32i %0, %2, 0\n"
" xor %1, %4, %3\n"
" and %0, %0, %4\n"
@@ -306,7 +306,7 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
unsigned int vval;
__asm__ __volatile__(
- " rsil a15,"__stringify(LOCKLEVEL)"\n"
+ " rsil a15,"__stringify(TOPLEVEL)"\n"
" l32i %0, %2, 0\n"
" or %0, %0, %1\n"
" s32i %0, %2, 0\n"
diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h
index 370b26f..201e900 100644
--- a/arch/xtensa/include/asm/cmpxchg.h
+++ b/arch/xtensa/include/asm/cmpxchg.h
@@ -34,7 +34,7 @@ __cmpxchg_u32(volatile int *p, int old, int new)
return new;
#else
__asm__ __volatile__(
- " rsil a15, "__stringify(LOCKLEVEL)"\n"
+ " rsil a15, "__stringify(TOPLEVEL)"\n"
" l32i %0, %1, 0\n"
" bne %0, %2, 1f\n"
" s32i %3, %1, 0\n"
@@ -123,7 +123,7 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
#else
unsigned long tmp;
__asm__ __volatile__(
- " rsil a15, "__stringify(LOCKLEVEL)"\n"
+ " rsil a15, "__stringify(TOPLEVEL)"\n"
" l32i %0, %1, 0\n"
" s32i %2, %1, 0\n"
" wsr a15, ps\n"
diff --git a/arch/xtensa/include/asm/irqflags.h b/arch/xtensa/include/asm/irqflags.h
index ea36674..8e090c7 100644
--- a/arch/xtensa/include/asm/irqflags.h
+++ b/arch/xtensa/include/asm/irqflags.h
@@ -6,6 +6,7 @@
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
*/
#ifndef _XTENSA_IRQFLAGS_H
@@ -23,8 +24,27 @@ static inline unsigned long arch_local_save_flags(void)
static inline unsigned long arch_local_irq_save(void)
{
unsigned long flags;
- asm volatile("rsil %0, "__stringify(LOCKLEVEL)
+#if XTENSA_FAKE_NMI
+#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
+ unsigned long tmp;
+
+ asm volatile("rsr %0, ps\t\n"
+ "extui %1, %0, 0, 4\t\n"
+ "bgei %1, "__stringify(LOCKLEVEL)", 1f\t\n"
+ "rsil %0, "__stringify(LOCKLEVEL)"\n"
+ "1:"
+ : "=a" (flags), "=a" (tmp) :: "memory");
+#else
+ asm volatile("rsr %0, ps\t\n"
+ "or %0, %0, %1\t\n"
+ "xsr %0, ps\t\n"
+ "rsync"
+ : "=&a" (flags) : "a" (LOCKLEVEL) : "memory");
+#endif
+#else
+ asm volatile("rsil %0, "__stringify(LOCKLEVEL)
: "=a" (flags) :: "memory");
+#endif
return flags;
}
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index b61bdf0..9ed9b4a 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -1,11 +1,10 @@
/*
- * include/asm-xtensa/processor.h
- *
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2008 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
*/
#ifndef _XTENSA_PROCESSOR_H
@@ -45,6 +44,14 @@
#define STACK_TOP_MAX STACK_TOP
/*
+ * General exception cause assigned to fake NMI. Fake NMI needs to be handled
+ * differently from other interrupts, but it uses common kernel entry/exit
+ * code.
+ */
+
+#define EXCCAUSE_MAPPED_NMI 62
+
+/*
* General exception cause assigned to debug exceptions. Debug exceptions go
* to their own vector, rather than the general exception vectors (user,
* kernel, double); and their specific causes are reported via DEBUGCAUSE
@@ -65,10 +72,30 @@
#define VALID_DOUBLE_EXCEPTION_ADDRESS 64
+#define XTENSA_INT_LEVEL(a) _XTENSA_INT_LEVEL(a)
+#define _XTENSA_INT_LEVEL(a) XCHAL_INT##a##_LEVEL
+
+#define XTENSA_INTLEVEL_MASK(a) _XTENSA_INTLEVEL_MASK(a)
+#define _XTENSA_INTLEVEL_MASK(a) (XCHAL_INTLEVEL##a##_MASK)
+
+#define IS_POW2(v) (((v) & ((v) - 1)) == 0)
+
+#define PROFILING_INTLEVEL XTENSA_INT_LEVEL(XCHAL_PROFILING_INTERRUPT)
+
/* LOCKLEVEL defines the interrupt level that masks all
* general-purpose interrupts.
*/
+#if defined(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) && \
+ defined(XCHAL_PROFILING_INTERRUPT) && \
+ PROFILING_INTLEVEL == XCHAL_EXCM_LEVEL && \
+ XCHAL_EXCM_LEVEL > 1 && \
+ IS_POW2(XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL))
+#define LOCKLEVEL (XCHAL_EXCM_LEVEL - 1)
+#else
#define LOCKLEVEL XCHAL_EXCM_LEVEL
+#endif
+#define TOPLEVEL XCHAL_EXCM_LEVEL
+#define XTENSA_FAKE_NMI (LOCKLEVEL < TOPLEVEL)
/* WSBITS and WBBITS are the width of the WINDOWSTART and WINDOWBASE
* registers
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 309e71b..dd766ed 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -1,6 +1,4 @@
/*
- * arch/xtensa/kernel/entry.S
- *
* Low-level exception handling
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -8,6 +6,7 @@
* for more details.
*
* Copyright (C) 2004 - 2008 by Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
*
* Chris Zankel <chris@...kel.net>
*
@@ -79,6 +78,27 @@
#endif
.endm
+
+ .macro irq_save flags tmp
+#if XTENSA_FAKE_NMI
+#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
+ rsr \flags, ps
+ extui \tmp, \flags, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+ bgei \tmp, LOCKLEVEL, 99f
+ rsil \tmp, LOCKLEVEL
+99:
+#else
+ movi \tmp, LOCKLEVEL
+ rsr \flags, ps
+ or \flags, \flags, \tmp
+ xsr \flags, ps
+ rsync
+#endif
+#else
+ rsil \flags, LOCKLEVEL
+#endif
+ .endm
+
/* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */
/*
@@ -366,11 +386,11 @@ common_exception:
/* It is now safe to restore the EXC_TABLE_FIXUP variable. */
- rsr a0, exccause
+ rsr a2, exccause
movi a3, 0
- rsr a2, excsave1
- s32i a0, a1, PT_EXCCAUSE
- s32i a3, a2, EXC_TABLE_FIXUP
+ rsr a0, excsave1
+ s32i a2, a1, PT_EXCCAUSE
+ s32i a3, a0, EXC_TABLE_FIXUP
/* All unrecoverable states are saved on stack, now, and a1 is valid.
* Now we can allow exceptions again. In case we've got an interrupt
@@ -381,21 +401,48 @@ common_exception:
*/
rsr a3, ps
- addi a0, a0, -EXCCAUSE_LEVEL1_INTERRUPT
- movi a2, LOCKLEVEL
+ s32i a3, a1, PT_PS # save ps
+
+#if XTENSA_FAKE_NMI
+ /* The correct PS needs to be saved in PT_PS:
+ * - in case of an exception or a level-1 interrupt it's in PS
+ * and is already saved;
+ * - in case of a medium-level interrupt it's in excsave2.
+ */
+ movi a0, EXCCAUSE_MAPPED_NMI
+ extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+ beq a2, a0, .Lmedium_level_irq
+ bnei a2, EXCCAUSE_LEVEL1_INTERRUPT, .Lexception
+ beqz a3, .Llevel1_irq # level-1 IRQ sets ps.intlevel to 0
+
+.Lmedium_level_irq:
+ rsr a0, excsave2
+ s32i a0, a1, PT_PS # save medium-level interrupt ps
+ bgei a3, LOCKLEVEL, .Lexception
+
+.Llevel1_irq:
+ movi a3, LOCKLEVEL
+
+.Lexception:
+ movi a0, 1 << PS_WOE_BIT
+ or a3, a3, a0
+#else
+ addi a2, a2, -EXCCAUSE_LEVEL1_INTERRUPT
+ movi a0, LOCKLEVEL
extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
# a3 = PS.INTLEVEL
- moveqz a3, a2, a0 # a3 = LOCKLEVEL iff interrupt
+ moveqz a3, a0, a2 # a3 = LOCKLEVEL iff interrupt
movi a2, 1 << PS_WOE_BIT
or a3, a3, a2
rsr a2, exccause
+#endif
+
#ifdef XTENSA_CONTINUOUS_STACK
/* restore return address (or 0 if return to userspace) */
rsr a0, depc
#endif
- xsr a3, ps
-
- s32i a3, a1, PT_PS # save ps
+ wsr a3, ps
+ rsync # PS.WOE => rsync => overflow
/* Save lbeg, lend */
@@ -433,8 +480,13 @@ common_exception:
.global common_exception_return
common_exception_return:
+#if XTENSA_FAKE_NMI
+ l32i a2, a1, PT_EXCCAUSE
+ movi a3, EXCCAUSE_MAPPED_NMI
+ beq a2, a3, .LNMIexit
+#endif
1:
- rsil a2, LOCKLEVEL
+ irq_save a2, a3
#ifdef CONFIG_TRACE_IRQFLAGS
movi a4, trace_hardirqs_off
callx4 a4
@@ -497,6 +549,12 @@ common_exception_return:
j 1b
#endif
+#if XTENSA_FAKE_NMI
+.LNMIexit:
+ l32i a3, a1, PT_PS
+ _bbci.l a3, PS_UM_BIT, 4f
+#endif
+
5:
#ifdef CONFIG_DEBUG_TLB_SANITY
l32i a4, a1, PT_DEPC
@@ -1579,6 +1637,13 @@ ENTRY(fast_second_level_miss)
rfde
9: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0
+ bnez a0, 8b
+
+ /* Even more unlikely case: active_mm == 0.
+ * We can get here when an NMI hits in the middle of a context_switch
+ * that touches the vmalloc area.
+ */
+ movi a0, init_mm
j 8b
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
@@ -1882,7 +1947,7 @@ ENTRY(_switch_to)
/* Disable ints while we manipulate the stack pointer. */
- rsil a14, LOCKLEVEL
+ irq_save a14, a3
rsync
/* Switch CPENABLE */
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index 32b6056..8d4f5de 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -29,6 +29,7 @@
#include <asm/platform.h>
atomic_t irq_err_count;
+DECLARE_PER_CPU(unsigned long, nmi_count);
asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
{
@@ -57,11 +58,18 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
int arch_show_interrupts(struct seq_file *p, int prec)
{
+ unsigned cpu __maybe_unused;
#ifdef CONFIG_SMP
show_ipi_list(p, prec);
#endif
seq_printf(p, "%*s: ", prec, "ERR");
seq_printf(p, "%10u\n", atomic_read(&irq_err_count));
+#if XTENSA_FAKE_NMI
+ seq_printf(p, "%*s:", prec, "NMI");
+ for_each_online_cpu(cpu)
+ seq_printf(p, " %10lu", per_cpu(nmi_count, cpu));
+ seq_puts(p, " Non-maskable interrupts\n");
+#endif
return 0;
}
diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c
index b44df3c..54f0118 100644
--- a/arch/xtensa/kernel/perf_event.c
+++ b/arch/xtensa/kernel/perf_event.c
@@ -359,7 +359,7 @@ void perf_event_print_debug(void)
local_irq_restore(flags);
}
-static irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id)
+irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id)
{
irqreturn_t rc = IRQ_NONE;
struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events);
@@ -436,10 +436,14 @@ static int __init xtensa_pmu_init(void)
int irq = irq_create_mapping(NULL, XCHAL_PROFILING_INTERRUPT);
perf_cpu_notifier(xtensa_pmu_notifier);
+#if XTENSA_FAKE_NMI
+ enable_irq(irq);
+#else
ret = request_irq(irq, xtensa_pmu_irq_handler, IRQF_PERCPU,
"pmu", NULL);
if (ret < 0)
return ret;
+#endif
ret = perf_pmu_register(&xtensa_pmu, "cpu", PERF_TYPE_RAW);
if (ret)
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index a1b5bd2..42d441f 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -62,6 +62,7 @@ extern void fast_coprocessor(void);
extern void do_illegal_instruction (struct pt_regs*);
extern void do_interrupt (struct pt_regs*);
+extern void do_nmi(struct pt_regs *);
extern void do_unaligned_user (struct pt_regs*);
extern void do_multihit (struct pt_regs*, unsigned long);
extern void do_page_fault (struct pt_regs*, unsigned long);
@@ -146,6 +147,9 @@ COPROCESSOR(6),
#if XTENSA_HAVE_COPROCESSOR(7)
COPROCESSOR(7),
#endif
+#if XTENSA_FAKE_NMI
+{ EXCCAUSE_MAPPED_NMI, 0, do_nmi },
+#endif
{ EXCCAUSE_MAPPED_DEBUG, 0, do_debug },
{ -1, -1, 0 }
@@ -199,6 +203,28 @@ void do_multihit(struct pt_regs *regs, unsigned long exccause)
extern void do_IRQ(int, struct pt_regs *);
+#if XTENSA_FAKE_NMI
+
+irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id);
+
+DEFINE_PER_CPU(unsigned long, nmi_count);
+
+void do_nmi(struct pt_regs *regs)
+{
+ struct pt_regs *old_regs;
+
+ if ((regs->ps & PS_INTLEVEL_MASK) < LOCKLEVEL)
+ trace_hardirqs_off();
+
+ old_regs = set_irq_regs(regs);
+ nmi_enter();
+ ++*this_cpu_ptr(&nmi_count);
+ xtensa_pmu_irq_handler(0, NULL);
+ nmi_exit();
+ set_irq_regs(old_regs);
+}
+#endif
+
void do_interrupt(struct pt_regs *regs)
{
static const unsigned int_level_mask[] = {
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index 1b397a9..abcdb52 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -627,7 +627,11 @@ ENTRY(_Level\level\()InterruptVector)
wsr a0, excsave2
rsr a0, epc\level
wsr a0, epc1
+ .if \level <= LOCKLEVEL
movi a0, EXCCAUSE_LEVEL1_INTERRUPT
+ .else
+ movi a0, EXCCAUSE_MAPPED_NMI
+ .endif
wsr a0, exccause
rsr a0, eps\level
# branch to user or kernel vector
@@ -682,11 +686,13 @@ ENDPROC(_WindowOverflow4)
.align 4
_SimulateUserKernelVectorException:
addi a0, a0, (1 << PS_EXCM_BIT)
+#if !XTENSA_FAKE_NMI
wsr a0, ps
+#endif
bbsi.l a0, PS_UM_BIT, 1f # branch if user mode
- rsr a0, excsave2 # restore a0
+ xsr a0, excsave2 # restore a0
j _KernelExceptionVector # simulate kernel vector exception
-1: rsr a0, excsave2 # restore a0
+1: xsr a0, excsave2 # restore a0
j _UserExceptionVector # simulate user vector exception
#endif
--
1.8.1.4