Message-ID: <470f6068e87a3627111a2615032d7b6bce824451.camel@marvell.com>
Date: Wed, 22 Jul 2020 14:55:33 +0000
From: Alex Belits <abelits@...vell.com>
To: "frederic@...nel.org" <frederic@...nel.org>,
"rostedt@...dmis.org" <rostedt@...dmis.org>
CC: Prasun Kapoor <pkapoor@...vell.com>,
"mingo@...nel.org" <mingo@...nel.org>,
"davem@...emloft.net" <davem@...emloft.net>,
"linux-api@...r.kernel.org" <linux-api@...r.kernel.org>,
"peterz@...radead.org" <peterz@...radead.org>,
"linux-arch@...r.kernel.org" <linux-arch@...r.kernel.org>,
"catalin.marinas@....com" <catalin.marinas@....com>,
"tglx@...utronix.de" <tglx@...utronix.de>,
"will@...nel.org" <will@...nel.org>,
"linux-arm-kernel@...ts.infradead.org"
<linux-arm-kernel@...ts.infradead.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"netdev@...r.kernel.org" <netdev@...r.kernel.org>
Subject: [PATCH 08/13] task_isolation: arch/arm64: enable task isolation
functionality
From: Chris Metcalf <cmetcalf@...lanox.com>
In do_notify_resume(), call task_isolation_start() for
TIF_TASK_ISOLATION tasks. Add _TIF_TASK_ISOLATION to _TIF_WORK_MASK,
and define a local NOTIFY_RESUME_LOOP_FLAGS to check in the loop,
since we don't clear _TIF_TASK_ISOLATION in the loop.
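For reference, the resulting pattern in do_notify_resume() (a
simplified excerpt of the signal.c hunk below; the loop body is
elided):

	do {
		/* handle reschedule, signals, notify-resume, ... */
		thread_flags = READ_ONCE(current_thread_info()->flags);
	} while (thread_flags & NOTIFY_RESUME_LOOP_FLAGS);

	if (thread_flags & _TIF_TASK_ISOLATION)
		task_isolation_start();

Since _TIF_TASK_ISOLATION stays set, testing it in the loop condition
would never terminate; it is checked once, after the other work flags
have been drained.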
We instrument the smp_send_reschedule() routine so that it checks for
isolated tasks and generates a suitable warning if needed.
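The instrumented routine, as it appears in the smp.c hunk below:

	void smp_send_reschedule(int cpu)
	{
		task_isolation_remote(cpu, "reschedule IPI");
		smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
	}

task_isolation_remote() comes from the core task-isolation patch in
this series and emits the diagnostic when the target CPU is running an
isolated task.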
Finally, report page faults in task-isolation processes from
do_page_fault().
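The report is generated only on the path where no signal is raised
(excerpt from the fault.c hunk below):

	/* No signal was generated, but notify task-isolation tasks. */
	if (user_mode(regs))
		task_isolation_interrupt("page fault at %#lx", addr);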
Early kernel entry code calls task_isolation_kernel_enter(). In
particular (a simplified handler sketch follows this list):
Vectors:
  el1_sync -> el1_sync_handler() -> task_isolation_kernel_enter()
  el1_irq -> asm_nmi_enter(), handle_arch_irq()
  el1_error -> do_serror()
  el0_sync -> el0_sync_handler()
  el0_irq -> handle_arch_irq()
  el0_error -> do_serror()
  el0_sync_compat -> el0_sync_compat_handler()
  el0_irq_compat -> handle_arch_irq()
  el0_error_compat -> do_serror()
SDEI entry:
  __sdei_asm_handler -> __sdei_handler() -> nmi_enter()
Functions called from there:
  asm_nmi_enter() -> nmi_enter() -> task_isolation_kernel_enter()
  asm_nmi_exit() -> nmi_exit() -> task_isolation_kernel_return()
Handlers:
  do_serror() -> nmi_enter() -> task_isolation_kernel_enter(),
    or calls task_isolation_kernel_enter() directly
  el1_sync_handler() -> task_isolation_kernel_enter()
  el0_sync_handler() -> task_isolation_kernel_enter()
  el0_sync_compat_handler() -> task_isolation_kernel_enter()
handle_arch_irq() is irqchip-specific; most implementations call
handle_domain_irq() or handle_IPI(). A separate patch in this series
covers irqchips that do not follow this rule.
  handle_domain_irq() -> task_isolation_kernel_enter()
  handle_IPI() -> task_isolation_kernel_enter()
  nmi_enter() -> task_isolation_kernel_enter()
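For illustration, the common shape of these hooks, taken from the
entry-common.c hunk below (only the hook placement is shown; the
dispatch cases are elided):

	asmlinkage void notrace el1_sync_handler(struct pt_regs *regs)
	{
		unsigned long esr = read_sysreg(esr_el1);

		task_isolation_kernel_enter();

		switch (ESR_ELx_EC(esr)) {
		/* ... existing dispatch ... */
		}
	}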
Signed-off-by: Chris Metcalf <cmetcalf@...lanox.com>
[abelits@...vell.com: simplified to match kernel 5.6]
Signed-off-by: Alex Belits <abelits@...vell.com>
---
arch/arm64/Kconfig | 1 +
arch/arm64/include/asm/barrier.h | 2 ++
arch/arm64/include/asm/thread_info.h | 5 ++++-
arch/arm64/kernel/entry-common.c | 7 +++++++
arch/arm64/kernel/ptrace.c | 16 +++++++++++++++-
arch/arm64/kernel/sdei.c | 2 ++
arch/arm64/kernel/signal.c | 13 ++++++++++++-
arch/arm64/kernel/smp.c | 9 +++++++++
arch/arm64/mm/fault.c | 5 +++++
9 files changed, 57 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 66dc41fd49f2..96fefabfa10f 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -137,6 +137,7 @@ config ARM64
select HAVE_ARCH_PREL32_RELOCATIONS
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_STACKLEAK
+ select HAVE_ARCH_TASK_ISOLATION
select HAVE_ARCH_THREAD_STRUCT_WHITELIST
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index fb4c27506ef4..bf4a2adabd5b 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -48,6 +48,8 @@
#define dma_rmb() dmb(oshld)
#define dma_wmb() dmb(oshst)
+#define instr_sync() isb()
+
/*
* Generate a mask for array_index__nospec() that is ~0UL when 0 <= idx < sz
* and 0 otherwise.
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 5e784e16ee89..73269bb8a57d 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -67,6 +67,7 @@ void arch_release_task_struct(struct task_struct *tsk);
#define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
#define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */
#define TIF_FSCHECK 5 /* Check FS is USER_DS on return */
+#define TIF_TASK_ISOLATION 6 /* task isolation enabled for task */
#define TIF_SYSCALL_TRACE 8 /* syscall trace active */
#define TIF_SYSCALL_AUDIT 9 /* syscall auditing */
#define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */
@@ -86,6 +87,7 @@ void arch_release_task_struct(struct task_struct *tsk);
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_FOREIGN_FPSTATE (1 << TIF_FOREIGN_FPSTATE)
+#define _TIF_TASK_ISOLATION (1 << TIF_TASK_ISOLATION)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
@@ -99,7 +101,8 @@ void arch_release_task_struct(struct task_struct *tsk);
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
_TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
- _TIF_UPROBE | _TIF_FSCHECK)
+ _TIF_UPROBE | _TIF_FSCHECK | \
+ _TIF_TASK_ISOLATION)
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
_TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index d3be9dbf5490..8b682aa020ae 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -8,6 +8,7 @@
#include <linux/context_tracking.h>
#include <linux/ptrace.h>
#include <linux/thread_info.h>
+#include <linux/isolation.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
@@ -70,6 +71,8 @@ asmlinkage void notrace el1_sync_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
+ task_isolation_kernel_enter();
+
switch (ESR_ELx_EC(esr)) {
case ESR_ELx_EC_DABT_CUR:
case ESR_ELx_EC_IABT_CUR:
@@ -231,6 +234,8 @@ asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
+ task_isolation_kernel_enter();
+
switch (ESR_ELx_EC(esr)) {
case ESR_ELx_EC_SVC64:
el0_svc(regs);
@@ -300,6 +305,8 @@ asmlinkage void notrace el0_sync_compat_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
+ task_isolation_kernel_enter();
+
switch (ESR_ELx_EC(esr)) {
case ESR_ELx_EC_SVC32:
el0_svc_compat(regs);
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 1e02e98e68dd..5acfc194bdd0 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -29,6 +29,7 @@
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>
+#include <linux/isolation.h>
#include <asm/compat.h>
#include <asm/cpufeature.h>
@@ -1851,7 +1852,11 @@ static void tracehook_report_syscall(struct pt_regs *regs,
int syscall_trace_enter(struct pt_regs *regs)
{
- unsigned long flags = READ_ONCE(current_thread_info()->flags);
+ unsigned long flags;
+
+ task_isolation_kernel_enter();
+
+ flags = READ_ONCE(current_thread_info()->flags);
if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
@@ -1859,6 +1864,15 @@ int syscall_trace_enter(struct pt_regs *regs)
return NO_SYSCALL;
}
+ /*
+ * In task isolation mode, we may prevent the syscall from
+ * running, and if so we also deliver a signal to the process.
+ */
+ if (test_thread_flag(TIF_TASK_ISOLATION)) {
+ if (task_isolation_syscall(regs->syscallno) == -1)
+ return NO_SYSCALL;
+ }
+
/* Do the secure computing after ptrace; failures should be fast. */
if (secure_computing() == -1)
return NO_SYSCALL;
diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c
index dab88260b137..f65b676132f9 100644
--- a/arch/arm64/kernel/sdei.c
+++ b/arch/arm64/kernel/sdei.c
@@ -8,6 +8,7 @@
#include <linux/irqflags.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>
+#include <linux/isolation.h>
#include <asm/alternative.h>
#include <asm/kprobes.h>
@@ -185,6 +186,7 @@ static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
__uaccess_enable_hw_pan();
err = sdei_event_handler(regs, arg);
+ task_isolation_interrupt("SDEI handled");
if (err)
return SDEI_EV_FAILED;
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 3b4f31f35e45..ece90c5756be 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -20,6 +20,7 @@
#include <linux/tracehook.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
+#include <linux/isolation.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
@@ -907,6 +908,11 @@ static void do_signal(struct pt_regs *regs)
restore_saved_sigmask();
}
+#define NOTIFY_RESUME_LOOP_FLAGS \
+ (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+ _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
+ _TIF_UPROBE | _TIF_FSCHECK)
+
asmlinkage void do_notify_resume(struct pt_regs *regs,
unsigned long thread_flags)
{
@@ -917,6 +923,8 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
*/
trace_hardirqs_off();
+ task_isolation_check_run_cleanup();
+
do {
/* Check valid user FS if needed */
addr_limit_user_check();
@@ -947,7 +955,10 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
local_daif_mask();
thread_flags = READ_ONCE(current_thread_info()->flags);
- } while (thread_flags & _TIF_WORK_MASK);
+ } while (thread_flags & NOTIFY_RESUME_LOOP_FLAGS);
+
+ if (thread_flags & _TIF_TASK_ISOLATION)
+ task_isolation_start();
}
unsigned long __ro_after_init signal_minsigstksz;
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index e43a8ff19f0f..c893c8babe76 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -32,6 +32,7 @@
#include <linux/irq_work.h>
#include <linux/kexec.h>
#include <linux/kvm_host.h>
+#include <linux/isolation.h>
#include <asm/alternative.h>
#include <asm/atomic.h>
@@ -835,6 +836,7 @@ void arch_send_call_function_single_ipi(int cpu)
#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
+ task_isolation_remote_cpumask(mask, "wakeup IPI");
smp_cross_call(mask, IPI_WAKEUP);
}
#endif
@@ -896,11 +898,16 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
unsigned int cpu = smp_processor_id();
struct pt_regs *old_regs = set_irq_regs(regs);
+ task_isolation_kernel_enter();
+
if ((unsigned)ipinr < NR_IPI) {
trace_ipi_entry_rcuidle(ipi_types[ipinr]);
__inc_irq_stat(cpu, ipi_irqs[ipinr]);
}
+ task_isolation_interrupt("IPI type %d (%s)", ipinr,
+ ipinr < NR_IPI ? ipi_types[ipinr] : "unknown");
+
switch (ipinr) {
case IPI_RESCHEDULE:
scheduler_ipi();
@@ -963,12 +970,14 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
void smp_send_reschedule(int cpu)
{
+ task_isolation_remote(cpu, "reschedule IPI");
smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
+ task_isolation_remote_cpumask(mask, "timer IPI");
smp_cross_call(mask, IPI_TIMER);
}
#endif
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 8afb238ff335..d01f3cbed87f 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -23,6 +23,7 @@
#include <linux/perf_event.h>
#include <linux/preempt.h>
#include <linux/hugetlb.h>
+#include <linux/isolation.h>
#include <asm/acpi.h>
#include <asm/bug.h>
@@ -539,6 +540,10 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
*/
if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
VM_FAULT_BADACCESS)))) {
+ /* No signal was generated, but notify task-isolation tasks. */
+ if (user_mode(regs))
+ task_isolation_interrupt("page fault at %#lx", addr);
+
/*
* Major/minor page fault accounting is only done
* once. If we go through a retry, it is extremely
--
2.26.2