Date:   Mon,  4 May 2020 16:49:37 +0200
From:   Alexandre Chartre <alexandre.chartre@...cle.com>
To:     rkrcmar@...hat.com, tglx@...utronix.de, mingo@...hat.com,
        bp@...en8.de, hpa@...or.com, dave.hansen@...ux.intel.com,
        luto@...nel.org, peterz@...radead.org, x86@...nel.org,
        linux-mm@...ck.org, linux-kernel@...r.kernel.org
Cc:     pbonzini@...hat.com, konrad.wilk@...cle.com,
        jan.setjeeilers@...cle.com, liran.alon@...cle.com,
        junaids@...gle.com, graf@...zon.de, rppt@...ux.vnet.ibm.com,
        kuzuno@...il.com, mgross@...ux.intel.com,
        alexandre.chartre@...cle.com
Subject: [RFC v4][PATCH part-1 5/7] mm/asi: Exit/enter ASI when task enters/exits scheduler

Exit ASI as soon as a task enters the scheduler (__schedule());
otherwise ASI will likely fault quickly, for example when accessing
run queues. The task returns to ASI when it is scheduled again.
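
For reference, the resulting pairing in __schedule() looks roughly like
this (a simplified sketch of the hunks below; the exact placement is in
the kernel/sched/core.c change):

	static void __sched notrace __schedule(bool preempt)
	{
		/* leave ASI before touching scheduler structures (run queues, ...) */
		if (IS_ENABLED(CONFIG_ADDRESS_SPACE_ISOLATION))
			asi_schedule_out(current);

		/* ... pick the next task, context_switch(), balance_callback() ... */

		/* the task that resumes here re-enters its ASI, if it had one */
		if (IS_ENABLED(CONFIG_ADDRESS_SPACE_ISOLATION))
			asi_schedule_in(current);
	}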

Signed-off-by: Alexandre Chartre <alexandre.chartre@...cle.com>
---
 arch/x86/include/asm/asi.h |  3 ++
 arch/x86/mm/asi.c          | 67 ++++++++++++++++++++++++++++++++++++++
 include/linux/sched.h      |  9 +++++
 kernel/sched/core.c        | 17 ++++++++++
 4 files changed, 96 insertions(+)

diff --git a/arch/x86/include/asm/asi.h b/arch/x86/include/asm/asi.h
index d240954b2f85..a0733f1e4a67 100644
--- a/arch/x86/include/asm/asi.h
+++ b/arch/x86/include/asm/asi.h
@@ -102,6 +102,9 @@ struct asi {
 	unsigned long		base_cr3;	/* base ASI CR3 */
 };
 
+void asi_schedule_out(struct task_struct *task);
+void asi_schedule_in(struct task_struct *task);
+
 extern struct asi *asi_create(struct asi_type *type);
 extern void asi_destroy(struct asi *asi);
 extern void asi_set_pagetable(struct asi *asi, pgd_t *pagetable);
diff --git a/arch/x86/mm/asi.c b/arch/x86/mm/asi.c
index c91ba82a095b..3795582c66d8 100644
--- a/arch/x86/mm/asi.c
+++ b/arch/x86/mm/asi.c
@@ -229,3 +229,70 @@ void asi_prepare_resume(void)
 
 	asi_switch_to_asi_cr3(asi_session->asi, ASI_SWITCH_ON_RESUME);
 }
+
+void asi_schedule_out(struct task_struct *task)
+{
+	struct asi_session *asi_session;
+	unsigned long flags;
+	struct asi *asi;
+
+	asi = this_cpu_read(cpu_asi_session.asi);
+	if (!asi)
+		return;
+
+	/*
+	 * Save the ASI session.
+	 *
+	 * Exit the session if it hasn't been interrupted, otherwise
+	 * just save the session state.
+	 */
+	local_irq_save(flags);
+	if (!this_cpu_read(cpu_asi_session.idepth)) {
+		asi_switch_to_kernel_cr3(asi);
+		task->asi_session.asi = asi;
+		task->asi_session.idepth = 0;
+	} else {
+		asi_session = &get_cpu_var(cpu_asi_session);
+		task->asi_session = *asi_session;
+		asi_session->asi = NULL;
+		asi_session->idepth = 0;
+	}
+	local_irq_restore(flags);
+}
+
+void asi_schedule_in(struct task_struct *task)
+{
+	struct asi_session *asi_session;
+	unsigned long flags;
+	struct asi *asi;
+
+	asi = task->asi_session.asi;
+	if (!asi)
+		return;
+
+	/*
+	 * At this point, the CPU shouldn't be using ASI because the
+	 * ASI session is expected to be cleared in asi_schedule_out().
+	 */
+	WARN_ON(this_cpu_read(cpu_asi_session.asi));
+
+	/*
+	 * Restore ASI.
+	 *
+	 * If the task was scheduled out while using ASI, then the ASI
+	 * is already set up and we can immediately switch to the ASI
+	 * page table.
+	 *
+	 * Otherwise, if the task was scheduled out while ASI was
+	 * interrupted, just restore the ASI session.
+	 */
+	local_irq_save(flags);
+	if (!task->asi_session.idepth) {
+		asi_switch_to_asi_cr3(asi, ASI_SWITCH_NOW);
+	} else {
+		asi_session = &get_cpu_var(cpu_asi_session);
+		*asi_session = task->asi_session;
+	}
+	task->asi_session.asi = NULL;
+	local_irq_restore(flags);
+}
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4418f5cb8324..ea86bda713ee 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -10,6 +10,7 @@
 #include <uapi/linux/sched.h>
 
 #include <asm/current.h>
+#include <asm/asi_session.h>
 
 #include <linux/pid.h>
 #include <linux/sem.h>
@@ -1289,6 +1290,14 @@ struct task_struct {
 	unsigned long			prev_lowest_stack;
 #endif
 
+#ifdef CONFIG_ADDRESS_SPACE_ISOLATION
+	/*
+	 * The ASI session is saved here when the task is scheduled
+	 * out while an ASI session is active or interrupted.
+	 */
+	struct asi_session		asi_session;
+#endif
+
 	/*
 	 * New fields for task_struct should be added above here, so that
 	 * they are included in the randomized portion of task_struct.
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9a2fbf98fd6f..140071cfa25d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -14,6 +14,7 @@
 
 #include <asm/switch_to.h>
 #include <asm/tlb.h>
+#include <asm/asi.h>
 
 #include "../workqueue_internal.h"
 #include "../../fs/io-wq.h"
@@ -3241,6 +3242,7 @@ static struct rq *finish_task_switch(struct task_struct *prev)
 	}
 
 	tick_nohz_task_switch();
+
 	return rq;
 }
 
@@ -4006,6 +4008,14 @@ static void __sched notrace __schedule(bool preempt)
 	struct rq *rq;
 	int cpu;
 
+	/*
+	 * If the task is using ASI then exit it right away, otherwise
+	 * ASI will likely fault quickly, for example when accessing
+	 * run queues.
+	 */
+	if (IS_ENABLED(CONFIG_ADDRESS_SPACE_ISOLATION))
+		asi_schedule_out(current);
+
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
 	prev = rq->curr;
@@ -4087,6 +4097,13 @@ static void __sched notrace __schedule(bool preempt)
 	}
 
 	balance_callback(rq);
+
+	/*
+	 * Now that the task will resume execution, we can safely
+	 * return to its ASI if one was in use.
+	 */
+	if (IS_ENABLED(CONFIG_ADDRESS_SPACE_ISOLATION))
+		asi_schedule_in(current);
 }
 
 void __noreturn do_task_dead(void)
-- 
2.18.2
