Date:	Thu, 28 Jun 2012 15:08:32 +0900
From:	Tomoki Sekiyama <tomoki.sekiyama.qu@...achi.com>
To:	kvm@...r.kernel.org
Cc:	linux-kernel@...r.kernel.org, x86@...nel.org,
	yrl.pp-manager.tt@...achi.com,
	Tomoki Sekiyama <tomoki.sekiyama.qu@...achi.com>,
	Avi Kivity <avi@...hat.com>,
	Marcelo Tosatti <mtosatti@...hat.com>,
	Thomas Gleixner <tglx@...utronix.de>,
	Ingo Molnar <mingo@...hat.com>,
	"H. Peter Anvin" <hpa@...or.com>
Subject: [RFC PATCH 14/18] KVM: Directly handle interrupts by guests without
 VM EXIT on slave CPUs

Make interrupts on slave CPUs be handled by the guest directly,
without a VM EXIT. This reduces the CPU time the host spends
forwarding interrupts of assigned PCI devices to guests, avoids the
cost of the VM EXIT itself, and shortens the guests' response time to
those interrupts.
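
For illustration, here is a minimal sketch of what "without VM EXIT"
means at the VMX level. It mirrors the vmx.c hunk below;
set_ext_intr_exiting() is a made-up name used only for this sketch,
while vmcs_set_bits()/vmcs_clear_bits() are the existing VMCS helpers
in arch/x86/kvm/vmx.c:

	/* Sketch: toggle external-interrupt exiting for a slave CPU.
	 * With PIN_BASED_EXT_INTR_MASK cleared, an external interrupt
	 * that arrives while the guest runs is delivered through the
	 * guest IDT instead of causing a VM exit; NMI exiting is not
	 * affected. */
	static void set_ext_intr_exiting(bool exit_on_ext_intr)
	{
		if (exit_on_ext_intr)
			vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
				      PIN_BASED_EXT_INTR_MASK);
		else
			vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
					PIN_BASED_EXT_INTR_MASK);
	}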

When a slave CPU is dedicated to a vCPU, exiting on external
interrupts is disabled. Unfortunately, VMX only lets us enable or
disable exiting for external interrupts as a whole (NMIs excepted);
exiting cannot be switched per IRQ# or per vector. An ordinary
reschedule IPI sent to a slave CPU would therefore be delivered to
the guest instead of the host, so this patch modifies kvm_vcpu_kick()
to use an NMI, which still forces a VM EXIT, to kick guests running
on slave CPUs.
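
Put differently, the resulting kick path looks roughly like the
sketch below. kick_vcpu_sketch() is a made-up name for illustration;
cpu_slave() comes from earlier in this series, and the remaining
helpers are either existing kernel/KVM code or added by this patch:

	/* Sketch: how a kick reaches a vCPU depending on where it runs. */
	static void kick_vcpu_sketch(struct kvm_vcpu *vcpu, int cpu)
	{
		if (cpu_online(cpu)) {
			/* normal case: a reschedule IPI forces a VM exit */
			if (kvm_arch_vcpu_should_kick(vcpu))
				smp_send_reschedule(cpu);
		} else if (cpu_slave(cpu)) {
			/* external-interrupt exiting is off, so an ordinary
			 * IPI would be taken by the guest; an NMI still
			 * exits and is claimed by the "kvm_kick" handler */
			if (kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE)
				apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
						    NMI_VECTOR);
		}
	}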

Signed-off-by: Tomoki Sekiyama <tomoki.sekiyama.qu@...achi.com>
Cc: Avi Kivity <avi@...hat.com>
Cc: Marcelo Tosatti <mtosatti@...hat.com>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: "H. Peter Anvin" <hpa@...or.com>
---

 arch/x86/kvm/vmx.c       |    4 ++++
 arch/x86/kvm/x86.c       |   40 ++++++++++++++++++++++++++++++++++++++++
 include/linux/kvm_host.h |    1 +
 virt/kvm/kvm_main.c      |    5 +++--
 4 files changed, 48 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f0c6532..3aea448 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7154,9 +7154,13 @@ static void vmx_set_slave_mode(struct kvm_vcpu *vcpu, bool slave)
 	if (slave) {
 		vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
 				CPU_BASED_HLT_EXITING);
+		vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
+				PIN_BASED_EXT_INTR_MASK);
 	} else {
 		vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
 			      CPU_BASED_HLT_EXITING);
+		vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
+			      PIN_BASED_EXT_INTR_MASK);
 	}
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index df5eb05..2e414a1 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -63,6 +63,7 @@
 #include <asm/pvclock.h>
 #include <asm/div64.h>
 #include <asm/cpu.h>
+#include <asm/nmi.h>
 #include <asm/mmu.h>
 
 #define MAX_IO_MSRS 256
@@ -2635,6 +2636,8 @@ static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
 
 #ifdef CONFIG_SLAVE_CPU
 
+static int kvm_arch_kicked_by_nmi(unsigned int cmd, struct pt_regs *regs);
+
 static int kvm_arch_vcpu_ioctl_set_slave_cpu(struct kvm_vcpu *vcpu,
 					     int slave, int set_slave_mode)
 {
@@ -4998,6 +5001,11 @@ int kvm_arch_init(void *opaque)
 	if (cpu_has_xsave)
 		host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
 
+#ifdef CONFIG_SLAVE_CPU
+	register_nmi_handler(NMI_LOCAL, kvm_arch_kicked_by_nmi, 0,
+			     "kvm_kick");
+#endif
+
 	return 0;
 
 out:
@@ -5014,6 +5022,7 @@ void kvm_arch_exit(void)
 	unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block);
 #ifdef CONFIG_SLAVE_CPU
 	unregister_slave_cpu_notifier(&kvmclock_slave_cpu_notifier_block);
+	unregister_nmi_handler(NMI_LOCAL, "kvm_kick");
 #endif
 	kvm_x86_ops = NULL;
 	kvm_mmu_module_exit();
@@ -5311,6 +5320,28 @@ static void process_nmi(struct kvm_vcpu *vcpu)
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 }
 
+#ifdef CONFIG_SLAVE_CPU
+/* vcpu currently running on each slave CPU */
+static DEFINE_PER_CPU(struct kvm_vcpu *, slave_vcpu);
+
+static int kvm_arch_kicked_by_nmi(unsigned int cmd, struct pt_regs *regs)
+{
+	struct kvm_vcpu *vcpu;
+	int cpu = smp_processor_id();
+
+	if (!cpu_slave(cpu))
+		return NMI_DONE;
+
+	/* if called from NMI handler after VM exit, no need to prevent run */
+	vcpu = __this_cpu_read(slave_vcpu);
+	if (!vcpu || vcpu->mode == OUTSIDE_GUEST_MODE || kvm_is_in_guest())
+		return NMI_HANDLED;
+
+	return NMI_HANDLED;
+}
+
+#endif
+
 enum vcpu_enter_guest_slave_retval {
 	EXIT_TO_USER = 0,
 	LOOP_ONLINE,		/* vcpu_post_run is done in online cpu */
@@ -5542,7 +5573,10 @@ static void __vcpu_enter_guest_slave(void *_arg)
 	kvm_arch_vcpu_load(vcpu, cpu);
 
 	while (r == LOOP_SLAVE) {
+		__this_cpu_write(slave_vcpu, vcpu);
+		smp_wmb();
 		r = vcpu_enter_guest(vcpu, arg->task);
+		__this_cpu_write(slave_vcpu, NULL);
 
 		if (unlikely(!irqs_disabled())) {
 			pr_err("irq is enabled on slave vcpu_etner_guest! - forcely disable\n");
@@ -6692,6 +6726,12 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
 }
 
+void kvm_arch_vcpu_kick_slave(struct kvm_vcpu *vcpu)
+{
+	if (kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE)
+		apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), NMI_VECTOR);
+}
+
 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
 {
 	return kvm_x86_ops->interrupt_allowed(vcpu);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index c44a7be..9906908 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -533,6 +533,7 @@ void kvm_arch_hardware_unsetup(void);
 void kvm_arch_check_processor_compat(void *rtn);
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_kick_slave(struct kvm_vcpu *vcpu);
 
 void kvm_free_physmem(struct kvm *kvm);
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index ff8b418..6a989e9 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1531,10 +1531,11 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 	}
 
 	me = get_cpu();
-	if (cpu != me && (unsigned)cpu < nr_cpu_ids &&
-	    (cpu_online(cpu) || cpu_slave(cpu)))
+	if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
 		if (kvm_arch_vcpu_should_kick(vcpu))
 			smp_send_reschedule(cpu);
+	if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_slave(cpu))
+		kvm_arch_vcpu_kick_slave(vcpu);
 	put_cpu();
 }
 #endif /* !CONFIG_S390 */

