Date:   Thu, 11 Aug 2022 17:05:59 -0400
From:   Paolo Bonzini <pbonzini@...hat.com>
To:     linux-kernel@...r.kernel.org, kvm@...r.kernel.org
Cc:     seanjc@...gle.com, mlevitsk@...hat.com, vkuznets@...hat.com
Subject: [PATCH v2 3/9] KVM: x86: make kvm_vcpu_{block,halt} return whether vCPU is runnable

Whether the vCPU is runnable is currently communicated to the caller
via KVM_REQ_UNHALT, but this is unnecessary: all that the callers do
is clear the request, which is never processed through the usual
request loop.  The same condition can instead be returned as a
positive value from kvm_vcpu_block() and kvm_vcpu_halt().

No functional change intended.

Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
---
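
A minimal sketch (not part of the patch) of how a caller could consume
the new return value instead of checking and clearing KVM_REQ_UNHALT;
the helper name and the exact exit policy below are made up purely for
illustration:

/* Hypothetical caller: halt the vCPU and act on the result. */
static int example_handle_halt(struct kvm_vcpu *vcpu)
{
	int r = kvm_vcpu_halt(vcpu);

	if (r > 0)		/* woken up runnable */
		return 1;	/* resume guest execution */

	/*
	 * r == -EINTR: not runnable (e.g. signal or timer pending);
	 * let the pending work be handled before re-entering the guest.
	 */
	return r;
}
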
 include/linux/kvm_host.h |  4 ++--
 virt/kvm/kvm_main.c      | 23 ++++++++++++++++++-----
 2 files changed, 20 insertions(+), 7 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index e7bd48d15db8..cbd9577e5447 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1338,8 +1338,8 @@ void kvm_gfn_to_pfn_cache_destroy(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
 void kvm_sigset_activate(struct kvm_vcpu *vcpu);
 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
 
-void kvm_vcpu_halt(struct kvm_vcpu *vcpu);
-void kvm_vcpu_block(struct kvm_vcpu *vcpu);
+int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
+int kvm_vcpu_block(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 1f049c1d01b4..e827805b7b28 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3402,6 +3402,12 @@ static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
 	trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
 }
 
+/*
+ * Returns zero if the vCPU should remain in a blocked state,
+ * nonzero if it has been woken up, specifically:
+ * - 1 if it is runnable
+ * - -EINTR if it is not runnable (e.g. has a signal or a timer pending)
+ */
 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
 {
 	int ret = -EINTR;
@@ -3409,6 +3415,7 @@ static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
 
 	if (kvm_arch_vcpu_runnable(vcpu)) {
 		kvm_make_request(KVM_REQ_UNHALT, vcpu);
+		ret = 1;
 		goto out;
 	}
 	if (kvm_cpu_has_pending_timer(vcpu))
@@ -3429,9 +3436,10 @@ static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
  * pending.  This is mostly used when halting a vCPU, but may also be used
  * directly for other vCPU non-runnable states, e.g. x86's Wait-For-SIPI.
  */
-void kvm_vcpu_block(struct kvm_vcpu *vcpu)
+int kvm_vcpu_block(struct kvm_vcpu *vcpu)
 {
 	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
+	int r;
 
 	vcpu->stat.generic.blocking = 1;
 
@@ -3443,7 +3451,8 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 	for (;;) {
 		set_current_state(TASK_INTERRUPTIBLE);
 
-		if (kvm_vcpu_check_block(vcpu) < 0)
+		r = kvm_vcpu_check_block(vcpu);
+		if (r != 0)
 			break;
 
 		schedule();
@@ -3455,6 +3464,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 	preempt_enable();
 
 	vcpu->stat.generic.blocking = 0;
+	return r;
 }
 
 static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
@@ -3485,12 +3495,13 @@ static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
  * expensive block+unblock sequence if a wake event arrives soon after the vCPU
  * is halted.
  */
-void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
+int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
 {
 	bool halt_poll_allowed = !kvm_arch_no_poll(vcpu);
 	bool do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns;
 	ktime_t start, cur, poll_end, stop;
 	bool waited = false;
+	int r;
 	u64 halt_ns;
 
 	start = cur = poll_end = ktime_get();
@@ -3501,14 +3512,15 @@ void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
 		 * This sets KVM_REQ_UNHALT if an interrupt
 		 * arrives.
 		 */
-		if (kvm_vcpu_check_block(vcpu) < 0)
+		r = kvm_vcpu_check_block(vcpu);
+		if (r != 0)
 			goto out;
 		cpu_relax();
 		poll_end = cur = ktime_get();
 	} while (kvm_vcpu_can_poll(cur, stop));
 
 	waited = true;
-	kvm_vcpu_block(vcpu);
+	r = kvm_vcpu_block(vcpu);
 
 	cur = ktime_get();
 	vcpu->stat.generic.halt_wait_ns +=
@@ -3547,6 +3559,7 @@ void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
 	}
 
 	trace_kvm_vcpu_wakeup(halt_ns, waited, vcpu_valid_wakeup(vcpu));
+	return r;
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
 
-- 
2.31.1

