Message-ID: <20250825200622.3759571-4-seanjc@google.com>
Date: Mon, 25 Aug 2025 13:06:20 -0700
From: Sean Christopherson <seanjc@...gle.com>
To: Marc Zyngier <maz@...nel.org>, Oliver Upton <oliver.upton@...ux.dev>,
Catalin Marinas <catalin.marinas@....com>, Will Deacon <will@...nel.org>,
Tianrui Zhao <zhaotianrui@...ngson.cn>, Bibo Mao <maobibo@...ngson.cn>,
Huacai Chen <chenhuacai@...nel.org>, Anup Patel <anup@...infault.org>,
Paul Walmsley <paul.walmsley@...ive.com>, Palmer Dabbelt <palmer@...belt.com>,
Albert Ou <aou@...s.berkeley.edu>, Sean Christopherson <seanjc@...gle.com>,
Paolo Bonzini <pbonzini@...hat.com>, Thomas Gleixner <tglx@...utronix.de>, Ingo Molnar <mingo@...hat.com>,
Borislav Petkov <bp@...en8.de>, Dave Hansen <dave.hansen@...ux.intel.com>, x86@...nel.org,
"K. Y. Srinivasan" <kys@...rosoft.com>, Haiyang Zhang <haiyangz@...rosoft.com>, Wei Liu <wei.liu@...nel.org>,
Dexuan Cui <decui@...rosoft.com>, Peter Zijlstra <peterz@...radead.org>,
Andy Lutomirski <luto@...nel.org>, "Paul E. McKenney" <paulmck@...nel.org>,
Frederic Weisbecker <frederic@...nel.org>, Neeraj Upadhyay <neeraj.upadhyay@...nel.org>,
Joel Fernandes <joelagnelf@...dia.com>, Josh Triplett <josh@...htriplett.org>,
Boqun Feng <boqun.feng@...il.com>, Uladzislau Rezki <urezki@...il.com>
Cc: linux-kernel@...r.kernel.org, linux-arm-kernel@...ts.infradead.org,
kvmarm@...ts.linux.dev, kvm@...r.kernel.org, loongarch@...ts.linux.dev,
kvm-riscv@...ts.infradead.org, linux-riscv@...ts.infradead.org,
linux-hyperv@...r.kernel.org, rcu@...r.kernel.org
Subject: [PATCH 3/5] entry/kvm: KVM: Move KVM details related to signal/-EINTR
into KVM proper
Move KVM's morphing of pending signals into userspace exits out of the generic
entry code and into KVM proper, and drop the @vcpu param from
xfer_to_guest_mode_handle_work().
How KVM responds to -EINTR is a detail that really belongs in KVM itself, and
invoking kvm_handle_signal_exit() from the generic entry code creates an
inverted module dependency. E.g. attempting to move kvm_handle_signal_exit()
into kvm_main.c would generate a linker error when building kvm.ko as a module.
Dropping the KVM details will also allow converting the KVM "entry" code into a
more generic virtualization framework so that it can be used when running as a
Hyper-V root partition.
Lastly, eliminating usage of "struct kvm_vcpu" outside of KVM is a nice-to-have
for KVM x86 developers, as keeping the details of kvm_vcpu purely within KVM
allows changing the layout of the structure without having to boot into a new
kernel, e.g. kvm.ko can be rebuilt and reloaded with a modified kvm_vcpu
structure as part of debug/development.
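For reference, the net change condenses to a small inline wrapper in
kvm_host.h that morphs a non-zero return (which can only be -EINTR) from the
now vcpu-less generic helper into a userspace signal exit, with each arch run
loop switched over to the wrapper (condensed from the hunks below):

  /* include/linux/kvm_host.h */
  static inline int kvm_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu)
  {
	int r = xfer_to_guest_mode_handle_work();

	if (r) {
		WARN_ON_ONCE(r != -EINTR);
		kvm_handle_signal_exit(vcpu);
	}
	return r;
  }

  /* arch call sites, e.g. arch/arm64/kvm/arm.c */
  ret = kvm_xfer_to_guest_mode_handle_work(vcpu);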
Signed-off-by: Sean Christopherson <seanjc@...gle.com>
---
arch/arm64/kvm/arm.c | 3 +--
arch/loongarch/kvm/vcpu.c | 3 +--
arch/riscv/kvm/vcpu.c | 3 +--
arch/x86/kvm/vmx/vmx.c | 1 -
arch/x86/kvm/x86.c | 3 +--
include/linux/entry-kvm.h | 11 +++--------
include/linux/kvm_host.h | 13 ++++++++++++-
kernel/entry/kvm.c | 13 +++++--------
8 files changed, 24 insertions(+), 26 deletions(-)
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 888f7c7abf54..418fd3043467 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -6,7 +6,6 @@
#include <linux/bug.h>
#include <linux/cpu_pm.h>
-#include <linux/entry-kvm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
@@ -1177,7 +1176,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
/*
* Check conditions before entering the guest
*/
- ret = xfer_to_guest_mode_handle_work(vcpu);
+ ret = kvm_xfer_to_guest_mode_handle_work(vcpu);
if (!ret)
ret = 1;
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index ce478151466c..450545d2fc70 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -4,7 +4,6 @@
*/
#include <linux/kvm_host.h>
-#include <linux/entry-kvm.h>
#include <asm/fpu.h>
#include <asm/lbt.h>
#include <asm/loongarch.h>
@@ -251,7 +250,7 @@ static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
/*
* Check conditions before entering the guest
*/
- ret = xfer_to_guest_mode_handle_work(vcpu);
+ ret = kvm_xfer_to_guest_mode_handle_work(vcpu);
if (ret < 0)
return ret;
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index f001e56403f9..251e787f2ebc 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -7,7 +7,6 @@
*/
#include <linux/bitops.h>
-#include <linux/entry-kvm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
@@ -910,7 +909,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
run->exit_reason = KVM_EXIT_UNKNOWN;
while (ret > 0) {
/* Check conditions before entering the guest */
- ret = xfer_to_guest_mode_handle_work(vcpu);
+ ret = kvm_xfer_to_guest_mode_handle_work(vcpu);
if (ret)
continue;
ret = 1;
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index aa157fe5b7b3..d7c86613e50a 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -28,7 +28,6 @@
#include <linux/slab.h>
#include <linux/tboot.h>
#include <linux/trace_events.h>
-#include <linux/entry-kvm.h>
#include <asm/apic.h>
#include <asm/asm.h>
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a1c49bc681c4..0b13b8bf69e5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -59,7 +59,6 @@
#include <linux/sched/stat.h>
#include <linux/sched/isolation.h>
#include <linux/mem_encrypt.h>
-#include <linux/entry-kvm.h>
#include <linux/suspend.h>
#include <linux/smp.h>
@@ -11241,7 +11240,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
if (__xfer_to_guest_mode_work_pending()) {
kvm_vcpu_srcu_read_unlock(vcpu);
- r = xfer_to_guest_mode_handle_work(vcpu);
+ r = kvm_xfer_to_guest_mode_handle_work(vcpu);
kvm_vcpu_srcu_read_lock(vcpu);
if (r)
return r;
diff --git a/include/linux/entry-kvm.h b/include/linux/entry-kvm.h
index 16149f6625e4..3644de7e6019 100644
--- a/include/linux/entry-kvm.h
+++ b/include/linux/entry-kvm.h
@@ -21,8 +21,6 @@
_TIF_NOTIFY_SIGNAL | _TIF_NOTIFY_RESUME | \
ARCH_XFER_TO_GUEST_MODE_WORK)
-struct kvm_vcpu;
-
/**
* arch_xfer_to_guest_mode_handle_work - Architecture specific xfer to guest
* mode work handling function.
@@ -32,12 +30,10 @@ struct kvm_vcpu;
* Invoked from xfer_to_guest_mode_handle_work(). Defaults to NOOP. Can be
* replaced by architecture specific code.
*/
-static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu,
- unsigned long ti_work);
+static inline int arch_xfer_to_guest_mode_handle_work(unsigned long ti_work);
#ifndef arch_xfer_to_guest_mode_work
-static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu,
- unsigned long ti_work)
+static inline int arch_xfer_to_guest_mode_handle_work(unsigned long ti_work)
{
return 0;
}
@@ -46,11 +42,10 @@ static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu,
/**
* xfer_to_guest_mode_handle_work - Check and handle pending work which needs
* to be handled before going to guest mode
- * @vcpu: Pointer to current's VCPU data
*
* Returns: 0 or an error code
*/
-int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu);
+int xfer_to_guest_mode_handle_work(void);
/**
* xfer_to_guest_mode_prepare - Perform last minute preparation work that
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 15656b7fba6c..598b9473e46d 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2,7 +2,7 @@
#ifndef __KVM_HOST_H
#define __KVM_HOST_H
-
+#include <linux/entry-kvm.h>
#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
@@ -2450,6 +2450,17 @@ static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
vcpu->run->exit_reason = KVM_EXIT_INTR;
vcpu->stat.signal_exits++;
}
+
+static inline int kvm_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu)
+{
+ int r = xfer_to_guest_mode_handle_work();
+
+ if (r) {
+ WARN_ON_ONCE(r != -EINTR);
+ kvm_handle_signal_exit(vcpu);
+ }
+ return r;
+}
#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */
/*
diff --git a/kernel/entry/kvm.c b/kernel/entry/kvm.c
index 8485f63863af..6fc762eaacca 100644
--- a/kernel/entry/kvm.c
+++ b/kernel/entry/kvm.c
@@ -1,17 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/entry-kvm.h>
-#include <linux/kvm_host.h>
-static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work)
+static int xfer_to_guest_mode_work(unsigned long ti_work)
{
do {
int ret;
- if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) {
- kvm_handle_signal_exit(vcpu);
+ if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
return -EINTR;
- }
if (ti_work & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY))
schedule();
@@ -19,7 +16,7 @@ static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work)
if (ti_work & _TIF_NOTIFY_RESUME)
resume_user_mode_work(NULL);
- ret = arch_xfer_to_guest_mode_handle_work(vcpu, ti_work);
+ ret = arch_xfer_to_guest_mode_handle_work(ti_work);
if (ret)
return ret;
@@ -28,7 +25,7 @@ static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work)
return 0;
}
-int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu)
+int xfer_to_guest_mode_handle_work(void)
{
unsigned long ti_work;
@@ -44,6 +41,6 @@ int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu)
if (!(ti_work & XFER_TO_GUEST_MODE_WORK))
return 0;
- return xfer_to_guest_mode_work(vcpu, ti_work);
+ return xfer_to_guest_mode_work(ti_work);
}
EXPORT_SYMBOL_GPL(xfer_to_guest_mode_handle_work);
--
2.51.0.261.g7ce5a0a67e-goog