[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250923050942.206116-27-Neeraj.Upadhyay@amd.com>
Date: Tue, 23 Sep 2025 10:39:33 +0530
From: Neeraj Upadhyay <Neeraj.Upadhyay@....com>
To: <kvm@...r.kernel.org>, <seanjc@...gle.com>, <pbonzini@...hat.com>
CC: <linux-kernel@...r.kernel.org>, <Thomas.Lendacky@....com>,
<nikunj@....com>, <Santosh.Shukla@....com>, <Vasant.Hegde@....com>,
<Suravee.Suthikulpanit@....com>, <bp@...en8.de>, <David.Kaplan@....com>,
<huibo.wang@....com>, <naveen.rao@....com>, <pgonda@...gle.com>,
<linux-kselftest@...r.kernel.org>, <shuah@...nel.org>, <tiala@...rosoft.com>
Subject: [RFC PATCH v2 26/35] KVM: selftests: Add IPI handling support for Secure AVIC
When a guest vCPU has Secure AVIC (SAVIC) enabled, an attempt to send an
Inter-Processor Interrupt (IPI) by writing to the Interrupt Command
Register (APIC_ICR) does not complete in hardware. Instead, it triggers
a #VC exception with the exit code SVM_EXIT_AVIC_INCOMPLETE_IPI. This
design delegates the responsibility of IPI delivery to the guest's #VC
handler.
Implement the necessary #VC handler to process these IPI requests,
enabling multi-vCPU IPI communication for SAVIC guests.
Add support for all IPI modes: physical and logical destination modes,
as well as "broadcast" and "broadcast-but-self" destination shorthands.
Inject the IPI into each target vCPU by directly setting the appropriate
bit in the Interrupt Request Register (IRR) of its backing page. In
addition, propagate the ICR write to the hypervisor via a VMGEXIT. This
is crucial for KVM to perform actions like waking up a halted vCPU.
APIC_ICR based NMI delivery is also supported via the NMIReq field in
the backing page.
This functionality is a prerequisite for testing IPIs in SAVIC guests.
It allows selftests like xapic_ipi_test to correctly validate IPI delivery
for SAVIC enabled guests.
Signed-off-by: Neeraj Upadhyay <Neeraj.Upadhyay@....com>
---
tools/testing/selftests/kvm/lib/x86/savic.c | 111 ++++++++++++++++++++
1 file changed, 111 insertions(+)
diff --git a/tools/testing/selftests/kvm/lib/x86/savic.c b/tools/testing/selftests/kvm/lib/x86/savic.c
index 9b1ea5d15338..016e5e9e43f6 100644
--- a/tools/testing/selftests/kvm/lib/x86/savic.c
+++ b/tools/testing/selftests/kvm/lib/x86/savic.c
@@ -42,6 +42,12 @@ enum lapic_lvt_entry {
#define MSR_AMD64_SECURE_AVIC_ALLOWED_NMI_BIT 1
#define SVM_EXIT_AVIC_UNACCELERATED_ACCESS 0x402
+#define SVM_EXIT_AVIC_INCOMPLETE_IPI	0x401
+
+/*
+ * IRR-style register banks hold 32 vectors per 16-byte-spaced register.
+ * Parenthesize the macro argument so expression arguments (e.g.
+ * REG_OFF(v + 1)) evaluate correctly.
+ */
+#define REG_OFF(vec)	(((vec) / 32) * 16)
+#define VEC_POS(vec)	((vec) % 32)
+
+/* Offset of the NMIReq field in the Secure AVIC backing page. */
+#define SAVIC_NMI_REQ_OFFSET	0x278
/*
* Initial pool of guest apic backing page.
@@ -335,6 +341,104 @@ static void handle_savic_unaccel_access(struct ex_regs *regs)
}
}
+/*
+ * Post an interrupt to @cpu by updating its APIC backing page.
+ *
+ * An NMI is signalled through the NMIReq field of the backing page;
+ * a fixed interrupt is requested by setting the vector's bit in the
+ * target's IRR.
+ */
+static void send_ipi(int cpu, int vector, bool nmi)
+{
+	struct guest_apic_page *page = &apic_page_pool->guest_apic_page[cpu];
+
+	if (nmi) {
+		savic_write_reg(page, SAVIC_NMI_REQ_OFFSET, 1);
+		return;
+	}
+
+	savic_write_reg(page, APIC_IRR + REG_OFF(vector), BIT(VEC_POS(vector)));
+}
+
+/*
+ * A vCPU is treated as present when its backing page holds a non-zero
+ * APIC_ID. Note this reports false for CPU 0 (whose x2APIC ID is 0);
+ * callers special-case CPU 0 accordingly.
+ */
+static bool is_cpu_present(int cpu)
+{
+	struct guest_apic_page *page;
+
+	if (cpu >= KVM_MAX_VCPUS)
+		return false;
+
+	page = &apic_page_pool->guest_apic_page[cpu];
+	return savic_read_reg(page, APIC_ID) != 0;
+}
+
+/*
+ * Deliver @vector (or an NMI) to every present vCPU except the sender.
+ *
+ * vCPU numbering is assumed to be dense: the scan stops at the first
+ * non-present CPU (CPU 0 is always scanned, see is_cpu_present()).
+ */
+static void savic_send_ipi_all_but(int vector, bool nmi)
+{
+	int self = x2apic_read_reg(APIC_ID);
+	int cpu;
+
+	for (cpu = 0; cpu < KVM_MAX_VCPUS; cpu++) {
+		if (cpu == self)
+			continue;
+		if (cpu != 0 && !is_cpu_present(cpu))
+			break;
+		send_ipi(cpu, vector, nmi);
+	}
+}
+
+/*
+ * Check whether @dest_cpu is targeted by an IPI with destination @dest.
+ *
+ * Physical mode compares @dest against the target's x2APIC ID. Logical
+ * mode uses the x2APIC LDR layout: cluster ID in bits 31:16 must match
+ * exactly, and the member bitmasks in bits 15:0 must intersect.
+ */
+static bool ipi_match_dest(uint32_t dest, bool logical, int dest_cpu)
+{
+	struct guest_apic_page *page = &apic_page_pool->guest_apic_page[dest_cpu];
+	uint32_t ldr;
+
+	if (!logical)
+		return dest == savic_read_reg(page, APIC_ID);
+
+	ldr = savic_read_reg(page, APIC_LDR);
+	return (ldr >> 16) == (dest >> 16) && (ldr & dest & 0xffff) != 0;
+}
+
+/*
+ * Deliver @vector (or an NMI) to every present vCPU, other than the
+ * sender, that matches @dest in the selected destination mode.
+ *
+ * As in savic_send_ipi_all_but(), the scan assumes dense vCPU numbering
+ * and stops at the first non-present CPU.
+ */
+static void savic_send_ipi_target(uint32_t dest, int vector, bool logical,
+				  bool nmi)
+{
+	int self = x2apic_read_reg(APIC_ID);
+	int cpu;
+
+	for (cpu = 0; cpu < KVM_MAX_VCPUS; cpu++) {
+		if (cpu == self)
+			continue;
+		if (cpu != 0 && !is_cpu_present(cpu))
+			break;
+		if (ipi_match_dest(dest, logical, cpu))
+			send_ipi(cpu, vector, nmi);
+	}
+}
+
+/*
+ * Emulate a guest APIC_ICR write that trapped into the #VC handler with
+ * SVM_EXIT_AVIC_INCOMPLETE_IPI.
+ *
+ * The IPI is injected directly into each target vCPU's backing page and
+ * the ICR write is also forwarded to the hypervisor, which needs it for
+ * actions such as waking halted vCPUs. For the all-including shorthand,
+ * the sender's own interrupt is raised via an APIC_DEST_SELF self-IPI.
+ *
+ * NOTE(review): an APIC_DEST_SELF shorthand would fall into the default
+ * arm and be matched against the (architecturally ignored) destination
+ * field — presumably self-IPIs are accelerated and never trap here;
+ * confirm.
+ */
+static void savic_handle_icr_write(uint64_t icr_data)
+{
+	/* APIC_DEST_ALLBUT covers both shorthand bits, so it doubles as the mask. */
+	int dsh = icr_data & APIC_DEST_ALLBUT;
+	int vector = icr_data & APIC_VECTOR_MASK;
+	bool logical = icr_data & APIC_DEST_LOGICAL;
+	bool nmi = (icr_data & APIC_DM_FIXED_MASK) == APIC_DM_NMI;
+	uint64_t self_icr_data = APIC_DEST_SELF | APIC_INT_ASSERT | vector;
+
+	if (nmi)
+		self_icr_data |= APIC_DM_NMI;
+
+	switch (dsh) {
+	case APIC_DEST_ALLINC:
+		savic_send_ipi_all_but(vector, nmi);
+		savic_hv_write_reg(APIC_ICR, icr_data);
+		x2apic_write_reg(APIC_ICR, self_icr_data);
+		break;
+	case APIC_DEST_ALLBUT:
+		savic_send_ipi_all_but(vector, nmi);
+		savic_hv_write_reg(APIC_ICR, icr_data);
+		break;
+	default:
+		/* No shorthand: route by the destination field (ICR bits 63:32). */
+		savic_send_ipi_target(icr_data >> 32, vector, logical, nmi);
+		savic_hv_write_reg(APIC_ICR, icr_data);
+		break;
+	}
+}
+
void savic_vc_handler(struct ex_regs *regs)
{
uint64_t exit_code = regs->error_code;
@@ -343,6 +447,13 @@ void savic_vc_handler(struct ex_regs *regs)
case SVM_EXIT_AVIC_UNACCELERATED_ACCESS:
handle_savic_unaccel_access(regs);
break;
+	case SVM_EXIT_AVIC_INCOMPLETE_IPI: {
+		/*
+		 * Brace the case arm: before C23, a label must be followed
+		 * by a statement, so declarations directly after the label
+		 * are not valid C.
+		 */
+		uint64_t icr_data = regs->rax | (regs->rdx << 32);
+		/* Convert the x2APIC MSR number back to an APIC MMIO-style offset. */
+		uint32_t reg = (regs->rcx - APIC_BASE_MSR) << 4;
+
+		/* Only APIC_ICR writes are expected to trap with this exit code. */
+		GUEST_ASSERT(reg == APIC_ICR);
+		savic_handle_icr_write(icr_data);
+		break;
+	}
default:
sev_es_vc_handler(regs);
break;
--
2.34.1
Powered by blists - more mailing lists