[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250226090525.231882-5-Neeraj.Upadhyay@amd.com>
Date: Wed, 26 Feb 2025 14:35:12 +0530
From: Neeraj Upadhyay <Neeraj.Upadhyay@....com>
To: <linux-kernel@...r.kernel.org>
CC: <bp@...en8.de>, <tglx@...utronix.de>, <mingo@...hat.com>,
<dave.hansen@...ux.intel.com>, <Thomas.Lendacky@....com>, <nikunj@....com>,
<Santosh.Shukla@....com>, <Vasant.Hegde@....com>,
<Suravee.Suthikulpanit@....com>, <David.Kaplan@....com>, <x86@...nel.org>,
<hpa@...or.com>, <peterz@...radead.org>, <seanjc@...gle.com>,
<pbonzini@...hat.com>, <kvm@...r.kernel.org>,
<kirill.shutemov@...ux.intel.com>, <huibo.wang@....com>, <naveen.rao@....com>
Subject: [RFC v2 04/17] x86/apic: Initialize APIC ID for Secure AVIC
Initialize the APIC ID in the Secure AVIC APIC backing page with
the APIC_ID MSR value read from the Hypervisor. Maintain a hashmap
to detect and report cases where the Hypervisor returns the same
APIC_ID value for two different vCPUs.
Signed-off-by: Neeraj Upadhyay <Neeraj.Upadhyay@....com>
---
Changes since v1:
- Do not read APIC_ID from CPUID. Read APIC_ID from the Hypervisor
and check for duplicates.
- Add a more user-friendly log message on detecting duplicate APIC
IDs.
arch/x86/kernel/apic/x2apic_savic.c | 59 +++++++++++++++++++++++++++++
1 file changed, 59 insertions(+)
diff --git a/arch/x86/kernel/apic/x2apic_savic.c b/arch/x86/kernel/apic/x2apic_savic.c
index ba904f241d34..505ef2d29311 100644
--- a/arch/x86/kernel/apic/x2apic_savic.c
+++ b/arch/x86/kernel/apic/x2apic_savic.c
@@ -11,6 +11,8 @@
#include <linux/cc_platform.h>
#include <linux/percpu-defs.h>
#include <linux/align.h>
+#include <linux/sizes.h>
+#include <linux/llist.h>
#include <asm/apic.h>
#include <asm/sev.h>
@@ -19,6 +21,16 @@
static DEFINE_PER_CPU(void *, apic_backing_page);
+struct apic_id_node {
+ struct llist_node node;
+ u32 apic_id;
+ int cpu;
+};
+
+static DEFINE_PER_CPU(struct apic_id_node, apic_id_node);
+
+static struct llist_head *apic_id_map;
+
static int x2apic_savic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
return x2apic_enabled() && cc_platform_has(CC_ATTR_SNP_SECURE_AVIC);
@@ -180,6 +192,44 @@ static void x2apic_savic_send_IPI_mask_allbutself(const struct cpumask *mask, in
__send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
}
+static void init_backing_page(void *backing_page)
+{
+ struct apic_id_node *next_node, *this_cpu_node;
+ unsigned int apic_map_slot;
+ u32 apic_id;
+ int cpu;
+
+ /*
+	 * Before Secure AVIC is enabled, APIC MSR reads are
+	 * intercepted. An APIC_ID MSR read returns the value
+	 * provided by the Hypervisor.
+ */
+ apic_id = native_apic_msr_read(APIC_ID);
+ set_reg(backing_page, APIC_ID, apic_id);
+
+ if (!apic_id_map)
+ return;
+
+ cpu = smp_processor_id();
+ this_cpu_node = &per_cpu(apic_id_node, cpu);
+ this_cpu_node->apic_id = apic_id;
+ this_cpu_node->cpu = cpu;
+ /*
+	 * In the common case, APIC IDs of CPUs are sequentially numbered.
+	 * So, each CPU should hash to a different slot in the APIC ID
+	 * map.
+ */
+ apic_map_slot = apic_id % nr_cpu_ids;
+ llist_add(&this_cpu_node->node, &apic_id_map[apic_map_slot]);
+ /* Each CPU checks only its next nodes for duplicates. */
+ llist_for_each_entry(next_node, this_cpu_node->node.next, node) {
+ if (WARN_ONCE(next_node->apic_id == apic_id,
+ "Duplicate APIC %u for cpu %d and cpu %d. IPI handling will suffer!",
+ apic_id, cpu, next_node->cpu))
+ break;
+ }
+}
+
static void x2apic_savic_setup(void)
{
void *backing_page;
@@ -193,6 +243,7 @@ static void x2apic_savic_setup(void)
if (!backing_page)
snp_abort();
this_cpu_write(apic_backing_page, backing_page);
+ init_backing_page(backing_page);
gpa = __pa(backing_page);
/*
@@ -212,6 +263,8 @@ static void x2apic_savic_setup(void)
static int x2apic_savic_probe(void)
{
+ int i;
+
if (!cc_platform_has(CC_ATTR_SNP_SECURE_AVIC))
return 0;
@@ -220,6 +273,12 @@ static int x2apic_savic_probe(void)
snp_abort();
}
+ apic_id_map = kvmalloc(nr_cpu_ids * sizeof(*apic_id_map), GFP_KERNEL);
+
+ if (apic_id_map)
+ for (i = 0; i < nr_cpu_ids; i++)
+ init_llist_head(&apic_id_map[i]);
+
pr_info("Secure AVIC Enabled\n");
return 1;
--
2.34.1
Powered by blists - more mailing lists