Message-ID: <20250106124633.1418972-11-nikunj@amd.com>
Date: Mon, 6 Jan 2025 18:16:30 +0530
From: Nikunj A Dadhania <nikunj@....com>
To: <linux-kernel@...r.kernel.org>, <thomas.lendacky@....com>, <bp@...en8.de>,
<x86@...nel.org>
CC: <kvm@...r.kernel.org>, <mingo@...hat.com>, <tglx@...utronix.de>,
<dave.hansen@...ux.intel.com>, <pgonda@...gle.com>, <seanjc@...gle.com>,
<pbonzini@...hat.com>, <nikunj@....com>, <francescolavra.fl@...il.com>
Subject: [PATCH v16 10/13] x86/tsc: Switch Secure TSC guests away from kvm-clock
Although the kernel switches over to a stable TSC clocksource instead of
kvm-clock, TSC frequency calibration still comes from kvm-clock. This is
because kvmclock_init() unconditionally overrides the x86_platform CPU and
TSC calibration callbacks.
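
For reference, kvmclock_init() installs its calibration callbacks roughly
as below (abridged from arch/x86/kernel/kvmclock.c, quoted here only for
illustration):

	/* kvmclock_init(): both calibration callbacks point at kvm-clock */
	x86_platform.calibrate_tsc = kvm_get_tsc_khz;
	x86_platform.calibrate_cpu = kvm_get_tsc_khz;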
For Secure TSC enabled guests, use the GUEST_TSC_FREQ MSR to discover the
TSC frequency instead of relying on kvm-clock based frequency calibration.
Override both the CPU and TSC frequency calibration callbacks with
securetsc_get_tsc_khz(). Since the distinction between CPU base frequency
and TSC frequency does not apply in this case, the same callback serves
both.
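
The override takes effect because tsc_early_init() consumes whatever
callbacks are installed when it calls determine_cpu_tsc_frequencies(); a
simplified view of that consumer in arch/x86/kernel/tsc.c (abridged, for
illustration only):

	/* determine_cpu_tsc_frequencies(): uses the installed callbacks */
	cpu_khz = x86_platform.calibrate_cpu();
	tsc_khz = x86_platform.calibrate_tsc();

With both callbacks pointing at securetsc_get_tsc_khz(), cpu_khz and
tsc_khz pick up the MSR-provided frequency.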
Additionally, warn users when kvm-clock is selected as the clocksource on
a Secure TSC enabled guest. Users can still switch the clocksource to
kvm-clock through the sysfs interface at runtime, and switching to the
hypervisor-controlled kvm-clock can lead to security issues.
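
For example, root can trigger such a switch at runtime with (illustrative
command):

	# echo kvm-clock > /sys/devices/system/clocksource/clocksource0/current_clocksource

The write ends up invoking the clocksource's enable callback,
kvm_cs_enable(), which is where the taint and warning described below are
added.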
Taint the kernel and issue a warning to the user when the clocksource
switches to kvm-clock, ensuring they are aware of the change and its
implications.
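
Once that happens, the taint can be observed through the usual interface,
e.g. (illustrative check):

	$ cat /proc/sys/kernel/tainted

which reports a value with the TAINT_WARN bit set.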
Signed-off-by: Nikunj A Dadhania <nikunj@....com>
---
arch/x86/include/asm/sev.h | 2 ++
arch/x86/coco/sev/core.c | 21 +++++++++++++++++++++
arch/x86/kernel/kvmclock.c | 11 +++++++++++
arch/x86/kernel/tsc.c | 4 ++++
4 files changed, 38 insertions(+)
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index bdcdaac4df1c..5d9685f92e5c 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -482,6 +482,7 @@ int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req
struct snp_guest_request_ioctl *rio);
void __init snp_secure_tsc_prepare(void);
+void __init snp_secure_tsc_init(void);
#else /* !CONFIG_AMD_MEM_ENCRYPT */
@@ -524,6 +525,7 @@ static inline void snp_msg_free(struct snp_msg_desc *mdesc) { }
static inline int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req,
struct snp_guest_request_ioctl *rio) { return -ENODEV; }
static inline void __init snp_secure_tsc_prepare(void) { }
+static inline void __init snp_secure_tsc_init(void) { }
#endif /* CONFIG_AMD_MEM_ENCRYPT */
diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c
index dbf4531c6271..9c971637e56b 100644
--- a/arch/x86/coco/sev/core.c
+++ b/arch/x86/coco/sev/core.c
@@ -103,6 +103,7 @@ static u64 secrets_pa __ro_after_init;
*/
static u64 snp_tsc_scale __ro_after_init;
static u64 snp_tsc_offset __ro_after_init;
+static u64 snp_tsc_freq_khz __ro_after_init;
/* #VC handler runtime per-CPU data */
struct sev_es_runtime_data {
@@ -3273,3 +3274,23 @@ void __init snp_secure_tsc_prepare(void)
pr_debug("SecureTSC enabled");
}
+
+static unsigned long securetsc_get_tsc_khz(void)
+{
+ return snp_tsc_freq_khz;
+}
+
+void __init snp_secure_tsc_init(void)
+{
+ unsigned long long tsc_freq_mhz;
+
+ if (!cc_platform_has(CC_ATTR_GUEST_SNP_SECURE_TSC))
+ return;
+
+ setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
+ rdmsrl(MSR_AMD64_GUEST_TSC_FREQ, tsc_freq_mhz);
+ snp_tsc_freq_khz = (unsigned long)(tsc_freq_mhz * 1000);
+
+ x86_platform.calibrate_cpu = securetsc_get_tsc_khz;
+ x86_platform.calibrate_tsc = securetsc_get_tsc_khz;
+}
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 5b2c15214a6b..960260a8d884 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -21,6 +21,7 @@
#include <asm/hypervisor.h>
#include <asm/x86_init.h>
#include <asm/kvmclock.h>
+#include <asm/sev.h>
static int kvmclock __initdata = 1;
static int kvmclock_vsyscall __initdata = 1;
@@ -150,6 +151,16 @@ bool kvm_check_and_clear_guest_paused(void)
static int kvm_cs_enable(struct clocksource *cs)
{
+ /*
+ * The TSC clocksource should be used for a guest with Secure TSC
+ * enabled. Taint the kernel and warn when the user changes the
+ * clocksource to kvm-clock.
+ */
+ if (cc_platform_has(CC_ATTR_GUEST_SNP_SECURE_TSC)) {
+ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
+ WARN_ONCE(1, "For a Secure TSC guest, changing the clocksource is not allowed!\n");
+ }
+
vclocks_set_used(VDSO_CLOCKMODE_PVCLOCK);
return 0;
}
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index a85594644e13..34dec0b72ea8 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -31,6 +31,7 @@
#include <asm/i8259.h>
#include <asm/topology.h>
#include <asm/uv/uv.h>
+#include <asm/sev.h>
unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);
@@ -1514,6 +1515,9 @@ void __init tsc_early_init(void)
/* Don't change UV TSC multi-chassis synchronization */
if (is_early_uv_system())
return;
+
+ snp_secure_tsc_init();
+
if (!determine_cpu_tsc_frequencies(true))
return;
tsc_enable_sched_clock();
--
2.34.1