Message-ID: <20240621123903.2411843-19-nikunj@amd.com>
Date: Fri, 21 Jun 2024 18:08:57 +0530
From: Nikunj A Dadhania <nikunj@....com>
To: <linux-kernel@...r.kernel.org>, <thomas.lendacky@....com>, <bp@...en8.de>,
<x86@...nel.org>, <kvm@...r.kernel.org>
CC: <mingo@...hat.com>, <tglx@...utronix.de>, <dave.hansen@...ux.intel.com>,
<pgonda@...gle.com>, <seanjc@...gle.com>, <pbonzini@...hat.com>,
<nikunj@....com>
Subject: [PATCH v10 18/24] x86/sev: Add Secure TSC support for SNP guests
Add support for Secure TSC in SNP-enabled guests. Secure TSC allows guests
to securely use the RDTSC/RDTSCP instructions, as the parameters being used
cannot be changed by the hypervisor once the guest is launched.
Secure TSC enabled guests need to query TSC info from the AMD Security
Processor. This communication channel is encrypted between the AMD Security
Processor and the guest; the hypervisor is just the conduit that delivers the
guest messages to the AMD Security Processor. Each message is protected
with an AEAD (AES-256 GCM). Use the minimal AES-GCM library to encrypt/decrypt
the SNP guest messages when communicating with the PSP.
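For illustration only, a minimal sketch of how one payload could be sealed
with the lib/crypto AES-GCM helpers (aesgcm_expandkey()/aesgcm_encrypt()
from <crypto/gcm.h>). The function, key handling and IV/AAD layout below are
hypothetical and do not reproduce the actual SNP guest messaging code:

	#include <crypto/gcm.h>		/* struct aesgcm_ctx, aesgcm_*() helpers */
	#include <linux/string.h>	/* memzero_explicit() */
	#include <linux/types.h>

	/* Sketch: seal @len bytes of @src into @dst with the VMPCK @key. */
	static int example_seal_payload(const u8 *key, unsigned int keylen,
					const u8 *src, unsigned int len, u8 *dst,
					const u8 *aad, unsigned int aad_len,
					const u8 iv[GCM_AES_IV_SIZE],
					u8 *authtag, unsigned int authtag_len)
	{
		struct aesgcm_ctx ctx;

		/* Expand the AES-256 key; authtag_len selects the GCM tag size */
		if (aesgcm_expandkey(&ctx, key, keylen, authtag_len))
			return -EINVAL;

		/* AEAD-seal the payload: ciphertext in @dst, tag in @authtag */
		aesgcm_encrypt(&ctx, dst, src, len, aad, aad_len, iv, authtag);

		/* The expanded key schedule is sensitive, wipe it */
		memzero_explicit(&ctx, sizeof(ctx));
		return 0;
	}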
Use mem_encrypt_init() to fetch the SNP TSC info from the AMD Security
Processor and initialize snp_tsc_scale and snp_tsc_offset. During
secondary CPU initialization, set the VMSA fields GUEST_TSC_SCALE (offset 2F0h)
and GUEST_TSC_OFFSET (offset 2F8h) to snp_tsc_scale and snp_tsc_offset
respectively.
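As a cross-check of the layout described above (a sketch only, not part of
the patch): the new save area fields land on the architectural offsets and
the TSC_INFO wire formats match the sizes defined in sev.h:

	#include <linux/build_bug.h>
	#include <linux/stddef.h>	/* offsetof() */
	#include <asm/sev.h>		/* snp_tsc_info_req/resp, SNP_TSC_INFO_*_SZ */
	#include <asm/svm.h>		/* struct sev_es_save_area */

	static void __maybe_unused secure_tsc_layout_checks(void)
	{
		/* GUEST_TSC_SCALE/GUEST_TSC_OFFSET sit at 2F0h/2F8h in the VMSA */
		BUILD_BUG_ON(offsetof(struct sev_es_save_area, tsc_scale)  != 0x2f0);
		BUILD_BUG_ON(offsetof(struct sev_es_save_area, tsc_offset) != 0x2f8);

		/* TSC_INFO request and response are fixed 128-byte messages */
		BUILD_BUG_ON(sizeof(struct snp_tsc_info_req)  != SNP_TSC_INFO_REQ_SZ);
		BUILD_BUG_ON(sizeof(struct snp_tsc_info_resp) != SNP_TSC_INFO_RESP_SZ);
	}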
Signed-off-by: Nikunj A Dadhania <nikunj@....com>
Tested-by: Peter Gonda <pgonda@...gle.com>
---
arch/x86/include/asm/sev-common.h | 1 +
arch/x86/include/asm/sev.h | 22 ++++++++
arch/x86/include/asm/svm.h | 6 ++-
arch/x86/coco/sev/core.c | 90 +++++++++++++++++++++++++++++++
arch/x86/mm/mem_encrypt.c | 4 ++
5 files changed, 121 insertions(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h
index e90d403f2068..4cb3ea6564da 100644
--- a/arch/x86/include/asm/sev-common.h
+++ b/arch/x86/include/asm/sev-common.h
@@ -181,6 +181,7 @@ struct snp_psc_desc {
#define GHCB_TERM_NO_SVSM 7 /* SVSM is not advertised in the secrets page */
#define GHCB_TERM_SVSM_VMPL0 8 /* SVSM is present but has set VMPL to 0 */
#define GHCB_TERM_SVSM_CAA 9 /* SVSM is present but CAA is not page aligned */
+#define GHCB_TERM_SECURE_TSC 10 /* Secure TSC initialization failed */
#define GHCB_RESP_CODE(v) ((v) & GHCB_MSR_INFO_MASK)
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index c5ead3230d18..f2ce29345163 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -160,6 +160,9 @@ enum msg_type {
SNP_MSG_VMRK_REQ,
SNP_MSG_VMRK_RSP,
+ SNP_MSG_TSC_INFO_REQ = 17,
+ SNP_MSG_TSC_INFO_RSP,
+
SNP_MSG_TYPE_MAX
};
@@ -191,6 +194,22 @@ struct snp_guest_msg {
#define SNP_GUEST_MSG_SIZE 4096
#define SNP_GUEST_MSG_PAYLOAD_SIZE (SNP_GUEST_MSG_SIZE - sizeof(struct snp_guest_msg))
+#define SNP_TSC_INFO_REQ_SZ 128
+#define SNP_TSC_INFO_RESP_SZ 128
+
+struct snp_tsc_info_req {
+ u8 rsvd[SNP_TSC_INFO_REQ_SZ];
+} __packed;
+
+struct snp_tsc_info_resp {
+ u32 status;
+ u32 rsvd1;
+ u64 tsc_scale;
+ u64 tsc_offset;
+ u32 tsc_factor;
+ u8 rsvd2[100];
+} __packed;
+
struct snp_guest_dev {
struct device *dev;
struct miscdevice misc;
@@ -212,6 +231,7 @@ struct snp_guest_dev {
struct snp_report_req report;
struct snp_derived_key_req derived_key;
struct snp_ext_report_req ext_report;
+ struct snp_tsc_info_req tsc_info;
} req;
unsigned int vmpck_id;
};
@@ -481,6 +501,7 @@ static inline void *alloc_shared_pages(size_t sz)
return page_address(page);
}
+void __init snp_secure_tsc_prepare(void);
#else /* !CONFIG_AMD_MEM_ENCRYPT */
@@ -522,6 +543,7 @@ static inline int snp_send_guest_request(struct snp_guest_dev *snp_dev, struct s
struct snp_guest_request_ioctl *rio) { return -EINVAL; }
static inline void free_shared_pages(void *buf, size_t sz) { }
static inline void *alloc_shared_pages(size_t sz) { return NULL; }
+static inline void __init snp_secure_tsc_prepare(void) { }
#endif /* CONFIG_AMD_MEM_ENCRYPT */
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 728c98175b9c..91d6c8a79aa2 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -410,7 +410,9 @@ struct sev_es_save_area {
u8 reserved_0x298[80];
u32 pkru;
u32 tsc_aux;
- u8 reserved_0x2f0[24];
+ u64 tsc_scale;
+ u64 tsc_offset;
+ u8 reserved_0x300[8];
u64 rcx;
u64 rdx;
u64 rbx;
@@ -542,7 +544,7 @@ static inline void __unused_size_checks(void)
BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x1c0);
BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x248);
BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x298);
- BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x2f0);
+ BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x300);
BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x320);
BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x380);
BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x3f0);
diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c
index e0b79e292fcf..7aed6819930b 100644
--- a/arch/x86/coco/sev/core.c
+++ b/arch/x86/coco/sev/core.c
@@ -96,6 +96,10 @@ static u64 sev_hv_features __ro_after_init;
/* Secrets page physical address from the CC blob */
static u64 secrets_pa __ro_after_init;
+/* Secure TSC values read using TSC_INFO SNP Guest request */
+static u64 snp_tsc_scale __ro_after_init;
+static u64 snp_tsc_offset __ro_after_init;
+
/* #VC handler runtime per-CPU data */
struct sev_es_runtime_data {
struct ghcb ghcb_page;
@@ -1173,6 +1177,12 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
vmsa->vmpl = snp_vmpl;
vmsa->sev_features = sev_status >> 2;
+ /* Set Secure TSC parameters */
+ if (cc_platform_has(CC_ATTR_GUEST_SECURE_TSC)) {
+ vmsa->tsc_scale = snp_tsc_scale;
+ vmsa->tsc_offset = snp_tsc_offset;
+ }
+
/* Switch the page over to a VMSA page now that it is initialized */
ret = snp_set_vmsa(vmsa, caa, apic_id, true);
if (ret) {
@@ -2965,3 +2975,83 @@ void snp_guest_messaging_exit(struct snp_guest_dev *snp_dev)
iounmap(snp_dev->secrets);
}
EXPORT_SYMBOL_GPL(snp_guest_messaging_exit);
+
+static struct snp_guest_dev tsc_snp_dev __initdata;
+
+static int __init snp_get_tsc_info(void)
+{
+ struct snp_tsc_info_req *tsc_req = &tsc_snp_dev.req.tsc_info;
+ static u8 buf[SNP_TSC_INFO_RESP_SZ + AUTHTAG_LEN];
+ struct snp_guest_request_ioctl rio;
+ struct snp_tsc_info_resp tsc_resp;
+ struct snp_guest_req req;
+ int rc;
+
+ /*
+ * The intermediate response buffer is used while decrypting the
+ * response payload. Make sure that it has enough space to cover the
+ * authtag.
+ */
+ BUILD_BUG_ON(sizeof(buf) < (sizeof(tsc_resp) + AUTHTAG_LEN));
+
+ if (!snp_assign_vmpck(&tsc_snp_dev, 0))
+ return -EINVAL;
+
+ rc = snp_guest_messaging_init(&tsc_snp_dev);
+ if (rc)
+ return rc;
+
+ memset(tsc_req, 0, sizeof(*tsc_req));
+ memset(&req, 0, sizeof(req));
+ memset(&rio, 0, sizeof(rio));
+ memset(buf, 0, sizeof(buf));
+
+ req.msg_version = MSG_HDR_VER;
+ req.msg_type = SNP_MSG_TSC_INFO_REQ;
+ req.vmpck_id = tsc_snp_dev.vmpck_id;
+ req.req_buf = tsc_req;
+ req.req_sz = sizeof(*tsc_req);
+ req.resp_buf = buf;
+ req.resp_sz = sizeof(tsc_resp) + AUTHTAG_LEN;
+ req.exit_code = SVM_VMGEXIT_GUEST_REQUEST;
+
+ rc = snp_send_guest_request(&tsc_snp_dev, &req, &rio);
+ if (rc)
+ goto err_req;
+
+ memcpy(&tsc_resp, buf, sizeof(tsc_resp));
+ pr_debug("%s: response status %x scale %llx offset %llx factor %x\n",
+ __func__, tsc_resp.status, tsc_resp.tsc_scale, tsc_resp.tsc_offset,
+ tsc_resp.tsc_factor);
+
+ if (tsc_resp.status == 0) {
+ snp_tsc_scale = tsc_resp.tsc_scale;
+ snp_tsc_offset = tsc_resp.tsc_offset;
+ } else {
+ pr_err("Failed to get TSC info, response status %x\n", tsc_resp.status);
+ rc = -EIO;
+ }
+
+err_req:
+ /* The response buffer contains the sensitive data, explicitly clear it. */
+ memzero_explicit(buf, sizeof(buf));
+ memzero_explicit(&tsc_resp, sizeof(tsc_resp));
+ memzero_explicit(&req, sizeof(req));
+
+ snp_guest_messaging_exit(&tsc_snp_dev);
+
+ return rc;
+}
+
+void __init snp_secure_tsc_prepare(void)
+{
+ if (!cc_platform_has(CC_ATTR_GUEST_SECURE_TSC))
+ return;
+
+ if (snp_get_tsc_info()) {
+ pr_alert("Unable to retrieve Secure TSC info from ASP\n");
+ sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SECURE_TSC);
+ }
+
+ pr_debug("SecureTSC enabled");
+}
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 0a120d85d7bb..996ca27f0b72 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -94,6 +94,10 @@ void __init mem_encrypt_init(void)
/* Call into SWIOTLB to update the SWIOTLB DMA buffers */
swiotlb_update_mem_attributes();
+ /* Initialize SNP Secure TSC */
+ if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+ snp_secure_tsc_prepare();
+
print_mem_encrypt_feature_info();
}
--
2.34.1