Message-ID: <d1bb638b-cb7e-44d7-bd70-e1282c993ca4@amd.com>
Date: Thu, 2 Nov 2023 09:58:38 +0530
From: "Nikunj A. Dadhania" <nikunj@....com>
To: Tom Lendacky <thomas.lendacky@....com>,
linux-kernel@...r.kernel.org, x86@...nel.org, kvm@...r.kernel.org
Cc: bp@...en8.de, mingo@...hat.com, tglx@...utronix.de,
dave.hansen@...ux.intel.com, dionnaglaze@...gle.com,
pgonda@...gle.com, seanjc@...gle.com, pbonzini@...hat.com
Subject: Re: [PATCH v5 07/14] x86/sev: Move and reorganize sev guest request
api
On 10/31/2023 12:46 AM, Tom Lendacky wrote:
> On 10/30/23 01:36, Nikunj A Dadhania wrote:
>> For enabling Secure TSC, SEV-SNP guests need to communicate with the
>> AMD Security Processor early during boot. Many of the required
>> functions are implemented in the sev-guest driver and therefore not
>> available at early boot. Move the required functions and provide an
>> API to the driver to assign key and send guest request.
>>
>> Signed-off-by: Nikunj A Dadhania <nikunj@....com>
>> ---
>> @@ -72,6 +111,47 @@ struct snp_guest_req {
>> u8 msg_type;
>> };
>> -int snp_issue_guest_request(struct snp_guest_req *req, struct snp_req_data *input,
>> - struct snp_guest_request_ioctl *rio);
>> +int snp_setup_psp_messaging(struct snp_guest_dev *snp_dev);
>> +int snp_send_guest_request(struct snp_guest_dev *dev, struct snp_guest_req *req,
>> + struct snp_guest_request_ioctl *rio);
>> +bool snp_assign_vmpck(struct snp_guest_dev *dev, unsigned int vmpck_id);
>> +bool snp_is_vmpck_empty(unsigned int vmpck_id);
>> +
>> +static void free_shared_pages(void *buf, size_t sz)
>
> These should probably be marked __inline if you're going to define them in a header file.
Sure, I will update both free_shared_pages() and alloc_shared_pages().
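
Something along these lines, I think (just a sketch using kernel-style static
inline; alloc_shared_pages() would get the same treatment):

static inline void free_shared_pages(void *buf, size_t sz)
{
	unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
	int ret;

	if (!buf)
		return;

	/* If the encryption mask cannot be restored, leak the pages. */
	ret = set_memory_encrypted((unsigned long)buf, npages);
	if (ret) {
		WARN_ONCE(ret, "failed to restore encryption mask (leak it)\n");
		return;
	}

	__free_pages(virt_to_page(buf), get_order(sz));
}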
>
>> +{
>> + unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
>> + int ret;
>> +
>> + if (!buf)
>> + return;
>> +
>> + ret = set_memory_encrypted((unsigned long)buf, npages);
>> + if (ret) {
>> + WARN_ONCE(ret, "failed to restore encryption mask (leak it)\n");
>> + return;
>> + }
>> +
>> + __free_pages(virt_to_page(buf), get_order(sz));
>> +}
>> +
>> +static void *alloc_shared_pages(size_t sz)
>> +{
>> + unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
>> + struct page *page;
>> + int ret;
>> +
>> + page = alloc_pages(GFP_KERNEL_ACCOUNT, get_order(sz));
>> + if (!page)
>> + return NULL;
>> +
>> + ret = set_memory_decrypted((unsigned long)page_address(page), npages);
>> + if (ret) {
>> + pr_err("%s: failed to mark page shared, ret=%d\n", __func__, ret);
>> + __free_pages(page, get_order(sz));
>> + return NULL;
>> + }
>> +
>> + return page_address(page);
>> +}
>> +
>> #endif /* __VIRT_SEVGUEST_H__ */
>> diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
>> index 78465a8c7dc6..783150458864 100644
>> --- a/arch/x86/include/asm/sev.h
>> +++ b/arch/x86/include/asm/sev.h
>> @@ -93,16 +93,6 @@ extern bool handle_vc_boot_ghcb(struct pt_regs *regs);
>> #define RMPADJUST_VMSA_PAGE_BIT BIT(16)
>> -/* SNP Guest message request */
>> -struct snp_req_data {
>> - unsigned long req_gpa;
>> - unsigned long resp_gpa;
>> -};
>> -
>> -struct sev_guest_platform_data {
>> - u64 secrets_gpa;
>> -};
>> -
>> /*
>> * The secrets page contains 96-bytes of reserved field that can be used by
>> * the guest OS. The guest OS uses the area to save the message sequence
>> diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
>> index fd3b822fa9e7..fb3b1feb1b84 100644
>> --- a/arch/x86/kernel/sev.c
>> +++ b/arch/x86/kernel/sev.c
>> @@ -24,6 +24,7 @@
>> #include <linux/io.h>
>> #include <linux/psp-sev.h>
>> #include <uapi/linux/sev-guest.h>
>> +#include <crypto/gcm.h>
>> #include <asm/cpu_entry_area.h>
>> #include <asm/stacktrace.h>
>> @@ -941,6 +942,457 @@ static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
>> free_page((unsigned long)vmsa);
>> }
>> +static struct sev_guest_platform_data *platform_data;
>> +
>> +static inline u8 *snp_get_vmpck(unsigned int vmpck_id)
>> +{
>> + if (!platform_data)
>> + return NULL;
>> +
>> + return platform_data->layout->vmpck0 + vmpck_id * VMPCK_KEY_LEN;
>> +}
>> +
>> +static inline u32 *snp_get_os_area_msg_seqno(unsigned int vmpck_id)
>> +{
>> + if (!platform_data)
>> + return NULL;
>> +
>> + return &platform_data->layout->os_area.msg_seqno_0 + vmpck_id;
>> +}
>> +
>> +bool snp_is_vmpck_empty(unsigned int vmpck_id)
>> +{
>> + char zero_key[VMPCK_KEY_LEN] = {0};
>> + u8 *key = snp_get_vmpck(vmpck_id);
>> +
>> + if (key)
>> + return !memcmp(key, zero_key, VMPCK_KEY_LEN);
>> +
>> + return true;
>> +}
>> +EXPORT_SYMBOL_GPL(snp_is_vmpck_empty);
>> +
>> +/*
>> + * If an error is received from the host or AMD Secure Processor (ASP) there
>> + * are two options. Either retry the exact same encrypted request or discontinue
>> + * using the VMPCK.
>> + *
>> + * This is because in the current encryption scheme GHCB v2 uses AES-GCM to
>> + * encrypt the requests. The IV for this scheme is the sequence number. GCM
>> + * cannot tolerate IV reuse.
>> + *
>> + * The ASP FW v1.51 only increments the sequence numbers on a successful
>> + * guest<->ASP back and forth and only accepts messages at its exact sequence
>> + * number.
>> + *
>> + * So if the sequence number were to be reused the encryption scheme is
>> + * vulnerable. If the sequence number were incremented for a fresh IV the ASP
>> + * will reject the request.
>> + */
>> +static void snp_disable_vmpck(struct snp_guest_dev *snp_dev)
>> +{
>> + u8 *key = snp_get_vmpck(snp_dev->vmpck_id);
>> +
>> + pr_alert("Disabling vmpck_id %d to prevent IV reuse.\n", snp_dev->vmpck_id);
>> + memzero_explicit(key, VMPCK_KEY_LEN);
>> +}
>> +
>> +static inline u64 __snp_get_msg_seqno(struct snp_guest_dev *snp_dev)
>> +{
>> + u32 *os_area_msg_seqno = snp_get_os_area_msg_seqno(snp_dev->vmpck_id);
>> + u64 count;
>> +
>> + if (!os_area_msg_seqno) {
>> + pr_err("SNP unable to get message sequence counter\n");
>> + return 0;
>> + }
>> +
>> + lockdep_assert_held(&snp_dev->cmd_mutex);
>> +
>> + /* Read the current message sequence counter from secrets pages */
>> + count = *os_area_msg_seqno;
>> +
>> + return count + 1;
>> +}
>> +
>> +/* Return a non-zero on success */
>> +static u64 snp_get_msg_seqno(struct snp_guest_dev *snp_dev)
>> +{
>> + u64 count = __snp_get_msg_seqno(snp_dev);
>> +
>> + /*
>> + * The message sequence counter for the SNP guest request is a 64-bit
>> + * value but the version 2 of GHCB specification defines a 32-bit storage
>> + * for it. If the counter exceeds the 32-bit value then return zero.
>> + * The caller should check the return value, but if the caller happens to
>> + * not check the value and use it, then the firmware treats zero as an
>> + * invalid number and will fail the message request.
>> + */
>> + if (count >= UINT_MAX) {
>> + pr_err("SNP request message sequence counter overflow\n");
>> + return 0;
>> + }
>> +
>> + return count;
>> +}
>> +
>> +static void snp_inc_msg_seqno(struct snp_guest_dev *snp_dev)
>> +{
>> + u32 *os_area_msg_seqno = snp_get_os_area_msg_seqno(snp_dev->vmpck_id);
>> +
>> + if (!os_area_msg_seqno) {
>> + pr_err("SNP unable to get message sequence counter\n");
>> + return;
>> + }
>
> I probably missed this in the other patch or even when the driver was first created, but shouldn't we have a lockdep_assert_held() here, too, before updating the count?
In the current code flow, snp_get_msg_seqno() is always called before snp_inc_msg_seqno(); that is probably why the check wasn't there. It still makes sense to have a lockdep_assert_held() in snp_inc_msg_seqno() as well.
Should I add this change as a separate fix?
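
The change itself would just be (sketch of the function quoted above with the
assert added, using the same cmd_mutex as in __snp_get_msg_seqno()):

static void snp_inc_msg_seqno(struct snp_guest_dev *snp_dev)
{
	u32 *os_area_msg_seqno = snp_get_os_area_msg_seqno(snp_dev->vmpck_id);

	if (!os_area_msg_seqno) {
		pr_err("SNP unable to get message sequence counter\n");
		return;
	}

	lockdep_assert_held(&snp_dev->cmd_mutex);

	/*
	 * The counter is also incremented by the PSP, so increment it by 2
	 * and save in secrets page.
	 */
	*os_area_msg_seqno += 2;
}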
>
>> +
>> + /*
>> + * The counter is also incremented by the PSP, so increment it by 2
>> + * and save in secrets page.
>> + */
>> + *os_area_msg_seqno += 2;
>> +}
>> +
>> +static struct aesgcm_ctx *snp_init_crypto(unsigned int vmpck_id)
>> +{
>> + struct aesgcm_ctx *ctx;
>> + u8 *key;
>> +
>> + if (snp_is_vmpck_empty(vmpck_id)) {
>> + pr_err("SNP: vmpck id %d is null\n", vmpck_id);
>> + return NULL;
>> + }
>> +
>> + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL_ACCOUNT);
>> + if (!ctx)
>> + return NULL;
>> +
>> + key = snp_get_vmpck(vmpck_id);
>> + if (aesgcm_expandkey(ctx, key, VMPCK_KEY_LEN, AUTHTAG_LEN)) {
>> + pr_err("SNP: crypto init failed\n");
>> + kfree(ctx);
>> + return NULL;
>> + }
>> +
>> + return ctx;
>> +}
>> +
>> +int snp_setup_psp_messaging(struct snp_guest_dev *snp_dev)
>> +{
>> + struct sev_guest_platform_data *pdata;
>> + int ret;
>> +
>> + if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) {
>> + pr_err("SNP not supported\n");
>> + return 0;
>> + }
>> +
>> + if (platform_data) {
>> + pr_debug("SNP platform data already initialized.\n");
>> + goto create_ctx;
>> + }
>> +
>> + if (!secrets_pa) {
>> + pr_err("SNP no secrets page\n");
>
> Maybe "SNP secrets page not found\n" ?
>
>> + return -ENODEV;
>> + }
>> +
>> + pdata = kzalloc(sizeof(struct sev_guest_platform_data), GFP_KERNEL);
>> + if (!pdata) {
>> + pr_err("SNP alloc failed\n");
>
> Maybe "Allocation of SNP guest platform data failed\n" ?
>
>> + return -ENOMEM;
>> + }
>> +
>> + pdata->layout = (__force void *)ioremap_encrypted(secrets_pa, PAGE_SIZE);
>> + if (!pdata->layout) {
>> + pr_err("Unable to locate AP jump table address: failed to map the SNP secrets page.\n");
>
> Maybe "Failed to map SNP secrets page\n" ? Not sure where the AP jump table came in on this...
>
>> + goto e_free_pdata;
>> + }
>> +
>> + ret = -ENOMEM;
>> + /* Allocate the shared page used for the request and response message. */
>> + pdata->request = alloc_shared_pages(sizeof(struct snp_guest_msg));
>> + if (!pdata->request)
>> + goto e_unmap;
>> +
>> + pdata->response = alloc_shared_pages(sizeof(struct snp_guest_msg));
>> + if (!pdata->response)
>> + goto e_free_request;
>> +
>> + /* initial the input address for guest request */
>> + pdata->input.req_gpa = __pa(pdata->request);
>> + pdata->input.resp_gpa = __pa(pdata->response);
>> + platform_data = pdata;
>> +
>> +create_ctx:
>> + ret = -EIO;
>> + snp_dev->ctx = snp_init_crypto(snp_dev->vmpck_id);
>> + if (!snp_dev->ctx) {
>> + pr_err("SNP init crypto failed\n");
>
> Maybe "SNP crypto context initialization failed\n" ?
>
>> + platform_data = NULL;
>> + goto e_free_response;
>> + }
>> +
>> + snp_dev->pdata = platform_data;
>
> Add a blank line here.
>
>> + return 0;
>> +
>> +e_free_response:
>> + free_shared_pages(pdata->response, sizeof(struct snp_guest_msg));
>> +e_free_request:
>> + free_shared_pages(pdata->request, sizeof(struct snp_guest_msg));
>> +e_unmap:
>> + iounmap(pdata->layout);
>> +e_free_pdata:
>> + kfree(pdata);
>> +
>> + return ret;
>> +}
>> +EXPORT_SYMBOL_GPL(snp_setup_psp_messaging);
>> +
>> +static int __enc_payload(struct aesgcm_ctx *ctx, struct snp_guest_msg *msg,
>> + void *plaintext, size_t len)
>> +{
>> + struct snp_guest_msg_hdr *hdr = &msg->hdr;
>> + u8 iv[GCM_AES_IV_SIZE] = {};
>> +
>> + if (WARN_ON((hdr->msg_sz + ctx->authsize) > sizeof(msg->payload)))
>> + return -EBADMSG;
>> +
>> + memcpy(iv, &hdr->msg_seqno, sizeof(hdr->msg_seqno));
>> + aesgcm_encrypt(ctx, msg->payload, plaintext, len, &hdr->algo, AAD_LEN,
>> + iv, hdr->authtag);
>> + return 0;
>> +}
>> +
>> +static int dec_payload(struct aesgcm_ctx *ctx, struct snp_guest_msg *msg,
>> + void *plaintext, size_t len)
>> +{
>> + struct snp_guest_msg_hdr *hdr = &msg->hdr;
>> + u8 iv[GCM_AES_IV_SIZE] = {};
>> +
>> + memcpy(iv, &hdr->msg_seqno, sizeof(hdr->msg_seqno));
>> + if (aesgcm_decrypt(ctx, plaintext, msg->payload, len, &hdr->algo,
>> + AAD_LEN, iv, hdr->authtag))
>> + return 0;
>> + else
>> + return -EBADMSG;
>> +}
>> +
>> +static int verify_and_dec_payload(struct snp_guest_dev *snp_dev, struct snp_guest_req *guest_req,
>> + struct sev_guest_platform_data *pdata)
>> +{
>> + struct snp_guest_msg *resp = &snp_dev->secret_response;
>> + struct snp_guest_msg *req = &snp_dev->secret_request;
>> + struct snp_guest_msg_hdr *req_hdr = &req->hdr;
>> + struct snp_guest_msg_hdr *resp_hdr = &resp->hdr;
>> + struct aesgcm_ctx *ctx = snp_dev->ctx;
>> +
>> + pr_debug("response [seqno %lld type %d version %d sz %d]\n",
>> + resp_hdr->msg_seqno, resp_hdr->msg_type, resp_hdr->msg_version,
>> + resp_hdr->msg_sz);
>> +
>> + /* Copy response from shared memory to encrypted memory. */
>> + memcpy(resp, pdata->response, sizeof(*resp));
>> +
>> + /* Verify that the sequence counter is incremented by 1 */
>> + if (unlikely(resp_hdr->msg_seqno != (req_hdr->msg_seqno + 1)))
>> + return -EBADMSG;
>> +
>> + /* Verify response message type and version number. */
>> + if (resp_hdr->msg_type != (req_hdr->msg_type + 1) ||
>> + resp_hdr->msg_version != req_hdr->msg_version)
>> + return -EBADMSG;
>> +
>> + /*
>> + * If the message size is greater than our buffer length then return
>> + * an error.
>> + */
>> + if (unlikely((resp_hdr->msg_sz + ctx->authsize) > guest_req->resp_sz))
>> + return -EBADMSG;
>> +
>> + return dec_payload(ctx, resp, guest_req->resp_buf, resp_hdr->msg_sz);
>> +}
>> +
>> +static int enc_payload(struct snp_guest_dev *snp_dev, u64 seqno, struct snp_guest_req *req)
>> +{
>> + struct snp_guest_msg *msg = &snp_dev->secret_request;
>> + struct snp_guest_msg_hdr *hdr = &msg->hdr;
>> +
>> + memset(msg, 0, sizeof(*msg));
>> +
>> + hdr->algo = SNP_AEAD_AES_256_GCM;
>> + hdr->hdr_version = MSG_HDR_VER;
>> + hdr->hdr_sz = sizeof(*hdr);
>> + hdr->msg_type = req->msg_type;
>> + hdr->msg_version = req->msg_version;
>> + hdr->msg_seqno = seqno;
>> + hdr->msg_vmpck = req->vmpck_id;
>> + hdr->msg_sz = req->req_sz;
>> +
>> + /* Verify the sequence number is non-zero */
>> + if (!hdr->msg_seqno)
>> + return -ENOSR;
>> +
>> + pr_debug("request [seqno %lld type %d version %d sz %d]\n",
>> + hdr->msg_seqno, hdr->msg_type, hdr->msg_version, hdr->msg_sz);
>> +
>> + return __enc_payload(snp_dev->ctx, msg, req->req_buf, req->req_sz);
>> +}
>> +
>> +static int snp_issue_guest_request(struct snp_guest_req *req, struct snp_req_data *input,
>> + struct snp_guest_request_ioctl *rio);
>
> Could all of these routines been moved down closer to the bottom of the file to avoid this forward declaration?
Looks possible; I will try it out.
>
>> +
>> +static int __handle_guest_request(struct snp_guest_dev *snp_dev, struct snp_guest_req *req,
>> + struct snp_guest_request_ioctl *rio,
>> + struct sev_guest_platform_data *pdata)
>> +{
>> + unsigned long req_start = jiffies;
>> + unsigned int override_npages = 0;
>> + u64 override_err = 0;
>> + int rc;
>> +
>
> ...
>
>> -e_free_ctx:
>> - kfree(snp_dev->ctx);
>> e_free_cert_data:
>> free_shared_pages(snp_dev->certs_data, SEV_FW_BLOB_MAX_SIZE);
>> -e_free_response:
>> - free_shared_pages(snp_dev->response, sizeof(struct snp_guest_msg));
>> -e_free_request:
>> - free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg));
>> -e_unmap:
>> - iounmap(mapping);
>> + e_free_ctx:
>> + kfree(snp_dev->ctx);
>> +e_free_snpdev:
>> + kfree(snp_dev);
>> return ret;
>> }
>> @@ -780,11 +332,9 @@ static int __exit sev_guest_remove(struct platform_device *pdev)
>> {
>> struct snp_guest_dev *snp_dev = platform_get_drvdata(pdev);
>> - free_shared_pages(snp_dev->certs_data, SEV_FW_BLOB_MAX_SIZE);
>
> Looks like this one should still be here, right?
Yes, this should still be there; I will restore it in the next version.
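
Roughly (sketch only, rest of the teardown as in this patch):

static int __exit sev_guest_remove(struct platform_device *pdev)
{
	struct snp_guest_dev *snp_dev = platform_get_drvdata(pdev);

	free_shared_pages(snp_dev->certs_data, SEV_FW_BLOB_MAX_SIZE);
	/* ... remaining driver teardown unchanged ... */

	return 0;
}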
>
> Thanks,
> Tom
Regards
Nikunj