[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <c36a12f6-33bd-4430-92b2-5fd2939d9b24@amd.com>
Date: Fri, 12 Sep 2025 09:57:37 -0500
From: Tom Lendacky <thomas.lendacky@....com>
To: Ashish Kalra <Ashish.Kalra@....com>, tglx@...utronix.de,
mingo@...hat.com, bp@...en8.de, dave.hansen@...ux.intel.com, x86@...nel.org,
hpa@...or.com, seanjc@...gle.com, pbonzini@...hat.com,
herbert@...dor.apana.org.au
Cc: nikunj@....com, davem@...emloft.net, aik@....com, ardb@...nel.org,
john.allen@....com, michael.roth@....com, Neeraj.Upadhyay@....com,
linux-kernel@...r.kernel.org, kvm@...r.kernel.org,
linux-crypto@...r.kernel.org
Subject: Re: [PATCH v4 2/3] crypto: ccp - Add new HV-Fixed page
allocation/free API.
On 9/10/25 17:55, Ashish Kalra wrote:
> From: Ashish Kalra <ashish.kalra@....com>
>
> When SEV-SNP is active, the TEE extended command header page and
> all output buffers for TEE extended commands (such as used by Seamless
> Firmware servicing support) must be in hypervisor-fixed state,
> assigned to the hypervisor and marked immutable in the RMP entries.
>
> Add a new generic SEV API interface to allocate/free hypervisor fixed
> pages which abstracts hypervisor fixed page allocation/free for PSP
> sub devices. The API internally uses SNP_INIT_EX to transition pages
> to HV-Fixed page state.
>
> If SNP is not enabled then the allocator is simply a wrapper over
> alloc_pages() and __free_pages().
>
> When the sub-device frees the pages, they are put on a free list
> and future allocation requests will try to re-use the freed pages from
> this list. But this list is not preserved across PSP driver load/unload,
> hence this free/reuse is only supported while the PSP driver is
> loaded. As HV_FIXED page state is only changed at reboot, these pages
> are leaked as they cannot be returned back to the page allocator and
> then potentially allocated to guests, which will cause SEV-SNP guests
> to fail to start or terminate when accessing the HV_FIXED page.
>
> Suggested-by: Thomas Lendacky <Thomas.Lendacky@....com>
> Signed-off-by: Ashish Kalra <ashish.kalra@....com>
Reviewed-by: Tom Lendacky <thomas.lendacky@....com>
> ---
> drivers/crypto/ccp/sev-dev.c | 182 +++++++++++++++++++++++++++++++++++
> drivers/crypto/ccp/sev-dev.h | 3 +
> 2 files changed, 185 insertions(+)
>
> diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
> index 9e797cbdf038..2300673c6683 100644
> --- a/drivers/crypto/ccp/sev-dev.c
> +++ b/drivers/crypto/ccp/sev-dev.c
> @@ -83,6 +83,21 @@ MODULE_FIRMWARE("amd/amd_sev_fam19h_model1xh.sbin"); /* 4th gen EPYC */
> static bool psp_dead;
> static int psp_timeout;
>
> +enum snp_hv_fixed_pages_state {
> + ALLOCATED,
> + HV_FIXED,
> +};
> +
> +struct snp_hv_fixed_pages_entry {
> + struct list_head list;
> + struct page *page;
> + unsigned int order;
> + bool free;
> + enum snp_hv_fixed_pages_state page_state;
> +};
> +
> +static LIST_HEAD(snp_hv_fixed_pages);
> +
> /* Trusted Memory Region (TMR):
> * The TMR is a 1MB area that must be 1MB aligned. Use the page allocator
> * to allocate the memory, which will return aligned memory for the specified
> @@ -1158,6 +1173,165 @@ static int snp_get_platform_data(struct sev_device *sev, int *error)
> return rc;
> }
>
> +/* Hypervisor Fixed pages API interface */
> +static void snp_hv_fixed_pages_state_update(struct sev_device *sev,
> + enum snp_hv_fixed_pages_state page_state)
> +{
> + struct snp_hv_fixed_pages_entry *entry;
> +
> + /* List is protected by sev_cmd_mutex */
> + lockdep_assert_held(&sev_cmd_mutex);
> +
> + if (list_empty(&snp_hv_fixed_pages))
> + return;
> +
> + list_for_each_entry(entry, &snp_hv_fixed_pages, list)
> + entry->page_state = page_state;
> +}
> +
> +/*
> + * Allocate HV_FIXED pages in 2MB aligned sizes to ensure the whole
> + * 2MB pages are marked as HV_FIXED.
> + */
> +struct page *snp_alloc_hv_fixed_pages(unsigned int num_2mb_pages)
> +{
> + struct psp_device *psp_master = psp_get_master_device();
> + struct snp_hv_fixed_pages_entry *entry;
> + struct sev_device *sev;
> + unsigned int order;
> + struct page *page;
> +
> + if (!psp_master || !psp_master->sev_data)
> + return NULL;
> +
> + sev = psp_master->sev_data;
> +
> + order = get_order(PMD_SIZE * num_2mb_pages);
> +
> + /*
> + * SNP_INIT_EX is protected by sev_cmd_mutex, therefore this list
> + * also needs to be protected using the same mutex.
> + */
> + guard(mutex)(&sev_cmd_mutex);
> +
> + /*
> + * This API uses SNP_INIT_EX to transition allocated pages to HV_Fixed
> + * page state, fail if SNP is already initialized.
> + */
> + if (sev->snp_initialized)
> + return NULL;
> +
> + /* Re-use freed pages that match the request */
> + list_for_each_entry(entry, &snp_hv_fixed_pages, list) {
> + /* Hypervisor fixed page allocator implements exact fit policy */
> + if (entry->order == order && entry->free) {
> + entry->free = false;
> + memset(page_address(entry->page), 0,
> + (1 << entry->order) * PAGE_SIZE);
> + return entry->page;
> + }
> + }
> +
> + page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
> + if (!page)
> + return NULL;
> +
> + entry = kzalloc(sizeof(*entry), GFP_KERNEL);
> + if (!entry) {
> + __free_pages(page, order);
> + return NULL;
> + }
> +
> + entry->page = page;
> + entry->order = order;
> + list_add_tail(&entry->list, &snp_hv_fixed_pages);
> +
> + return page;
> +}
> +
> +void snp_free_hv_fixed_pages(struct page *page)
> +{
> + struct psp_device *psp_master = psp_get_master_device();
> + struct snp_hv_fixed_pages_entry *entry, *nentry;
> +
> + if (!psp_master || !psp_master->sev_data)
> + return;
> +
> + /*
> + * SNP_INIT_EX is protected by sev_cmd_mutex, therefore this list
> + * also needs to be protected using the same mutex.
> + */
> + guard(mutex)(&sev_cmd_mutex);
> +
> + list_for_each_entry_safe(entry, nentry, &snp_hv_fixed_pages, list) {
> + if (entry->page != page)
> + continue;
> +
> + /*
> + * HV_FIXED page state cannot be changed until reboot
> + * and they cannot be used by an SNP guest, so they cannot
> + * be returned back to the page allocator.
> + * Mark the pages as free internally to allow possible re-use.
> + */
> + if (entry->page_state == HV_FIXED) {
> + entry->free = true;
> + } else {
> + __free_pages(page, entry->order);
> + list_del(&entry->list);
> + kfree(entry);
> + }
> + return;
> + }
> +}
> +
> +static void snp_add_hv_fixed_pages(struct sev_device *sev, struct sev_data_range_list *range_list)
> +{
> + struct snp_hv_fixed_pages_entry *entry;
> + struct sev_data_range *range;
> + int num_elements;
> +
> + lockdep_assert_held(&sev_cmd_mutex);
> +
> + if (list_empty(&snp_hv_fixed_pages))
> + return;
> +
> + num_elements = list_count_nodes(&snp_hv_fixed_pages) +
> + range_list->num_elements;
> +
> + /*
> + * Ensure the list of HV_FIXED pages that will be passed to firmware
> + * do not exceed the page-sized argument buffer.
> + */
> + if (num_elements * sizeof(*range) + sizeof(*range_list) > PAGE_SIZE) {
> + dev_warn(sev->dev, "Additional HV_Fixed pages cannot be accommodated, omitting\n");
> + return;
> + }
> +
> + range = &range_list->ranges[range_list->num_elements];
> + list_for_each_entry(entry, &snp_hv_fixed_pages, list) {
> + range->base = page_to_pfn(entry->page) << PAGE_SHIFT;
> + range->page_count = 1 << entry->order;
> + range++;
> + }
> + range_list->num_elements = num_elements;
> +}
> +
> +static void snp_leak_hv_fixed_pages(void)
> +{
> + struct snp_hv_fixed_pages_entry *entry;
> +
> + /* List is protected by sev_cmd_mutex */
> + lockdep_assert_held(&sev_cmd_mutex);
> +
> + if (list_empty(&snp_hv_fixed_pages))
> + return;
> +
> + list_for_each_entry(entry, &snp_hv_fixed_pages, list)
> + if (entry->page_state == HV_FIXED)
> + __snp_leak_pages(page_to_pfn(entry->page),
> + 1 << entry->order, false);
> +}
> +
> static int snp_filter_reserved_mem_regions(struct resource *rs, void *arg)
> {
> struct sev_data_range_list *range_list = arg;
> @@ -1248,6 +1422,12 @@ static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid)
> return rc;
> }
>
> + /*
> + * Add HV_Fixed pages from other PSP sub-devices, such as SFS to the
> + * HV_Fixed page list.
> + */
> + snp_add_hv_fixed_pages(sev, snp_range_list);
> +
> memset(&data, 0, sizeof(data));
>
> if (max_snp_asid) {
> @@ -1293,6 +1473,7 @@ static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid)
> return rc;
> }
>
> + snp_hv_fixed_pages_state_update(sev, HV_FIXED);
> sev->snp_initialized = true;
> dev_dbg(sev->dev, "SEV-SNP firmware initialized\n");
>
> @@ -1896,6 +2077,7 @@ static int __sev_snp_shutdown_locked(int *error, bool panic)
> return ret;
> }
>
> + snp_leak_hv_fixed_pages();
> sev->snp_initialized = false;
> dev_dbg(sev->dev, "SEV-SNP firmware shutdown\n");
>
> diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h
> index 5aed2595c9ae..ac03bd0848f7 100644
> --- a/drivers/crypto/ccp/sev-dev.h
> +++ b/drivers/crypto/ccp/sev-dev.h
> @@ -69,4 +69,7 @@ void sev_dev_destroy(struct psp_device *psp);
> void sev_pci_init(void);
> void sev_pci_exit(void);
>
> +struct page *snp_alloc_hv_fixed_pages(unsigned int num_2mb_pages);
> +void snp_free_hv_fixed_pages(struct page *page);
> +
> #endif /* __SEV_DEV_H */
Powered by blists - more mailing lists