Message-ID: <7132d855-d855-43c4-83a7-a6d165fc1a75@amd.com>
Date: Fri, 12 Sep 2025 09:52:40 -0500
From: Tom Lendacky <thomas.lendacky@....com>
To: Ashish Kalra <Ashish.Kalra@....com>, tglx@...utronix.de,
mingo@...hat.com, bp@...en8.de, dave.hansen@...ux.intel.com, x86@...nel.org,
hpa@...or.com, seanjc@...gle.com, pbonzini@...hat.com,
herbert@...dor.apana.org.au
Cc: nikunj@....com, davem@...emloft.net, aik@....com, ardb@...nel.org,
john.allen@....com, michael.roth@....com, Neeraj.Upadhyay@....com,
linux-kernel@...r.kernel.org, kvm@...r.kernel.org,
linux-crypto@...r.kernel.org
Subject: Re: [PATCH v4 1/3] x86/sev: Add new dump_rmp parameter to
snp_leak_pages() API
On 9/10/25 17:55, Ashish Kalra wrote:
> From: Ashish Kalra <ashish.kalra@....com>
>
> When leaking certain page types, such as Hypervisor Fixed (HV_FIXED)
> pages, it does not make sense to dump the RMP contents for the 2MB range
> containing the page(s) being leaked. In the case of HV_FIXED pages, this
> is not an error situation, so the surrounding 2MB range of RMP entries
> does not provide useful debug information.
>
> Add a new __snp_leak_pages() API with a dump_rmp bool parameter so that
> pages can still be added to snp_leaked_pages_list without issuing
> dump_rmpentry().
>
> Make snp_leak_pages() a wrapper for the common case, which allows
> existing users to continue dumping RMP entries.
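
A minimal usage sketch for readers following the thread (hypothetical
call sites, not part of this patch): a path that transitions pages to
the HV_FIXED state would pass dump_rmp == false, while existing error
paths keep today's behavior through the wrapper:

	/* Hypothetical HV_FIXED path: record leaked pages, skip RMP dump */
	__snp_leak_pages(pfn, npages, false);

	/* Existing callers are unchanged and still dump RMP entries */
	snp_leak_pages(pfn, npages);	/* same as __snp_leak_pages(pfn, npages, true) */
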
>
> Suggested-by: Thomas Lendacky <Thomas.Lendacky@....com>
> Suggested-by: Sean Christopherson <seanjc@...gle.com>
> Signed-off-by: Ashish Kalra <ashish.kalra@....com>
Reviewed-by: Tom Lendacky <thomas.lendacky@....com>
> ---
>  arch/x86/include/asm/sev.h | 8 +++++++-
>  arch/x86/virt/svm/sev.c    | 7 ++++---
>  2 files changed, 11 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
> index 00475b814ac4..7a1ae990b15f 100644
> --- a/arch/x86/include/asm/sev.h
> +++ b/arch/x86/include/asm/sev.h
> @@ -635,10 +635,15 @@ void snp_dump_hva_rmpentry(unsigned long address);
>  int psmash(u64 pfn);
>  int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, u32 asid, bool immutable);
>  int rmp_make_shared(u64 pfn, enum pg_level level);
> -void snp_leak_pages(u64 pfn, unsigned int npages);
> +void __snp_leak_pages(u64 pfn, unsigned int npages, bool dump_rmp);
>  void kdump_sev_callback(void);
>  void snp_fixup_e820_tables(void);
> 
> +static inline void snp_leak_pages(u64 pfn, unsigned int pages)
> +{
> +	__snp_leak_pages(pfn, pages, true);
> +}
> +
>  static inline void sev_evict_cache(void *va, int npages)
>  {
>  	volatile u8 val __always_unused;
> @@ -668,6 +673,7 @@ static inline int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, u32 as
>  	return -ENODEV;
>  }
>  static inline int rmp_make_shared(u64 pfn, enum pg_level level) { return -ENODEV; }
> +static inline void __snp_leak_pages(u64 pfn, unsigned int npages, bool dump_rmp) {}
>  static inline void snp_leak_pages(u64 pfn, unsigned int npages) {}
>  static inline void kdump_sev_callback(void) { }
>  static inline void snp_fixup_e820_tables(void) {}
> diff --git a/arch/x86/virt/svm/sev.c b/arch/x86/virt/svm/sev.c
> index 942372e69b4d..ee643a6cd691 100644
> --- a/arch/x86/virt/svm/sev.c
> +++ b/arch/x86/virt/svm/sev.c
> @@ -1029,7 +1029,7 @@ int rmp_make_shared(u64 pfn, enum pg_level level)
>  }
>  EXPORT_SYMBOL_GPL(rmp_make_shared);
> 
> -void snp_leak_pages(u64 pfn, unsigned int npages)
> +void __snp_leak_pages(u64 pfn, unsigned int npages, bool dump_rmp)
>  {
>  	struct page *page = pfn_to_page(pfn);
> 
> @@ -1052,14 +1052,15 @@ void snp_leak_pages(u64 pfn, unsigned int npages)
>  		    (PageHead(page) && compound_nr(page) <= npages))
>  			list_add_tail(&page->buddy_list, &snp_leaked_pages_list);
> 
> -		dump_rmpentry(pfn);
> +		if (dump_rmp)
> +			dump_rmpentry(pfn);
>  		snp_nr_leaked_pages++;
>  		pfn++;
>  		page++;
>  	}
>  	spin_unlock(&snp_leaked_pages_list_lock);
>  }
> -EXPORT_SYMBOL_GPL(snp_leak_pages);
> +EXPORT_SYMBOL_GPL(__snp_leak_pages);
> 
>  void kdump_sev_callback(void)
>  {