Message-ID: <20251113230759.1562024-4-michael.roth@amd.com>
Date: Thu, 13 Nov 2025 17:07:59 -0600
From: Michael Roth <michael.roth@....com>
To: <kvm@...r.kernel.org>
CC: <linux-coco@...ts.linux.dev>, <linux-mm@...ck.org>,
<linux-kernel@...r.kernel.org>, <thomas.lendacky@....com>,
<pbonzini@...hat.com>, <seanjc@...gle.com>, <vbabka@...e.cz>,
<ashish.kalra@....com>, <liam.merwick@...cle.com>, <david@...hat.com>,
<vannapurve@...gle.com>, <ackerleytng@...gle.com>, <aik@....com>,
<ira.weiny@...el.com>, <yan.y.zhao@...el.com>
Subject: [PATCH 3/3] KVM: guest_memfd: GUP source pages prior to populating guest memory

Currently, the post-populate callbacks handle copying source pages into
private GPA ranges backed by guest_memfd: kvm_gmem_populate() acquires
the filemap invalidate lock, then calls a post-populate callback, which
may issue a get_user_pages() on the source pages prior to copying them
into the private GPA (e.g. TDX).

This will not be compatible with in-place conversion, where the
userspace page fault path will attempt to acquire the filemap
invalidate lock while holding the mm->mmap_lock, leading to a potential
ABBA deadlock[1].
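
Roughly, the problematic lock ordering would be:

  kvm_gmem_populate():             in-place conversion fault path:
    filemap_invalidate_lock()        mm->mmap_lock (page fault)
    get_user_pages()                 filemap_invalidate_lock()
      -> mm->mmap_lock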

Address this by hoisting the GUP above the filemap invalidate lock so
that any page faults on the source pages are taken early, prior to
acquiring the filemap invalidate lock.
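
The resulting ordering in kvm_gmem_populate() is then:

  kvm_gmem_populate():
    get_user_pages_fast()      (source page faults resolved here)
    filemap_invalidate_lock()
    post_populate() callback   (operates on the pre-pinned struct pages)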

It's not clear whether this issue is reachable with the current
implementation of guest_memfd, which doesn't support in-place
conversion. However, this approach does provide a consistent mechanism
for handing stable source/target PFNs to the callbacks, rather than
punting to vendor-specific code, and the resulting commonality across
architectures may be worthwhile even without in-place conversion.

Suggested-by: Sean Christopherson <seanjc@...gle.com>
Signed-off-by: Michael Roth <michael.roth@....com>
---
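
For reviewers: a minimal sketch of what a post-populate callback is now
expected to do with a (potentially unaligned) source buffer follows.
This is a hypothetical example rather than part of the patch, with
error handling omitted:

static int example_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
				 struct page **src_pages, loff_t src_offset,
				 int order, void *opaque)
{
	int i;

	for (i = 0; i < (1 << order); i++) {
		void *dst = kmap_local_pfn(pfn + i);
		void *src = kmap_local_page(src_pages[i]);

		/* The tail of src_pages[i] fills the start of the dst page. */
		memcpy(dst, src + src_offset, PAGE_SIZE - src_offset);
		kunmap_local(src);

		/*
		 * For an unaligned source, the head of src_pages[i + 1]
		 * fills the remainder of the 4K destination page.
		 */
		if (src_offset) {
			src = kmap_local_page(src_pages[i + 1]);
			memcpy(dst + PAGE_SIZE - src_offset, src, src_offset);
			kunmap_local(src);
		}

		kunmap_local(dst);
	}

	return 0;
}
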
arch/x86/kvm/svm/sev.c | 40 ++++++++++++++++++++++++++------------
arch/x86/kvm/vmx/tdx.c | 21 +++++---------------
include/linux/kvm_host.h | 3 ++-
virt/kvm/guest_memfd.c | 50 ++++++++++++++++++++++++++++++++++++++++++++------
4 files changed, 79 insertions(+), 35 deletions(-)
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 0835c664fbfd..d0ac710697a2 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2260,7 +2260,8 @@ struct sev_gmem_populate_args {
};
static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn_start, kvm_pfn_t pfn,
- void __user *src, int order, void *opaque)
+ struct page **src_pages, loff_t src_offset,
+ int order, void *opaque)
{
struct sev_gmem_populate_args *sev_populate_args = opaque;
struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
@@ -2268,7 +2269,7 @@ static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn_start, kvm_pfn_t pf
int npages = (1 << order);
gfn_t gfn;
- if (WARN_ON_ONCE(sev_populate_args->type != KVM_SEV_SNP_PAGE_TYPE_ZERO && !src))
+ if (WARN_ON_ONCE(sev_populate_args->type != KVM_SEV_SNP_PAGE_TYPE_ZERO && !src_pages))
return -EINVAL;
for (gfn = gfn_start, i = 0; gfn < gfn_start + npages; gfn++, i++) {
@@ -2284,14 +2285,21 @@ static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn_start, kvm_pfn_t pf
goto err;
}
- if (src) {
- void *vaddr = kmap_local_pfn(pfn + i);
+ if (src_pages) {
+ void *src_vaddr = kmap_local_pfn(page_to_pfn(src_pages[i]));
+ void *dst_vaddr = kmap_local_pfn(pfn + i);
- if (copy_from_user(vaddr, src + i * PAGE_SIZE, PAGE_SIZE)) {
- ret = -EFAULT;
- goto err;
+ memcpy(dst_vaddr, src_vaddr + src_offset, PAGE_SIZE - src_offset);
+ kunmap_local(src_vaddr);
+
+ if (src_offset) {
+ src_vaddr = kmap_local_pfn(page_to_pfn(src_pages[i + 1]));
+
+ memcpy(dst_vaddr + PAGE_SIZE - src_offset, src_vaddr, src_offset);
+ kunmap_local(src_vaddr);
}
- kunmap_local(vaddr);
+
+ kunmap_local(dst_vaddr);
}
ret = rmp_make_private(pfn + i, gfn << PAGE_SHIFT, PG_LEVEL_4K,
@@ -2331,12 +2339,20 @@ static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn_start, kvm_pfn_t pf
if (!snp_page_reclaim(kvm, pfn + i) &&
sev_populate_args->type == KVM_SEV_SNP_PAGE_TYPE_CPUID &&
sev_populate_args->fw_error == SEV_RET_INVALID_PARAM) {
- void *vaddr = kmap_local_pfn(pfn + i);
+ void *src_vaddr = kmap_local_pfn(page_to_pfn(src_pages[i]));
+ void *dst_vaddr = kmap_local_pfn(pfn + i);
- if (copy_to_user(src + i * PAGE_SIZE, vaddr, PAGE_SIZE))
- pr_debug("Failed to write CPUID page back to userspace\n");
+ memcpy(src_vaddr + src_offset, dst_vaddr, PAGE_SIZE - src_offset);
+ kunmap_local(src_vaddr);
+
+ if (src_offset) {
+ src_vaddr = kmap_local_pfn(page_to_pfn(src_pages[i + 1]));
+
+ memcpy(src_vaddr, dst_vaddr + PAGE_SIZE - src_offset, src_offset);
+ kunmap_local(src_vaddr);
+ }
- kunmap_local(vaddr);
+ kunmap_local(dst_vaddr);
}
/* pfn + i is hypervisor-owned now, so skip below cleanup for it. */
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 57ed101a1181..dd5439ec1473 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -3115,37 +3115,26 @@ struct tdx_gmem_post_populate_arg {
};
static int tdx_gmem_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
- void __user *src, int order, void *_arg)
+ struct page **src_pages, loff_t src_offset,
+ int order, void *_arg)
{
struct tdx_gmem_post_populate_arg *arg = _arg;
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
u64 err, entry, level_state;
gpa_t gpa = gfn_to_gpa(gfn);
- struct page *src_page;
int ret, i;
if (KVM_BUG_ON(kvm_tdx->page_add_src, kvm))
return -EIO;
- if (KVM_BUG_ON(!PAGE_ALIGNED(src), kvm))
+ /* Source should be page-aligned, in which case src_offset will be 0. */
+	if (KVM_BUG_ON(src_offset, kvm))
return -EINVAL;
- /*
- * Get the source page if it has been faulted in. Return failure if the
- * source page has been swapped out or unmapped in primary memory.
- */
- ret = get_user_pages_fast((unsigned long)src, 1, 0, &src_page);
- if (ret < 0)
- return ret;
- if (ret != 1)
- return -ENOMEM;
-
- kvm_tdx->page_add_src = src_page;
+	kvm_tdx->page_add_src = src_pages[0];
ret = kvm_tdp_mmu_map_private_pfn(arg->vcpu, gfn, pfn);
kvm_tdx->page_add_src = NULL;
- put_page(src_page);
-
if (ret || !(arg->flags & KVM_TDX_MEASURE_MEMORY_REGION))
return ret;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index d93f75b05ae2..7e9d2403c61f 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2581,7 +2581,8 @@ int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_ord
* Returns the number of pages that were populated.
*/
typedef int (*kvm_gmem_populate_cb)(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
- void __user *src, int order, void *opaque);
+ struct page **src_pages, loff_t src_offset,
+ int order, void *opaque);
long kvm_gmem_populate(struct kvm *kvm, gfn_t gfn, void __user *src, long npages,
kvm_gmem_populate_cb post_populate, void *opaque);
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 9160379df378..e9ac3fd4fd8f 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -814,14 +814,17 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_gmem_get_pfn);
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_POPULATE
+
+#define GMEM_GUP_NPAGES (1UL << PMD_ORDER)
+
long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long npages,
kvm_gmem_populate_cb post_populate, void *opaque)
{
struct kvm_memory_slot *slot;
- void __user *p;
-
+ struct page **src_pages;
int ret = 0, max_order;
- long i;
+ loff_t src_offset = 0;
+ long i, src_npages;
lockdep_assert_held(&kvm->slots_lock);
@@ -836,9 +839,36 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
if (!file)
return -EFAULT;
+ npages = min_t(ulong, slot->npages - (start_gfn - slot->base_gfn), npages);
+ npages = min_t(ulong, npages, GMEM_GUP_NPAGES);
+
+ if (src) {
+ src_npages = IS_ALIGNED((unsigned long)src, PAGE_SIZE) ? npages : npages + 1;
+
+ src_pages = kmalloc_array(src_npages, sizeof(struct page *), GFP_KERNEL);
+ if (!src_pages)
+ return -ENOMEM;
+
+		ret = get_user_pages_fast((unsigned long)src, src_npages, 0, src_pages);
+		if (ret < 0) {
+			kfree(src_pages);
+			return ret;
+		}
+
+		if (ret != src_npages) {
+			long j;
+
+			for (j = 0; j < ret; j++)
+				put_page(src_pages[j]);
+			kfree(src_pages);
+			return -ENOMEM;
+		}
+
+ src_offset = (loff_t)(src - PTR_ALIGN_DOWN(src, PAGE_SIZE));
+ }
+
filemap_invalidate_lock(file->f_mapping);
- npages = min_t(ulong, slot->npages - (start_gfn - slot->base_gfn), npages);
for (i = 0; i < npages; i += (1 << max_order)) {
struct folio *folio;
gfn_t gfn = start_gfn + i;
@@ -869,8 +899,8 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
max_order--;
}
- p = src ? src + i * PAGE_SIZE : NULL;
- ret = post_populate(kvm, gfn, pfn, p, max_order, opaque);
+ ret = post_populate(kvm, gfn, pfn, src ? &src_pages[i] : NULL,
+ src_offset, max_order, opaque);
if (!ret)
folio_mark_uptodate(folio);
@@ -882,6 +912,14 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
filemap_invalidate_unlock(file->f_mapping);
+ if (src) {
+ long j;
+
+ for (j = 0; j < src_npages; j++)
+ put_page(src_pages[j]);
+ kfree(src_pages);
+ }
+
return ret && !i ? ret : i;
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_gmem_populate);
--
2.25.1