Message-Id: <20211221151125.19446-16-chao.p.peng@linux.intel.com>
Date: Tue, 21 Dec 2021 23:11:25 +0800
From: Chao Peng <chao.p.peng@...ux.intel.com>
To: kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-mm@...ck.org, linux-fsdevel@...r.kernel.org,
qemu-devel@...gnu.org
Cc: Paolo Bonzini <pbonzini@...hat.com>,
Jonathan Corbet <corbet@....net>,
Sean Christopherson <seanjc@...gle.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
x86@...nel.org, "H. Peter Anvin" <hpa@...or.com>,
Hugh Dickins <hughd@...gle.com>,
Jeff Layton <jlayton@...nel.org>,
"J . Bruce Fields" <bfields@...ldses.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Yu Zhang <yu.c.zhang@...ux.intel.com>,
Chao Peng <chao.p.peng@...ux.intel.com>,
"Kirill A . Shutemov" <kirill.shutemov@...ux.intel.com>,
luto@...nel.org, john.ji@...el.com, susie.li@...el.com,
jun.nakajima@...el.com, dave.hansen@...el.com, ak@...ux.intel.com,
david@...hat.com
Subject: [PATCH v3 15/15] KVM: Register/unregister private memory slot to memfd

Expose the KVM_MEM_PRIVATE flag and register/unregister the private
memory slot with its backing memfd when userspace sets the flag.
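
For reference, a rough sketch of the intended userspace usage. The
layout of struct kvm_userspace_memory_region_ext (embedded base region
plus an fd/offset pair) is assumed here; the exact field names are
defined by the uAPI patch earlier in this series:

	/*
	 * Hypothetical usage sketch; the _ext field names for the memfd
	 * handle and offset are assumptions, not taken from this patch.
	 */
	int memfd = memfd_create("guest-private", MFD_CLOEXEC);

	struct kvm_userspace_memory_region_ext ext = {
		.region = {
			.slot            = 0,
			.flags           = KVM_MEM_PRIVATE,
			.guest_phys_addr = 0x0,
			.memory_size     = mem_size,
			.userspace_addr  = (__u64)(unsigned long)hva,
		},
		.ofs = 0,      /* offset into the memfd (name assumed) */
		.fd  = memfd,  /* private memory backing fd (name assumed) */
	};

	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &ext);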

KVM_MEM_PRIVATE is disallowed by default, but architecture code can opt
in by implementing kvm_arch_private_memory_supported().
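
As an illustration only (not part of this patch), an architecture could
opt in by overriding the __weak default along these lines; the vm_type
field and the KVM_X86_PROTECTED_VM value below are hypothetical:

	bool kvm_arch_private_memory_supported(struct kvm *kvm)
	{
		/*
		 * Hypothetical policy: only allow KVM_MEM_PRIVATE for VMs
		 * created with a guest-private memory VM type.  The names
		 * used here are illustrative, not defined by this series.
		 */
		return kvm->arch.vm_type == KVM_X86_PROTECTED_VM;
	}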
Signed-off-by: Yu Zhang <yu.c.zhang@...ux.intel.com>
Signed-off-by: Chao Peng <chao.p.peng@...ux.intel.com>
---
include/linux/kvm_host.h | 2 ++
virt/kvm/kvm_main.c | 35 +++++++++++++++++++++++++++++++----
2 files changed, 33 insertions(+), 4 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 0c53df0a6b2e..0f0e24f19892 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1096,6 +1096,8 @@ int kvm_arch_post_init_vm(struct kvm *kvm);
void kvm_arch_pre_destroy_vm(struct kvm *kvm);
int kvm_arch_create_vm_debugfs(struct kvm *kvm);
bool kvm_arch_dirty_log_supported(struct kvm *kvm);
+bool kvm_arch_private_memory_supported(struct kvm *kvm);
+
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
/*
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 79313c549fb9..6eb0d86abdcf 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1509,6 +1509,11 @@ bool __weak kvm_arch_dirty_log_supported(struct kvm *kvm)
return true;
}
+bool __weak kvm_arch_private_memory_supported(struct kvm *kvm)
+{
+ return false;
+}
+
static int check_memory_region_flags(struct kvm *kvm,
const struct kvm_userspace_memory_region_ext *mem)
{
@@ -1517,6 +1522,9 @@ static int check_memory_region_flags(struct kvm *kvm,
if (kvm_arch_dirty_log_supported(kvm))
valid_flags |= KVM_MEM_LOG_DIRTY_PAGES;
+ if (kvm_arch_private_memory_supported(kvm))
+ valid_flags |= KVM_MEM_PRIVATE;
+
#ifdef __KVM_HAVE_READONLY_MEM
valid_flags |= KVM_MEM_READONLY;
#endif
@@ -1708,9 +1716,21 @@ static int kvm_set_memslot(struct kvm *kvm,
/* Copy the arch-specific data, again after (re)acquiring slots_arch_lock. */
memcpy(&new->arch, &old.arch, sizeof(old.arch));
+ if (mem->flags & KVM_MEM_PRIVATE && change == KVM_MR_CREATE) {
+ r = kvm_memfd_register(kvm, mem, new);
+ if (r)
+ goto out_slots;
+ }
+
r = kvm_arch_prepare_memory_region(kvm, new, mem, change);
- if (r)
+ if (r) {
+ if (mem->flags & KVM_MEM_PRIVATE && change == KVM_MR_CREATE)
+ kvm_memfd_unregister(kvm, new);
goto out_slots;
+ }
+
+ if (mem->flags & KVM_MEM_PRIVATE && change == KVM_MR_DELETE)
+ kvm_memfd_unregister(kvm, new);
update_memslots(slots, new, change);
slots = install_new_memslots(kvm, as_id, slots);
@@ -1786,10 +1806,12 @@ int __kvm_set_memory_region(struct kvm *kvm,
return -EINVAL;
if (mem->guest_phys_addr & (PAGE_SIZE - 1))
return -EINVAL;
- /* We can read the guest memory with __xxx_user() later on. */
if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
- (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
- !access_ok((void __user *)(unsigned long)mem->userspace_addr,
+ (mem->userspace_addr != untagged_addr(mem->userspace_addr)))
+ return -EINVAL;
+ /* We can read the guest memory with __xxx_user() later on. */
+ if (!(mem->flags & KVM_MEM_PRIVATE) &&
+ !access_ok((void __user *)(unsigned long)mem->userspace_addr,
mem->memory_size))
return -EINVAL;
if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
@@ -1821,6 +1843,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
new.npages = mem->memory_size >> PAGE_SHIFT;
new.flags = mem->flags;
new.userspace_addr = mem->userspace_addr;
+ new.file = NULL;
+ new.file_ofs = 0;
if (new.npages > KVM_MEM_MAX_NR_PAGES)
return -EINVAL;
@@ -1829,6 +1853,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
change = KVM_MR_CREATE;
new.dirty_bitmap = NULL;
} else { /* Modify an existing slot. */
+ /* Private memslots are immutable, they can only be deleted. */
+ if (mem->flags & KVM_MEM_PRIVATE)
+ return -EINVAL;
if ((new.userspace_addr != old.userspace_addr) ||
(new.npages != old.npages) ||
((new.flags ^ old.flags) & KVM_MEM_READONLY))
--
2.17.1