Message-Id: <20220118132121.31388-13-chao.p.peng@linux.intel.com>
Date: Tue, 18 Jan 2022 21:21:21 +0800
From: Chao Peng <chao.p.peng@...ux.intel.com>
To: kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-mm@...ck.org, linux-fsdevel@...r.kernel.org,
qemu-devel@...gnu.org
Cc: Paolo Bonzini <pbonzini@...hat.com>,
Jonathan Corbet <corbet@....net>,
Sean Christopherson <seanjc@...gle.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
x86@...nel.org, "H . Peter Anvin" <hpa@...or.com>,
Hugh Dickins <hughd@...gle.com>,
Jeff Layton <jlayton@...nel.org>,
"J . Bruce Fields" <bfields@...ldses.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Yu Zhang <yu.c.zhang@...ux.intel.com>,
Chao Peng <chao.p.peng@...ux.intel.com>,
"Kirill A . Shutemov" <kirill.shutemov@...ux.intel.com>,
luto@...nel.org, jun.nakajima@...el.com, dave.hansen@...el.com,
ak@...ux.intel.com, david@...hat.com
Subject: [PATCH v4 12/12] KVM: Expose KVM_MEM_PRIVATE
KVM_MEM_PRIVATE is not exposed by default, but architecture code can turn
it on by implementing kvm_arch_private_memory_supported().

Also, a private memslot cannot be movable, and the same file+offset cannot
be mapped into different GFNs.
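
As an illustration (not part of this patch), an architecture opts in simply
by providing a non-weak definition of the hook.  The vm_type field and the
constant below are placeholders, not code from this series:

#include <linux/kvm_host.h>

bool kvm_arch_private_memory_supported(struct kvm *kvm)
{
	/*
	 * Placeholder condition: real architecture code would key this
	 * off whatever state marks the VM as using private (fd-based)
	 * memory, e.g. a confidential-VM type recorded at VM creation.
	 */
	return kvm->arch.vm_type == KVM_X86_PROTECTED_VM_TYPE;
}
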
Signed-off-by: Yu Zhang <yu.c.zhang@...ux.intel.com>
Signed-off-by: Chao Peng <chao.p.peng@...ux.intel.com>
---
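Note for reviewers: a minimal userspace sketch of creating a private slot,
illustrative only.  The _ext struct layout shown here mirrors what the
earlier patches in this series are understood to add and may not match
exactly; KVM_MEM_PRIVATE comes from uapi headers built with the series
applied, and private_fd is obtained from the fd-based backing store those
patches introduce.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Assumed layout; with the series applied this comes from <linux/kvm.h>. */
struct kvm_userspace_memory_region_ext {
	struct kvm_userspace_memory_region region;
	__u64 private_offset;
	__u32 private_fd;
	__u32 pad;
};

/*
 * shared_hva backs the non-private accesses; each GFN range must use a
 * distinct file+offset range, which this patch enforces.
 */
static int add_private_slot(int vm_fd, __u32 slot, __u64 gpa, __u64 size,
			    void *shared_hva, int private_fd,
			    __u64 private_offset)
{
	struct kvm_userspace_memory_region_ext ext;

	memset(&ext, 0, sizeof(ext));
	ext.region.slot = slot;
	ext.region.flags = KVM_MEM_PRIVATE;	/* rejected unless the arch opts in */
	ext.region.guest_phys_addr = gpa;
	ext.region.memory_size = size;
	ext.region.userspace_addr = (__u64)(unsigned long)shared_hva;
	ext.private_fd = private_fd;
	ext.private_offset = private_offset;

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &ext);
}
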
 include/linux/kvm_host.h |  1 +
 virt/kvm/kvm_main.c      | 49 ++++++++++++++++++++++++++++++++++------
 2 files changed, 43 insertions(+), 7 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 117cf0da9c5e..444b390261c0 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1328,6 +1328,7 @@ bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_post_init_vm(struct kvm *kvm);
void kvm_arch_pre_destroy_vm(struct kvm *kvm);
int kvm_arch_create_vm_debugfs(struct kvm *kvm);
+bool kvm_arch_private_memory_supported(struct kvm *kvm);
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
/*
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 10e553215618..51d0f08a8601 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1491,10 +1491,19 @@ static void kvm_replace_memslot(struct kvm *kvm,
}
}
-static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
+bool __weak kvm_arch_private_memory_supported(struct kvm *kvm)
+{
+ return false;
+}
+
+static int check_memory_region_flags(struct kvm *kvm,
+ const struct kvm_userspace_memory_region *mem)
{
u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;
+ if (kvm_arch_private_memory_supported(kvm))
+ valid_flags |= KVM_MEM_PRIVATE;
+
#ifdef __KVM_HAVE_READONLY_MEM
valid_flags |= KVM_MEM_READONLY;
#endif
@@ -1873,15 +1882,32 @@ static int kvm_set_memslot(struct kvm *kvm,
}
static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id,
- gfn_t start, gfn_t end)
+ struct file *file,
+ gfn_t start, gfn_t end,
+ loff_t start_off, loff_t end_off)
{
struct kvm_memslot_iter iter;
+ struct kvm_memory_slot *slot;
+ struct inode *inode;
+ int bkt;
kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
if (iter.slot->id != id)
return true;
}
+ /* Disallow mapping the same file+offset into multiple gfns. */
+ if (file) {
+ inode = file_inode(file);
+ kvm_for_each_memslot(slot, bkt, slots) {
+ if (slot->private_file &&
+ file_inode(slot->private_file) == inode &&
+ !(end_off <= slot->private_offset ||
+ start_off >= slot->private_offset
+ + (slot->npages << PAGE_SHIFT)))
+ return true;
+ }
+ }
return false;
}
@@ -1906,7 +1932,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
int as_id, id;
int r;
- r = check_memory_region_flags(mem);
+ r = check_memory_region_flags(kvm, mem);
if (r)
return r;
@@ -1919,10 +1945,12 @@ int __kvm_set_memory_region(struct kvm *kvm,
return -EINVAL;
if (mem->guest_phys_addr & (PAGE_SIZE - 1))
return -EINVAL;
- /* We can read the guest memory with __xxx_user() later on. */
if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
- (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
- !access_ok((void __user *)(unsigned long)mem->userspace_addr,
+ (mem->userspace_addr != untagged_addr(mem->userspace_addr)))
+ return -EINVAL;
+ /* We can read the guest memory with __xxx_user() later on. */
+ if (!(mem->flags & KVM_MEM_PRIVATE) &&
+ !access_ok((void __user *)(unsigned long)mem->userspace_addr,
mem->memory_size))
return -EINVAL;
if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
@@ -1963,6 +1991,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages)
return -EINVAL;
} else { /* Modify an existing slot. */
+ /* Private memslots are immutable; they can only be deleted. */
+ if (mem->flags & KVM_MEM_PRIVATE)
+ return -EINVAL;
if ((mem->userspace_addr != old->userspace_addr) ||
(npages != old->npages) ||
((mem->flags ^ old->flags) & KVM_MEM_READONLY))
@@ -1983,7 +2014,11 @@ int __kvm_set_memory_region(struct kvm *kvm,
}
if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) &&
- kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages)) {
+ kvm_check_memslot_overlap(slots, id, file,
+ base_gfn, base_gfn + npages,
+ region_ext->private_offset,
+ region_ext->private_offset +
+ mem->memory_size)) {
r = -EEXIST;
goto out;
}
--
2.17.1