Message-Id: <20191022003537.13013-14-sean.j.christopherson@intel.com>
Date: Mon, 21 Oct 2019 17:35:35 -0700
From: Sean Christopherson <sean.j.christopherson@...el.com>
To: James Hogan <jhogan@...nel.org>,
Paul Mackerras <paulus@...abs.org>,
Christian Borntraeger <borntraeger@...ibm.com>,
Janosch Frank <frankja@...ux.ibm.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Radim Krčmář <rkrcmar@...hat.com>,
Marc Zyngier <maz@...nel.org>
Cc: David Hildenbrand <david@...hat.com>,
Cornelia Huck <cohuck@...hat.com>,
Sean Christopherson <sean.j.christopherson@...el.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>,
James Morse <james.morse@....com>,
Julien Thierry <julien.thierry.kdev@...il.com>,
Suzuki K Poulose <suzuki.poulose@....com>,
linux-mips@...r.kernel.org, kvm-ppc@...r.kernel.org,
kvm@...r.kernel.org, linux-arm-kernel@...ts.infradead.org,
kvmarm@...ts.cs.columbia.edu, linux-kernel@...r.kernel.org
Subject: [PATCH v2 13/15] KVM: Ensure validity of memslot with respect to kvm_get_dirty_log()
Rework kvm_get_dirty_log() so that it "returns" the associated memslot
on success.  A future patch will rework memslot handling such that
id_to_memslot() can return NULL; returning the memslot makes it more
obvious that the memslot's validity has already been verified, i.e. it
precludes the need to add technically unnecessary validity checks in
the arch code.
Signed-off-by: Sean Christopherson <sean.j.christopherson@...el.com>
---
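Note: a minimal sketch of the calling pattern after this change, mirroring
the book3s_pr and s390 hunks below.  It is illustrative only, not part of
the patch: the function name example_get_dirty_log() is hypothetical, the
usual kvm headers are assumed, and arch-specific processing is elided.

static int example_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	int is_dirty, r;

	mutex_lock(&kvm->slots_lock);

	/* On success, memslot is non-NULL and has a dirty_bitmap. */
	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
	if (r)
		goto out;

	if (is_dirty) {
		/* Arch-specific handling of memslot->base_gfn, etc. */
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}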
arch/powerpc/kvm/book3s_pr.c | 6 +-----
arch/s390/kvm/kvm-s390.c | 12 ++----------
include/linux/kvm_host.h | 2 +-
virt/kvm/kvm_main.c | 27 +++++++++++++++++++--------
4 files changed, 23 insertions(+), 24 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 5368a5dbac22..f41a136d247f 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1860,7 +1860,6 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
struct kvm_dirty_log *log)
{
- struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
struct kvm_vcpu *vcpu;
ulong ga, ga_end;
@@ -1870,15 +1869,12 @@ static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
mutex_lock(&kvm->slots_lock);
- r = kvm_get_dirty_log(kvm, log, &is_dirty);
+ r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
if (r)
goto out;
/* If nothing is dirty, don't bother messing with page tables. */
if (is_dirty) {
- slots = kvm_memslots(kvm);
- memslot = id_to_memslot(slots, log->slot);
-
ga = memslot->base_gfn << PAGE_SHIFT;
ga_end = ga + (memslot->npages << PAGE_SHIFT);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index c0e9929bdb34..a66eb2b9bf71 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -613,9 +613,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
{
int r;
unsigned long n;
- struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
- int is_dirty = 0;
+ int is_dirty;
if (kvm_is_ucontrol(kvm))
return -EINVAL;
@@ -626,14 +625,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
if (log->slot >= KVM_USER_MEM_SLOTS)
goto out;
- slots = kvm_memslots(kvm);
- memslot = id_to_memslot(slots, log->slot);
- r = -ENOENT;
- if (!memslot->dirty_bitmap)
- goto out;
-
- kvm_arch_sync_dirty_log(kvm, memslot);
- r = kvm_get_dirty_log(kvm, log, &is_dirty);
+ r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
if (r)
goto out;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 80dd823a1a8b..4eb14a8cd9cb 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -808,7 +808,7 @@ void kvm_arch_dirty_log_tlb_flush(struct kvm *kvm, struct kvm_memory_slot *slot)
#else /* !CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
- int *is_dirty);
+ int *is_dirty, struct kvm_memory_slot **memslot);
#endif
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index c0ce5081dd00..7e5a88ab57b6 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1124,31 +1124,42 @@ static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
}
#ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
-int kvm_get_dirty_log(struct kvm *kvm,
- struct kvm_dirty_log *log, int *is_dirty)
+/**
+ * kvm_get_dirty_log - get a snapshot of dirty pages
+ * @kvm: pointer to kvm instance
+ * @log: slot id and address to which we copy the log
+ * @is_dirty: set to '1' if any dirty pages were found
+ * @memslot: set to the associated memslot, always valid on success
+ */
+int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
+ int *is_dirty, struct kvm_memory_slot **memslot)
{
struct kvm_memslots *slots;
- struct kvm_memory_slot *memslot;
int i, as_id, id;
unsigned long n;
unsigned long any = 0;
+ *memslot = NULL;
+ *is_dirty = 0;
+
as_id = log->slot >> 16;
id = (u16)log->slot;
if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
return -EINVAL;
slots = __kvm_memslots(kvm, as_id);
- memslot = id_to_memslot(slots, id);
- if (!memslot->dirty_bitmap)
+ *memslot = id_to_memslot(slots, id);
+ if (!(*memslot)->dirty_bitmap)
return -ENOENT;
- n = kvm_dirty_bitmap_bytes(memslot);
+ kvm_arch_sync_dirty_log(kvm, *memslot);
+
+ n = kvm_dirty_bitmap_bytes(*memslot);
for (i = 0; !any && i < n/sizeof(long); ++i)
- any = memslot->dirty_bitmap[i];
+ any = (*memslot)->dirty_bitmap[i];
- if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
+ if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n))
return -EFAULT;
if (any)
		*is_dirty = 1;
--
2.22.0