[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <1455449503-20993-6-git-send-email-guangrong.xiao@linux.intel.com>
Date: Sun, 14 Feb 2016 19:31:37 +0800
From: Xiao Guangrong <guangrong.xiao@...ux.intel.com>
To: pbonzini@...hat.com
Cc: gleb@...nel.org, mtosatti@...hat.com, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org, kai.huang@...ux.intel.com,
jike.song@...el.com,
Xiao Guangrong <guangrong.xiao@...ux.intel.com>
Subject: [PATCH v3 05/11] KVM: page track: introduce kvm_page_track_{add,remove}_page
These two functions are the user APIs:
- kvm_page_track_add_page(): add the page to the tracking pool; after
that, the specified access on that page will be tracked
- kvm_page_track_remove_page(): remove the page from the tracking pool;
the specified access on the page is no longer tracked once the last
user is gone
Both of these are called under the protection of kvm->srcu or
kvm->slots_lock
Signed-off-by: Xiao Guangrong <guangrong.xiao@...ux.intel.com>
---
arch/x86/include/asm/kvm_page_track.h | 13 ++++
arch/x86/kvm/page_track.c | 124 ++++++++++++++++++++++++++++++++++
2 files changed, 137 insertions(+)
diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h
index 55200406..c010124 100644
--- a/arch/x86/include/asm/kvm_page_track.h
+++ b/arch/x86/include/asm/kvm_page_track.h
@@ -10,4 +10,17 @@ void kvm_page_track_free_memslot(struct kvm_memory_slot *free,
struct kvm_memory_slot *dont);
int kvm_page_track_create_memslot(struct kvm_memory_slot *slot,
unsigned long npages);
+
+void
+kvm_slot_page_track_add_page_nolock(struct kvm *kvm,
+ struct kvm_memory_slot *slot, gfn_t gfn,
+ enum kvm_page_track_mode mode);
+void kvm_page_track_add_page(struct kvm *kvm, gfn_t gfn,
+ enum kvm_page_track_mode mode);
+void kvm_slot_page_track_remove_page_nolock(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ gfn_t gfn,
+ enum kvm_page_track_mode mode);
+void kvm_page_track_remove_page(struct kvm *kvm, gfn_t gfn,
+ enum kvm_page_track_mode mode);
#endif
diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c
index 8c396d0..e17efe9 100644
--- a/arch/x86/kvm/page_track.c
+++ b/arch/x86/kvm/page_track.c
@@ -50,3 +50,127 @@ track_free:
kvm_page_track_free_memslot(slot, NULL);
return -ENOMEM;
}
+
+/* Validate that @mode names one of the supported tracking modes. */
+static bool check_mode(enum kvm_page_track_mode mode)
+{
+	return mode >= 0 && mode < KVM_PAGE_TRACK_MAX;
+}
+
+/*
+ * Adjust the per-gfn tracking counter for @mode by @count (+1 to add a
+ * tracker, -1 to drop one).  Counters are stored as unsigned short in
+ * slot->arch.gfn_track[].
+ */
+static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn,
+			     enum kvm_page_track_mode mode, short count)
+{
+	int index, val;
+
+	index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL);
+
+	val = slot->arch.gfn_track[mode][index];
+
+	/*
+	 * The sum is computed in int, so it cannot wrap here; check that the
+	 * result still fits the unsigned short storage (no overflow past
+	 * USHRT_MAX on add, no underflow below zero on remove).  The old
+	 * checks were broken: "val + count < val" never triggers after
+	 * integer promotion, and "val < !count" compared against 0/1 rather
+	 * than -count.
+	 */
+	if (WARN_ON(val + count < 0 || val + count > USHRT_MAX))
+		return;
+
+	slot->arch.gfn_track[mode][index] += count;
+}
+
+/*
+ * Add one tracker for @gfn in @slot; caller must hold kvm->mmu_lock.
+ * (Fixed: dropped the stray blank line after the opening brace.)
+ */
+void
+kvm_slot_page_track_add_page_nolock(struct kvm *kvm,
+				    struct kvm_memory_slot *slot, gfn_t gfn,
+				    enum kvm_page_track_mode mode)
+{
+	WARN_ON(!check_mode(mode));
+
+	update_gfn_track(slot, gfn, mode, 1);
+
+	/*
+	 * new track stops large page mapping for the
+	 * tracked page.
+	 */
+	kvm_mmu_gfn_disallow_lpage(slot, gfn);
+
+	/*
+	 * Write access is intercepted by write-protecting the sptes; flush
+	 * remote TLBs if any spte was actually changed.
+	 */
+	if (mode == KVM_PAGE_TRACK_WRITE)
+		if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn))
+			kvm_flush_remote_tlbs(kvm);
+}
+
+/*
+ * add guest page to the tracking pool so that corresponding access on that
+ * page will be intercepted.
+ *
+ * It should be called under the protection of kvm->srcu or kvm->slots_lock
+ *
+ * @kvm: the guest instance we are interested in.
+ * @gfn: the guest page.
+ * @mode: tracking mode, currently only write track is supported.
+ */
+void kvm_page_track_add_page(struct kvm *kvm, gfn_t gfn,
+			     enum kvm_page_track_mode mode)
+{
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *slot;
+	int i;
+
+	/* track the gfn in every address space it is mapped in. */
+	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+		slots = __kvm_memslots(kvm, i);
+
+		/* the gfn may not be mapped in this address space. */
+		slot = __gfn_to_memslot(slots, gfn);
+		if (!slot)
+			continue;
+
+		/* mmu_lock protects the gfn_track counters and sptes. */
+		spin_lock(&kvm->mmu_lock);
+		kvm_slot_page_track_add_page_nolock(kvm, slot, gfn, mode);
+		spin_unlock(&kvm->mmu_lock);
+	}
+}
+
+/*
+ * Drop one tracker for @gfn in @slot; caller must hold kvm->mmu_lock
+ * and must not remove more trackers than were added.
+ *
+ * NOTE(review): write protection set up by the add path is not undone
+ * here — presumably it is torn down lazily on the next fault; confirm.
+ */
+void kvm_slot_page_track_remove_page_nolock(struct kvm *kvm,
+					    struct kvm_memory_slot *slot,
+					    gfn_t gfn,
+					    enum kvm_page_track_mode mode)
+{
+	WARN_ON(!check_mode(mode));
+
+	update_gfn_track(slot, gfn, mode, -1);
+
+	/*
+	 * allow large page mapping for the tracked page
+	 * after the tracker is gone.
+	 */
+	kvm_mmu_gfn_allow_lpage(slot, gfn);
+}
+
+/*
+ * remove the guest page from the tracking pool which stops the interception
+ * of corresponding access on that page. It is the opposed operation of
+ * kvm_page_track_add_page().
+ *
+ * It should be called under the protection of kvm->srcu or kvm->slots_lock
+ *
+ * @kvm: the guest instance we are interested in.
+ * @gfn: the guest page.
+ * @mode: tracking mode, currently only write track is supported.
+ */
+void kvm_page_track_remove_page(struct kvm *kvm, gfn_t gfn,
+				enum kvm_page_track_mode mode)
+{
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *slot;
+	int i;
+
+	/* mirror of the add path: visit every address space. */
+	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+		slots = __kvm_memslots(kvm, i);
+
+		/* the gfn may not be mapped in this address space. */
+		slot = __gfn_to_memslot(slots, gfn);
+		if (!slot)
+			continue;
+
+		/* mmu_lock protects the gfn_track counters and sptes. */
+		spin_lock(&kvm->mmu_lock);
+		kvm_slot_page_track_remove_page_nolock(kvm, slot, gfn, mode);
+		spin_unlock(&kvm->mmu_lock);
+	}
+}
--
1.8.3.1
Powered by blists - more mailing lists