Message-Id: <20181020222127.6368-3-ahmedsoliman0x666@gmail.com>
Date: Sun, 21 Oct 2018 00:21:24 +0200
From: Ahmed Abd El Mawgood <ahmedsoliman0x666@...il.com>
To: Paolo Bonzini <pbonzini@...hat.com>, rkrcmar@...hat.com,
Jonathan Corbet <corbet@....net>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
hpa@...or.com, x86@...nel.org, kvm@...r.kernel.org,
linux-doc@...r.kernel.org, linux-kernel@...r.kernel.org,
ahmedsoliman0x666@...il.com, Ovich00@...il.com,
kernel-hardening@...ts.openwall.com, nigel.edwards@....com,
Boris Lukashev <blukashev@...pervictus.com>,
Hossam Hassan <7ossam9063@...il.com>,
Ahmed Lotfy <A7med.lotfey@...il.com>
Subject: [PATCH V4 2/5] KVM: X86: Add an arbitrary data pointer to kvm memslot iterator functions
This helps with sharing data with the slot_level_handler callback. In my
case I need to share a counter of the pages traversed, for use with a
bitmap. Being able to pass an arbitrary pointer into the
slot_level_handler callback makes that easy.
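
As an illustration only (the context struct and handler below are made-up
names, not part of this patch), a later caller could thread a counter
through the new data argument like this:

	struct count_ctx {
		unsigned long pages;	/* pages visited so far */
	};

	static bool count_pages(struct kvm *kvm,
				struct kvm_rmap_head *rmap_head,
				void *data)
	{
		struct count_ctx *ctx = data;

		ctx->pages++;		/* advance the shared counter */
		return false;		/* counting needs no TLB flush */
	}

	/* caller, with kvm->mmu_lock held: */
	struct count_ctx ctx = { .pages = 0 };

	slot_handle_all_level(kvm, memslot, count_pages, false, &ctx);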
Signed-off-by: Ahmed Abd El Mawgood <ahmedsoliman0x666@...il.com>
---
arch/x86/kvm/mmu.c | 65 ++++++++++++++++++++++++++--------------------
1 file changed, 37 insertions(+), 28 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 51b953ad9d4e..cc36abe1ee44 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1486,7 +1486,7 @@ static bool spte_write_protect(u64 *sptep, bool pt_protect)
static bool __rmap_write_protect(struct kvm *kvm,
struct kvm_rmap_head *rmap_head,
- bool pt_protect)
+ bool pt_protect, void *data)
{
u64 *sptep;
struct rmap_iterator iter;
@@ -1525,7 +1525,8 @@ static bool wrprot_ad_disabled_spte(u64 *sptep)
* - W bit on ad-disabled SPTEs.
* Returns true iff any D or W bits were cleared.
*/
-static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
+static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+ void *data)
{
u64 *sptep;
struct rmap_iterator iter;
@@ -1551,7 +1552,8 @@ static bool spte_set_dirty(u64 *sptep)
return mmu_spte_update(sptep, spte);
}
-static bool __rmap_set_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
+static bool __rmap_set_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+ void *data)
{
u64 *sptep;
struct rmap_iterator iter;
@@ -1583,7 +1585,7 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
while (mask) {
rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
PT_PAGE_TABLE_LEVEL, slot);
- __rmap_write_protect(kvm, rmap_head, false);
+ __rmap_write_protect(kvm, rmap_head, false, NULL);
/* clear the first set bit */
mask &= mask - 1;
@@ -1609,7 +1611,7 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
while (mask) {
rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
PT_PAGE_TABLE_LEVEL, slot);
- __rmap_clear_dirty(kvm, rmap_head);
+ __rmap_clear_dirty(kvm, rmap_head, NULL);
/* clear the first set bit */
mask &= mask - 1;
@@ -1662,7 +1664,8 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
rmap_head = __gfn_to_rmap(gfn, i, slot);
- write_protected |= __rmap_write_protect(kvm, rmap_head, true);
+ write_protected |= __rmap_write_protect(kvm, rmap_head, true,
+ NULL);
}
return write_protected;
@@ -1676,7 +1679,8 @@ static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn);
}
-static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
+static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+ void *data)
{
u64 *sptep;
struct rmap_iterator iter;
@@ -1696,7 +1700,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
struct kvm_memory_slot *slot, gfn_t gfn, int level,
unsigned long data)
{
- return kvm_zap_rmapp(kvm, rmap_head);
+ return kvm_zap_rmapp(kvm, rmap_head, NULL);
}
static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
@@ -5465,13 +5469,15 @@ void kvm_mmu_uninit_vm(struct kvm *kvm)
}
/* The return value indicates if tlb flush on all vcpus is needed. */
-typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
+typedef bool (*slot_level_handler) (struct kvm *kvm,
+ struct kvm_rmap_head *rmap_head, void *data);
/* The caller should hold mmu-lock before calling this function. */
static __always_inline bool
slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
slot_level_handler fn, int start_level, int end_level,
- gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
+ gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb,
+ void *data)
{
struct slot_rmap_walk_iterator iterator;
bool flush = false;
@@ -5479,7 +5485,7 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
end_gfn, &iterator) {
if (iterator.rmap)
- flush |= fn(kvm, iterator.rmap);
+ flush |= fn(kvm, iterator.rmap, data);
if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
if (flush && lock_flush_tlb) {
@@ -5501,36 +5507,36 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
static __always_inline bool
slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
slot_level_handler fn, int start_level, int end_level,
- bool lock_flush_tlb)
+ bool lock_flush_tlb, void *data)
{
return slot_handle_level_range(kvm, memslot, fn, start_level,
end_level, memslot->base_gfn,
memslot->base_gfn + memslot->npages - 1,
- lock_flush_tlb);
+ lock_flush_tlb, data);
}
static __always_inline bool
slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
- slot_level_handler fn, bool lock_flush_tlb)
+ slot_level_handler fn, bool lock_flush_tlb, void *data)
{
return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
- PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
+ PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb, data);
}
static __always_inline bool
slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
- slot_level_handler fn, bool lock_flush_tlb)
+ slot_level_handler fn, bool lock_flush_tlb, void *data)
{
return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL + 1,
- PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
+ PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb, data);
}
static __always_inline bool
slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
- slot_level_handler fn, bool lock_flush_tlb)
+ slot_level_handler fn, bool lock_flush_tlb, void *data)
{
return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
- PT_PAGE_TABLE_LEVEL, lock_flush_tlb);
+ PT_PAGE_TABLE_LEVEL, lock_flush_tlb, data);
}
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
@@ -5552,7 +5558,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL,
- start, end - 1, true);
+ start, end - 1, true, NULL);
}
}
@@ -5560,9 +5566,10 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
}
static bool slot_rmap_write_protect(struct kvm *kvm,
- struct kvm_rmap_head *rmap_head)
+ struct kvm_rmap_head *rmap_head,
+ void *data)
{
- return __rmap_write_protect(kvm, rmap_head, false);
+ return __rmap_write_protect(kvm, rmap_head, false, data);
}
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
@@ -5572,7 +5579,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
spin_lock(&kvm->mmu_lock);
flush = slot_handle_all_level(kvm, memslot, slot_rmap_write_protect,
- false);
+ false, NULL);
spin_unlock(&kvm->mmu_lock);
/*
@@ -5598,7 +5605,8 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
}
static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
- struct kvm_rmap_head *rmap_head)
+ struct kvm_rmap_head *rmap_head,
+ void *data)
{
u64 *sptep;
struct rmap_iterator iter;
@@ -5636,7 +5644,7 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
/* FIXME: const-ify all uses of struct kvm_memory_slot. */
spin_lock(&kvm->mmu_lock);
slot_handle_leaf(kvm, (struct kvm_memory_slot *)memslot,
- kvm_mmu_zap_collapsible_spte, true);
+ kvm_mmu_zap_collapsible_spte, true, NULL);
spin_unlock(&kvm->mmu_lock);
}
@@ -5646,7 +5654,7 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
bool flush;
spin_lock(&kvm->mmu_lock);
- flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
+ flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false, NULL);
spin_unlock(&kvm->mmu_lock);
lockdep_assert_held(&kvm->slots_lock);
@@ -5669,7 +5677,7 @@ void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
spin_lock(&kvm->mmu_lock);
flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect,
- false);
+ false, NULL);
spin_unlock(&kvm->mmu_lock);
/* see kvm_mmu_slot_remove_write_access */
@@ -5686,7 +5694,8 @@ void kvm_mmu_slot_set_dirty(struct kvm *kvm,
bool flush;
spin_lock(&kvm->mmu_lock);
- flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false);
+ flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false,
+ NULL);
spin_unlock(&kvm->mmu_lock);
lockdep_assert_held(&kvm->slots_lock);
--
2.18.1