Message-Id: <20190104085405.40356-10-Tianyu.Lan@microsoft.com>
Date: Fri, 4 Jan 2019 16:54:03 +0800
From: lantianyu1986@...il.com
To: unlisted-recipients:; (no To-header on input)
Cc: Lan Tianyu <Tianyu.Lan@...rosoft.com>, christoffer.dall@....com,
marc.zyngier@....com, linux@...linux.org.uk,
catalin.marinas@....com, will.deacon@....com, jhogan@...nel.org,
ralf@...ux-mips.org, paul.burton@...s.com, paulus@...abs.org,
benh@...nel.crashing.org, mpe@...erman.id.au, pbonzini@...hat.com,
rkrcmar@...hat.com, tglx@...utronix.de, mingo@...hat.com,
bp@...en8.de, hpa@...or.com, x86@...nel.org,
linux-arm-kernel@...ts.infradead.org, kvmarm@...ts.cs.columbia.edu,
linux-kernel@...r.kernel.org, linux-mips@...r.kernel.org,
kvm-ppc@...r.kernel.org, linuxppc-dev@...ts.ozlabs.org,
kvm@...r.kernel.org, michael.h.kelley@...rosoft.com,
kys@...rosoft.com, vkuznets@...hat.com
Subject: [PATCH 9/11] KVM/MMU: Flush TLB in kvm_mmu_write_protect_pt_masked()
From: Lan Tianyu <Tianyu.Lan@...rosoft.com>
Flush the TLB directly in kvm_mmu_write_protect_pt_masked() when a TLB
range flush is available, and make kvm_mmu_write_protect_pt_masked()
return a boolean telling the caller whether it still needs to flush.
Signed-off-by: Lan Tianyu <Tianyu.Lan@...rosoft.com>
---
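A note on the new contract: the function now returns true only when it
write-protected something but could not issue a ranged flush itself, so
the caller owes a full TLB flush. Below is a minimal, self-contained
sketch of that pattern in plain C; write_protect_masked(), flush_range(),
flush_all() and range_flush_available are illustrative stand-ins, not
kernel APIs, and the bit math assumes a 64-bit unsigned long.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for kvm_available_flush_tlb_with_range(). */
static bool range_flush_available = true;

/* Stand-in for kvm_flush_remote_tlbs_with_address(). */
static void flush_range(unsigned long start, unsigned long pages)
{
	printf("ranged flush: start=%#lx pages=%lu\n", start, pages);
}

/* Stand-in for a full kvm_flush_remote_tlbs(). */
static void flush_all(void)
{
	printf("full flush\n");
}

/*
 * Mirrors the patch's contract: write-protect the pages selected by
 * @mask, flush exactly that span when a ranged flush exists, and
 * return whether the caller still needs to flush.
 */
static bool write_protect_masked(unsigned long base, unsigned long mask)
{
	bool flush = mask != 0;		/* pretend every page was protected */
	unsigned long pages = mask ? (63 - __builtin_clzl(mask)) + 1 : 0;

	if (flush && range_flush_available) {
		flush_range(base, pages);
		flush = false;		/* nothing left for the caller to do */
	}
	return flush;
}

int main(void)
{
	if (write_protect_masked(0x1000, 0x29))
		flush_all();		/* fallback when no ranged flush */
	return 0;
}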
arch/x86/kvm/mmu.c | 27 ++++++++++++++++++++-------
1 file changed, 20 insertions(+), 7 deletions(-)
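The hunk below must compute the flush size before the while loop runs:
"mask &= mask - 1" clears the lowest set bit on every iteration, so the
mask is zero once the loop exits. For reference, a standalone plain-C
illustration of that idiom; __builtin_ctzl()/__builtin_clzl() stand in
for the kernel's __ffs()/__fls(), and a 64-bit unsigned long is assumed.

#include <stdio.h>

int main(void)
{
	unsigned long mask = 0x29;	/* bits 0, 3 and 5 set */
	/* Anything derived from the mask must be captured up front. */
	unsigned long pages = (63 - __builtin_clzl(mask)) + 1;

	while (mask) {
		printf("protect page at offset %d\n", __builtin_ctzl(mask));
		mask &= mask - 1;	/* clear the lowest set bit */
	}

	/* pages is 6 here; recomputing from mask now would give 0. */
	printf("pages to flush: %lu (mask is now %lu)\n", pages, mask);
	return 0;
}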
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9d8ee6ea02db..30ed7a79335b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1624,20 +1624,32 @@ static bool __rmap_set_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
* Used when we do not need to care about huge page mappings: e.g. during dirty
* logging we do not have any such mappings.
*/
-static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
+static bool kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask)
{
struct kvm_rmap_head *rmap_head;
+ unsigned long pages = mask ? __fls(mask) + 1 : 0;
+ bool flush = false;
while (mask) {
rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
PT_PAGE_TABLE_LEVEL, slot);
- __rmap_write_protect(kvm, rmap_head, false);
+ flush |= __rmap_write_protect(kvm, rmap_head, false);
/* clear the first set bit */
mask &= mask - 1;
}
+
+ if (flush && kvm_available_flush_tlb_with_range()) {
+ /* The loop above cleared @mask, so use the page count saved before it. */
+ kvm_flush_remote_tlbs_with_address(kvm,
+ slot->base_gfn + gfn_offset,
+ pages);
+ flush = false;
+ }
+
+ return flush;
}
/**
@@ -1683,13 +1695,14 @@ bool kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask)
{
- if (kvm_x86_ops->enable_log_dirty_pt_masked)
+ if (kvm_x86_ops->enable_log_dirty_pt_masked) {
kvm_x86_ops->enable_log_dirty_pt_masked(kvm, slot, gfn_offset,
mask);
- else
- kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
-
- return true;
+ return true;
+ } else {
+ return kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset,
+ mask);
+ }
}
/**
--
2.14.4