Message-Id: <20190104085405.40356-5-Tianyu.Lan@microsoft.com>
Date: Fri, 4 Jan 2019 16:53:58 +0800
From: lantianyu1986@...il.com
To: unlisted-recipients:; (no To-header on input)
Cc: Lan Tianyu <Tianyu.Lan@...rosoft.com>, christoffer.dall@....com,
marc.zyngier@....com, linux@...linux.org.uk,
catalin.marinas@....com, will.deacon@....com, jhogan@...nel.org,
ralf@...ux-mips.org, paul.burton@...s.com, paulus@...abs.org,
benh@...nel.crashing.org, mpe@...erman.id.au, pbonzini@...hat.com,
rkrcmar@...hat.com, tglx@...utronix.de, mingo@...hat.com,
bp@...en8.de, hpa@...or.com, x86@...nel.org,
linux-arm-kernel@...ts.infradead.org, kvmarm@...ts.cs.columbia.edu,
linux-kernel@...r.kernel.org, linux-mips@...r.kernel.org,
kvm-ppc@...r.kernel.org, linuxppc-dev@...ts.ozlabs.org,
kvm@...r.kernel.org, michael.h.kelley@...rosoft.com,
kys@...rosoft.com, vkuznets@...hat.com
Subject: [PATCH 4/11] KVM/MMU: Introduce tlb flush with range list
From: Lan Tianyu <Tianyu.Lan@...rosoft.com>
This patch introduces a TLB flush with range list interface, using
struct kvm_mmu_page as the list entry, and uses the flush list
function in kvm_mmu_commit_zap_page().
Signed-off-by: Lan Tianyu <Tianyu.Lan@...rosoft.com>
---
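Note (for reference only): below is a minimal userspace sketch of the pattern
this patch adds. The names mmu_page, tlb_range and flush_remote_tlbs_with_range
are simplified stand-ins, not the kernel structures or functions. Pages queued
for flushing are chained through an embedded list node, and a single flush call
walks that chain instead of flushing the whole TLB.

#include <stdio.h>
#include <stddef.h>

/* Stand-in for the kernel's intrusive list node. */
struct flush_node {
	struct flush_node *next;
};

/* Stand-in for struct kvm_mmu_page. */
struct mmu_page {
	unsigned long long gfn;		/* first guest frame mapped by this page */
	struct flush_node flush_link;	/* chained only while a flush is pending */
};

/* Stand-in for struct kvm_tlb_range. */
struct tlb_range {
	unsigned long long start_gfn;
	unsigned long long pages;
	struct flush_node *flush_list;	/* when set, flush per list entry instead */
};

/* Flush either a single contiguous range or each page on the flush list. */
static void flush_remote_tlbs_with_range(struct tlb_range *range)
{
	struct flush_node *n;

	if (!range->flush_list) {
		printf("flush gfn range %llu +%llu\n",
		       range->start_gfn, range->pages);
		return;
	}

	for (n = range->flush_list; n; n = n->next) {
		/* container_of-style recovery of the enclosing page. */
		struct mmu_page *sp = (struct mmu_page *)
			((char *)n - offsetof(struct mmu_page, flush_link));
		printf("flush page at gfn %llu\n", sp->gfn);
	}
}

int main(void)
{
	struct mmu_page a = { .gfn = 0x100 };
	struct mmu_page b = { .gfn = 0x200 };
	struct flush_node *head = NULL;
	struct tlb_range range = { 0 };

	/* Build the flush list, as kvm_mmu_commit_zap_page() does under mmu_lock. */
	a.flush_link.next = head;
	head = &a.flush_link;
	b.flush_link.next = head;
	head = &b.flush_link;

	range.flush_list = head;
	flush_remote_tlbs_with_range(&range);
	return 0;
}

In the actual patch the chain is built under mmu_lock and handed to the
hypervisor's range-flush hook through kvm_tlb_range.flush_list.
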
arch/x86/include/asm/kvm_host.h | 7 +++++++
arch/x86/kvm/mmu.c | 24 +++++++++++++++++++++++-
2 files changed, 30 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 78d2a6714c3b..22dbaa8fba32 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -316,6 +316,12 @@ struct kvm_rmap_head {
struct kvm_mmu_page {
struct list_head link;
+
+ /*
+ * Tlb flush with range list uses struct kvm_mmu_page as list entry
+ * and all list operations should be under protection of mmu_lock.
+ */
+ struct list_head flush_link;
struct hlist_node hash_link;
bool unsync;
@@ -443,6 +449,7 @@ struct kvm_mmu {
struct kvm_tlb_range {
u64 start_gfn;
u64 pages;
+ struct list_head *flush_list;
};
enum pmc_type {
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 068694fa2371..d3272c5066ea 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -289,6 +289,17 @@ static void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
range.start_gfn = start_gfn;
range.pages = pages;
+ range.flush_list = NULL;
+
+ kvm_flush_remote_tlbs_with_range(kvm, &range);
+}
+
+static void kvm_flush_remote_tlbs_with_list(struct kvm *kvm,
+ struct list_head *flush_list)
+{
+ struct kvm_tlb_range range;
+
+ range.flush_list = flush_list;
kvm_flush_remote_tlbs_with_range(kvm, &range);
}
@@ -2708,6 +2719,7 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
struct list_head *invalid_list)
{
struct kvm_mmu_page *sp, *nsp;
+ LIST_HEAD(flush_list);
if (list_empty(invalid_list))
return;
@@ -2721,7 +2733,17 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
* In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
* guest mode and/or lockless shadow page table walks.
*/
- kvm_flush_remote_tlbs(kvm);
+ if (kvm_available_flush_tlb_with_range()) {
+ list_for_each_entry(sp, invalid_list, link)
+ if (sp->sptep && is_last_spte(*sp->sptep,
+ sp->role.level))
+ list_add(&sp->flush_link, &flush_list);
+
+ if (!list_empty(&flush_list))
+ kvm_flush_remote_tlbs_with_list(kvm, &flush_list);
+ } else {
+ kvm_flush_remote_tlbs(kvm);
+ }
list_for_each_entry_safe(sp, nsp, invalid_list, link) {
WARN_ON(!sp->role.invalid || sp->root_count);
--
2.14.4