Message-ID: <20180927034829.2230-9-Tianyu.Lan@microsoft.com>
Date: Thu, 27 Sep 2018 03:49:36 +0000
From: Tianyu Lan <Tianyu.Lan@...rosoft.com>
To: unlisted-recipients:; (no To-header on input)
CC: Tianyu Lan <Tianyu.Lan@...rosoft.com>,
KY Srinivasan <kys@...rosoft.com>,
Haiyang Zhang <haiyangz@...rosoft.com>,
Stephen Hemminger <sthemmin@...rosoft.com>,
"tglx@...utronix.de" <tglx@...utronix.de>,
"mingo@...hat.com" <mingo@...hat.com>,
"hpa@...or.com" <hpa@...or.com>, "x86@...nel.org" <x86@...nel.org>,
"pbonzini@...hat.com" <pbonzini@...hat.com>,
"rkrcmar@...hat.com" <rkrcmar@...hat.com>,
"devel@...uxdriverproject.org" <devel@...uxdriverproject.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"kvm@...r.kernel.org" <kvm@...r.kernel.org>,
"Michael Kelley (EOSG)" <Michael.H.Kelley@...rosoft.com>,
vkuznets <vkuznets@...hat.com>,
Jork Loeser <Jork.Loeser@...rosoft.com>
Subject: [PATCH V3 8/13] KVM: Add a sptep pointer to struct kvm_mmu_page

It's necessary to check whether an mmu page is a last-level or large
page when adding it into the flush list. The page's "spte" is needed
for such a check, so add an sptep pointer to struct kvm_mmu_page.
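
For illustration, a minimal sketch (assuming mmu.c context) of how a
flush-list builder could use the new field. The helper name
kvm_mmu_queue_flush() and the flush_link member are hypothetical here
and not part of this patch; is_last_spte(), sp->role.level and
list_add() already exist:

	/*
	 * Hypothetical sketch: queue a shadow page for a ranged flush
	 * only when it is a last-level or large page.  Root pages have
	 * no parent spte, so their sptep stays NULL and they are
	 * skipped.  is_last_spte() returns true for level 1 tables and
	 * for large ptes, matching the check described above.
	 */
	static void kvm_mmu_queue_flush(struct kvm_mmu_page *sp,
					struct list_head *flush_list)
	{
		if (sp->sptep && is_last_spte(*sp->sptep, sp->role.level))
			list_add(&sp->flush_link, flush_list);
	}
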
Signed-off-by: Lan Tianyu <Tianyu.Lan@...rosoft.com>
---
 arch/x86/include/asm/kvm_host.h | 1 +
 arch/x86/kvm/mmu.c              | 5 +++++
 arch/x86/kvm/paging_tmpl.h      | 2 ++
 3 files changed, 8 insertions(+)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c96bc4cbe4b7..d42d96e637b5 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -296,6 +296,7 @@ struct kvm_mmu_page {
 	int root_count;          /* Currently serving as active root */
 	unsigned int unsync_children;
 	struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
+	u64 *sptep;
 
 	/* The page is obsolete if mmu_valid_gen != kvm->arch.mmu_valid_gen.  */
 	unsigned long mmu_valid_gen;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 7c508c4bd204..a1cbbc852271 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3162,6 +3162,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, int write, int map_writable,
 			pseudo_gfn = base_addr >> PAGE_SHIFT;
 			sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
 					      iterator.level - 1, 1, ACC_ALL);
+			sp->sptep = iterator.sptep;
 
 			link_shadow_page(vcpu, iterator.sptep, sp);
 		}
@@ -3599,6 +3600,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 		sp = kvm_mmu_get_page(vcpu, 0, 0,
 				vcpu->arch.mmu.shadow_root_level, 1, ACC_ALL);
 		++sp->root_count;
+		sp->sptep = NULL;
 		spin_unlock(&vcpu->kvm->mmu_lock);
 		vcpu->arch.mmu.root_hpa = __pa(sp->spt);
 	} else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) {
@@ -3615,6 +3617,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 					i << 30, PT32_ROOT_LEVEL, 1, ACC_ALL);
 			root = __pa(sp->spt);
 			++sp->root_count;
+			sp->sptep = NULL;
 			spin_unlock(&vcpu->kvm->mmu_lock);
 			vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
 		}
@@ -3655,6 +3658,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 				      vcpu->arch.mmu.shadow_root_level, 0, ACC_ALL);
 		root = __pa(sp->spt);
 		++sp->root_count;
+		sp->sptep = NULL;
 		spin_unlock(&vcpu->kvm->mmu_lock);
 		vcpu->arch.mmu.root_hpa = root;
 		return 0;
@@ -3692,6 +3696,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 				      0, ACC_ALL);
 		root = __pa(sp->spt);
 		++sp->root_count;
+		sp->sptep = NULL;
 		spin_unlock(&vcpu->kvm->mmu_lock);
 
 		vcpu->arch.mmu.pae_root[i] = root | pm_mask;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 708a5e44861a..5cbaf7c4a729 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -632,6 +632,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 			table_gfn = gw->table_gfn[it.level - 2];
 			sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
 					      false, access);
+			sp->sptep = it.sptep;
 		}
 
 		/*
@@ -662,6 +663,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 
 		sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
 				      true, direct_access);
+		sp->sptep = it.sptep;
 		link_shadow_page(vcpu, it.sptep, sp);
 	}
 
--
2.14.4