Message-Id: <20180918160906.9241-9-vkuznets@redhat.com>
Date: Tue, 18 Sep 2018 18:09:05 +0200
From: Vitaly Kuznetsov <vkuznets@...hat.com>
To: kvm@...r.kernel.org
Cc: Paolo Bonzini <pbonzini@...hat.com>,
Radim Krčmář <rkrcmar@...hat.com>,
Jim Mattson <jmattson@...gle.com>,
Liran Alon <liran.alon@...cle.com>,
linux-kernel@...r.kernel.org
Subject: [PATCH v1 RESEND 8/9] x86/kvm/mmu: check if tdp/shadow MMU reconfiguration is needed
MMU reconfiguration in init_kvm_tdp_mmu()/kvm_init_shadow_mmu() can be
avoided when the source data used to configure it hasn't changed; enhance
kvm_mmu_scache with the required fields and consolidate the common code in
kvm_calc_mmu_role_common().
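To illustrate the idea outside of KVM: every piece of guest state that
influences MMU configuration is folded into a single union, so "did
anything change?" collapses into one u64 comparison. A minimal standalone
sketch of the pattern (the union layout mirrors kvm_mmu_role from this
series; the harness around it is hypothetical, not KVM code):

#include <stdint.h>
#include <stdio.h>

union mmu_role {
	uint64_t as_u64;
	struct {
		uint32_t base_role;	/* stands in for kvm_mmu_page_role.word */
		struct {		/* stands in for kvm_mmu_scache */
			uint32_t valid:1;
			uint32_t cr0_pg:1;
			uint32_t cr4_pse:1;
			/* ... remaining source bits ... */
		} scache;
	};
};

static union mmu_role cached;

static void init_mmu(union mmu_role new_role)
{
	/* All source state is packed into the role, so a single
	 * compare decides whether reconfiguration is needed. */
	if (new_role.as_u64 == cached.as_u64)
		return;

	cached.as_u64 = new_role.as_u64;
	puts("reconfiguring MMU");
}

int main(void)
{
	union mmu_role r = { .as_u64 = 0 };

	r.scache.valid = 1;	/* computed roles always set this bit... */
	init_mmu(r);		/* ...so they never alias a zeroed cache */
	init_mmu(r);		/* nothing changed: reconfiguration skipped */
	return 0;
}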
Signed-off-by: Vitaly Kuznetsov <vkuznets@...hat.com>
---
arch/x86/include/asm/kvm_host.h |  2 +
arch/x86/kvm/mmu.c              | 86 ++++++++++++++++++++++++-----------------
2 files changed, 52 insertions(+), 36 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ab46e9493bd4..e14e008b30b9 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -284,10 +284,12 @@ union kvm_mmu_scache {
struct {
unsigned int valid:1;
unsigned int execonly:1;
+ unsigned int cr0_pg:1;
unsigned int cr4_pse:1;
unsigned int cr4_pke:1;
unsigned int cr4_smap:1;
unsigned int cr4_smep:1;
+ unsigned int cr4_la57:1;
};
};
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9e9e12d7cb28..ecd5e9fd3f8c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4709,34 +4709,40 @@ static void paging32E_init_context(struct kvm_vcpu *vcpu,
}
static union kvm_mmu_role
-kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu)
+kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu, bool mmu_init)
{
union kvm_mmu_role role = {0};
role.base_role.access = ACC_ALL;
+ role.base_role.nxe = !!is_nx(vcpu);
+ role.base_role.cr4_pae = !!is_pae(vcpu);
role.base_role.cr0_wp = is_write_protection(vcpu);
+ role.base_role.smm = is_smm(vcpu);
+ role.base_role.guest_mode = is_guest_mode(vcpu);
+ if (!mmu_init)
+ return role;
+
+ role.scache.cr0_pg = !!is_paging(vcpu);
role.scache.cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP) != 0;
role.scache.cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP) != 0;
role.scache.cr4_pse = !!is_pse(vcpu);
role.scache.cr4_pke = kvm_read_cr4_bits(vcpu, X86_CR4_PKE) != 0;
+ role.scache.cr4_la57 = kvm_read_cr4_bits(vcpu, X86_CR4_LA57) != 0;
role.scache.valid = 1;
return role;
}
-static union kvm_mmu_page_role
-kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu)
+static union kvm_mmu_role
+kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool mmu_init)
{
- union kvm_mmu_page_role role = {0};
+ union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, mmu_init);
- role.guest_mode = is_guest_mode(vcpu);
- role.smm = is_smm(vcpu);
- role.ad_disabled = (shadow_accessed_mask == 0);
- role.level = kvm_x86_ops->get_tdp_level(vcpu);
- role.direct = true;
- role.access = ACC_ALL;
+ role.base_role.ad_disabled = (shadow_accessed_mask == 0);
+ role.base_role.level = kvm_x86_ops->get_tdp_level(vcpu);
+ role.base_role.direct = true;
return role;
}
@@ -4744,9 +4750,14 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu)
static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *context = vcpu->arch.mmu;
+ union kvm_mmu_role new_role =
+ kvm_calc_tdp_mmu_root_page_role(vcpu, true);
- context->mmu_role.base_role.word = mmu_base_role_mask.word &
- kvm_calc_tdp_mmu_root_page_role(vcpu).word;
+ new_role.base_role.word &= mmu_base_role_mask.word;
+ if (new_role.as_u64 == context->mmu_role.as_u64)
+ return;
+
+ context->mmu_role.as_u64 = new_role.as_u64;
context->page_fault = tdp_page_fault;
context->sync_page = nonpaging_sync_page;
context->invlpg = nonpaging_invlpg;
@@ -4786,29 +4797,23 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
reset_tdp_shadow_zero_bits_mask(vcpu, context);
}
-static union kvm_mmu_page_role
-kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu)
-{
- union kvm_mmu_page_role role = {0};
- bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
- bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
-
- role.nxe = is_nx(vcpu);
- role.cr4_pae = !!is_pae(vcpu);
- role.cr0_wp = is_write_protection(vcpu);
- role.smep_andnot_wp = smep && !is_write_protection(vcpu);
- role.smap_andnot_wp = smap && !is_write_protection(vcpu);
- role.guest_mode = is_guest_mode(vcpu);
- role.smm = is_smm(vcpu);
- role.direct = !is_paging(vcpu);
- role.access = ACC_ALL;
+static union kvm_mmu_role
+kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool mmu_init)
+{
+ union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, mmu_init);
+
+ role.base_role.smep_andnot_wp = role.scache.cr4_smep &&
+ !is_write_protection(vcpu);
+ role.base_role.smap_andnot_wp = role.scache.cr4_smap &&
+ !is_write_protection(vcpu);
+ role.base_role.direct = !is_paging(vcpu);
if (!is_long_mode(vcpu))
- role.level = PT32E_ROOT_LEVEL;
+ role.base_role.level = PT32E_ROOT_LEVEL;
else if (is_la57_mode(vcpu))
- role.level = PT64_ROOT_5LEVEL;
+ role.base_role.level = PT64_ROOT_5LEVEL;
else
- role.level = PT64_ROOT_4LEVEL;
+ role.base_role.level = PT64_ROOT_4LEVEL;
return role;
}
@@ -4816,6 +4821,12 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu)
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *context = vcpu->arch.mmu;
+ union kvm_mmu_role new_role =
+ kvm_calc_shadow_mmu_root_page_role(vcpu, true);
+
+ new_role.base_role.word &= mmu_base_role_mask.word;
+ if (new_role.as_u64 == context->mmu_role.as_u64)
+ return;
if (!is_paging(vcpu))
nonpaging_init_context(vcpu, context);
@@ -4826,8 +4837,7 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
else
paging32_init_context(vcpu, context);
- context->mmu_role.base_role.word = mmu_base_role_mask.word &
- kvm_calc_shadow_mmu_root_page_role(vcpu).word;
+ context->mmu_role.as_u64 = new_role.as_u64;
reset_shadow_zero_bits_mask(vcpu, context);
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
@@ -4836,7 +4846,7 @@ static union kvm_mmu_role
kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
bool execonly)
{
- union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu);
+ union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, true);
role.base_role.level = PT64_ROOT_4LEVEL;
role.base_role.direct = false;
@@ -4961,10 +4971,14 @@ EXPORT_SYMBOL_GPL(kvm_init_mmu);
static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
{
+ union kvm_mmu_role role;
+
if (tdp_enabled)
- return kvm_calc_tdp_mmu_root_page_role(vcpu);
+ role = kvm_calc_tdp_mmu_root_page_role(vcpu, false);
else
- return kvm_calc_shadow_mmu_root_page_role(vcpu);
+ role = kvm_calc_shadow_mmu_root_page_role(vcpu, false);
+
+ return role.base_role;
}
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
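One subtlety in the comparison above, shown here with toy values (the
hypothetical mask below only stands in for mmu_base_role_mask.word, whose
real contents are not reproduced): the base role word is masked before the
u64 compare, and it was masked the same way before being cached, so bits
outside the mask can never produce a spurious mismatch and a needless
reconfiguration.

#include <stdint.h>
#include <assert.h>

int main(void)
{
	const uint32_t mask = 0x00ffu;	/* hypothetical stand-in for
					 * mmu_base_role_mask.word */
	uint32_t cached_word = 0xab34u & mask;	/* masked when cached... */
	uint32_t new_word    = 0x1234u & mask;	/* ...and when recomputed */

	/* The bits outside the mask (0xab00 vs 0x1200) differ, but both
	 * words compare equal (0x0034), so no reconfiguration occurs. */
	assert(cached_word == new_word);
	return 0;
}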
--
2.14.4