[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20240226143630.33643-49-jiangshanlai@gmail.com>
Date: Mon, 26 Feb 2024 22:36:05 +0800
From: Lai Jiangshan <jiangshanlai@...il.com>
To: linux-kernel@...r.kernel.org
Cc: Lai Jiangshan <jiangshan.ljs@...group.com>,
Hou Wenlong <houwenlong.hwl@...group.com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Peter Zijlstra <peterz@...radead.org>,
Sean Christopherson <seanjc@...gle.com>,
Thomas Gleixner <tglx@...utronix.de>,
Borislav Petkov <bp@...en8.de>,
Ingo Molnar <mingo@...hat.com>,
kvm@...r.kernel.org,
Paolo Bonzini <pbonzini@...hat.com>,
x86@...nel.org,
Kees Cook <keescook@...omium.org>,
Juergen Gross <jgross@...e.com>,
Dave Hansen <dave.hansen@...ux.intel.com>,
"H. Peter Anvin" <hpa@...or.com>
Subject: [RFC PATCH 48/73] KVM: x86/PVM: Implement system register setting callbacks
From: Lai Jiangshan <jiangshan.ljs@...group.com>
In PVM, the hardware CR0, CR3, and EFER are fixed, and the guest's
values must match those fixed values; otherwise, the guest is not
allowed to run on the CPU.
Signed-off-by: Lai Jiangshan <jiangshan.ljs@...group.com>
Signed-off-by: Hou Wenlong <houwenlong.hwl@...group.com>
---
arch/x86/kvm/pvm/pvm.c | 51 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 51 insertions(+)
diff --git a/arch/x86/kvm/pvm/pvm.c b/arch/x86/kvm/pvm/pvm.c
index a32d2728eb02..b261309fc946 100644
--- a/arch/x86/kvm/pvm/pvm.c
+++ b/arch/x86/kvm/pvm/pvm.c
@@ -1088,6 +1088,51 @@ static int pvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return ret;
}
+/*
+ * .cache_reg callback: intentionally a no-op — PVM keeps no lazily-cached
+ * register state that needs to be read back from hardware on demand.
+ */
+static void pvm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
+{
+	/* Nothing to do */
+}
+
+/*
+ * Set the guest EFER.  Only the shadow value in vcpu->arch.efer is
+ * updated; nothing is written to hardware, since the hardware EFER is
+ * fixed in PVM (see commit message).  Always returns 0 (success).
+ */
+static int pvm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
+{
+	vcpu->arch.efer = efer;
+
+	return 0;
+}
+
+/*
+ * Any guest CR0 value is considered valid: pvm_set_cr0() merely records
+ * the value, so there is nothing to reject here.
+ *
+ * Fix: the parameter was misnamed 'cr4' (copy-paste from
+ * pvm_is_valid_cr4()); it carries the proposed CR0 value.
+ */
+static bool pvm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+{
+	return true;
+}
+
+/*
+ * Record the guest CR0 and keep EFER.LMA coherent with CR0.PG: when long
+ * mode is enabled (EFER.LME set), enabling paging activates long mode
+ * (set LMA) and disabling paging deactivates it (clear LMA), mirroring
+ * the architectural CR0.PG/EFER.LMA relationship.  Note is_paging() is
+ * evaluated against the *old* CR0, before vcpu->arch.cr0 is updated.
+ */
+static void pvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+{
+	if (vcpu->arch.efer & EFER_LME) {
+		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
+			vcpu->arch.efer |= EFER_LMA;
+
+		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
+			vcpu->arch.efer &= ~EFER_LMA;
+	}
+
+	vcpu->arch.cr0 = cr0;
+}
+
+/*
+ * Any guest CR4 value is considered valid: pvm_set_cr4() only records
+ * the value (and refreshes runtime CPUID state), so nothing is rejected.
+ */
+static bool pvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+{
+	return true;
+}
+
+/*
+ * Record the guest CR4.  If any bit that is reflected in guest-visible
+ * CPUID state (OSXSAVE, PKE) toggled, refresh the runtime CPUID
+ * information so CPUID reads stay consistent with the new CR4.
+ */
+static void pvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+{
+	unsigned long old_cr4 = vcpu->arch.cr4;
+
+	vcpu->arch.cr4 = cr4;
+
+	/* XOR with the old value detects bits that changed in either direction. */
+	if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
+		kvm_update_cpuid_runtime(vcpu);
+}
+
static void pvm_get_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg)
{
@@ -2912,13 +2957,19 @@ static struct kvm_x86_ops pvm_x86_ops __initdata = {
.set_segment = pvm_set_segment,
.get_cpl = pvm_get_cpl,
.get_cs_db_l_bits = pvm_get_cs_db_l_bits,
+ .is_valid_cr0 = pvm_is_valid_cr0,
+ .set_cr0 = pvm_set_cr0,
.load_mmu_pgd = pvm_load_mmu_pgd,
+ .is_valid_cr4 = pvm_is_valid_cr4,
+ .set_cr4 = pvm_set_cr4,
+ .set_efer = pvm_set_efer,
.get_gdt = pvm_get_gdt,
.set_gdt = pvm_set_gdt,
.get_idt = pvm_get_idt,
.set_idt = pvm_set_idt,
.set_dr7 = pvm_set_dr7,
.sync_dirty_debug_regs = pvm_sync_dirty_debug_regs,
+ .cache_reg = pvm_cache_reg,
.get_rflags = pvm_get_rflags,
.set_rflags = pvm_set_rflags,
.get_if_flag = pvm_get_if_flag,
--
2.19.1.6.gb485710b
Powered by blists - more mailing lists