Date:   Fri,  5 Feb 2021 18:03:13 +0800
From:   Zhimin Feng <fengzhimin@...edance.com>
To:     x86@...nel.org, kvm@...r.kernel.org, linux-kernel@...r.kernel.org
Cc:     pbonzini@...hat.com, seanjc@...gle.com, vkuznets@...hat.com,
        wanpengli@...cent.com, jmattson@...gle.com, joro@...tes.org,
        tglx@...utronix.de, mingo@...hat.com, bp@...en8.de, hpa@...or.com,
        fweisbec@...il.com, zhouyibo@...edance.com,
        zhanghaozhong@...edance.com, Zhimin Feng <fengzhimin@...edance.com>
Subject: [RFC: timer passthrough 5/9] KVM: vmx: use tsc_adjust to enable tsc_offset timer passthrough

When running in a VM:

  rdtsc = host_tsc * (TSC multiplier) + tsc_offset   (tsc_offset < 0)

So when the guest writes the TSC deadline MSR, the written value is
always less than the physical TSC (IA32_TIME_STAMP_COUNTER) value, and
the timer interrupt is never triggered.

The IA32_TSC_ADJUST MSR works as follows when the host executes rdtsc:

  rdtsc = host_tsc + tsc_adjust

On VM entry we add the guest's tsc_offset to the host's IA32_TSC_ADJUST
value and write 0 into the VMCS TSC-offset field, so rdtsc executed in
the guest returns:

  rdtsc = host_tsc + tsc_adjust + 0

Now the value written to the TSC deadline MSR is on the same scale as
the physical TSC, and the interrupt is triggered successfully.
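
A minimal, standalone userspace sketch of the identity this relies on
(not part of the patch; the function and variable names are illustrative
only, the TSC multiplier is assumed to be 1, and the actual MSR accesses
are ignored):

#include <assert.h>
#include <stdint.h>

/* Guest TSC as seen with the VMCS TSC-offset field in use: hardware
 * adds tsc_offset to every RDTSC executed by the guest.
 */
static uint64_t guest_tsc_via_vmcs_offset(uint64_t host_rdtsc,
					  int64_t tsc_offset)
{
	return host_rdtsc + tsc_offset;
}

/* Guest TSC after the patch: the same value is produced by folding
 * tsc_offset into IA32_TSC_ADJUST (which shifts the physical TSC
 * itself) and writing 0 into the VMCS TSC-offset field.
 */
static uint64_t guest_tsc_via_tsc_adjust(uint64_t host_tsc,
					 int64_t host_tsc_adjust,
					 int64_t tsc_offset)
{
	int64_t new_adjust = host_tsc_adjust + tsc_offset;

	return (host_tsc + new_adjust) + 0 /* VMCS TSC_OFFSET */;
}

int main(void)
{
	uint64_t host_tsc = 5000000;
	int64_t host_tsc_adjust = 0;
	int64_t tsc_offset = -3000000;	/* negative, as described above */

	/* The guest observes the same TSC value either way, but with
	 * IA32_TSC_ADJUST the physical TSC (which the local APIC compares
	 * against the programmed TSC deadline) is shifted as well, so a
	 * deadline written by the guest can actually fire.
	 */
	assert(guest_tsc_via_vmcs_offset(host_tsc + host_tsc_adjust, tsc_offset) ==
	       guest_tsc_via_tsc_adjust(host_tsc, host_tsc_adjust, tsc_offset));
	return 0;
}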

Signed-off-by: Zhimin Feng <fengzhimin@...edance.com>
---
 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/vmx/vmx.c          | 23 +++++++++++++++++++++++
 2 files changed, 24 insertions(+)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index be8fc230f7c4..7971c9e755a4 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -534,6 +534,7 @@ struct tick_device {
 struct timer_passth_info {
 	u64 host_tscd;
 	bool host_in_tscdeadline;
+	u64 host_tsc_adjust;
 	struct clock_event_device *curr_dev;
 
 	void (*orig_event_handler)(struct clock_event_device *dev);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index f824ee46e2d3..44b2fd59587e 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6659,6 +6659,27 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
 
 bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
 
+static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, bool to_host)
+{
+	u64 tsc_adjust;
+	struct timer_passth_info *local_timer_info;
+
+	local_timer_info = &per_cpu(passth_info, smp_processor_id());
+
+	if (to_host) {
+		tsc_adjust = local_timer_info->host_tsc_adjust;
+		wrmsrl(MSR_IA32_TSC_ADJUST, tsc_adjust);
+		vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
+	} else {
+		rdmsrl(MSR_IA32_TSC_ADJUST, tsc_adjust);
+		local_timer_info->host_tsc_adjust = tsc_adjust;
+
+		wrmsrl(MSR_IA32_TSC_ADJUST, tsc_adjust + vcpu->arch.tsc_offset);
+		vmcs_write64(TSC_OFFSET, 0);
+
+	}
+}
+
 static void vmx_host_lapic_timer_offload(struct kvm_vcpu *vcpu)
 {
 	struct timer_passth_info *local_timer_info;
@@ -6690,6 +6711,7 @@ static void vmx_host_lapic_timer_offload(struct kvm_vcpu *vcpu)
 				PIN_BASED_VMX_PREEMPTION_TIMER);
 	}
 
+	vmx_adjust_tsc_offset(vcpu, false);
 	wrmsrl(MSR_IA32_TSCDEADLINE, 0);
 	if (vcpu->arch.tscd > guest_tscl) {
 		wrmsrl(MSR_IA32_TSCDEADLINE, vcpu->arch.tscd);
@@ -6711,6 +6733,7 @@ static void vmx_restore_passth_timer(struct kvm_vcpu *vcpu)
 	u64 guest_tscd;
 
 	if (vcpu->arch.timer_passth_enable) {
+		vmx_adjust_tsc_offset(vcpu, true);
 		local_timer_info = &per_cpu(passth_info, smp_processor_id());
 		host_tscd = local_timer_info->host_tscd;
 		rdmsrl(MSR_IA32_TSC_DEADLINE, guest_tscd);
-- 
2.11.0
