Message-ID: <20210512150945.4591-7-ilstam@amazon.com>
Date:   Wed, 12 May 2021 16:09:41 +0100
From:   Ilias Stamatis <ilstam@...zon.com>
To:     <kvm@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
        <pbonzini@...hat.com>
CC:     <mlevitsk@...hat.com>, <seanjc@...gle.com>, <vkuznets@...hat.com>,
        <wanpengli@...cent.com>, <jmattson@...gle.com>, <joro@...tes.org>,
        <zamsden@...il.com>, <mtosatti@...hat.com>, <dwmw@...zon.co.uk>,
        <ilstam@...zon.com>
Subject: [PATCH v2 06/10] KVM: X86: Add functions that calculate the 02 TSC offset and multiplier

When L2 is entered we need to "merge" the TSC multiplier and TSC offset
values of 01 (the ones L0 uses while running L1) and 12 (the ones L1 sets
up for running L2) in order to produce the 02 values that are used while
L2 runs.

The merging is done using the following equations:
  offset_02 = ((offset_01 * mult_12) >> shift_bits) + offset_12
  mult_02 = (mult_01 * mult_12) >> shift_bits

Where shift_bits is kvm_tsc_scaling_ratio_frac_bits.
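
For reference, these follow from composing the two levels of scaling and
offsetting (a rough sketch that ignores fixed-point rounding; "s" stands
for shift_bits, host_TSC is the host TSC value and L1_TSC/L2_TSC are the
TSC values observed by L1/L2):

  L1_TSC = ((host_TSC * mult_01) >> s) + offset_01
  L2_TSC = ((L1_TSC * mult_12) >> s) + offset_12
         = ((((host_TSC * mult_01) >> s) * mult_12) >> s)
             + ((offset_01 * mult_12) >> s) + offset_12
        ~= ((host_TSC * ((mult_01 * mult_12) >> s)) >> s)
             + ((offset_01 * mult_12) >> s) + offset_12
         = ((host_TSC * mult_02) >> s) + offset_02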

Signed-off-by: Ilias Stamatis <ilstam@...zon.com>
---
 arch/x86/include/asm/kvm_host.h |  2 ++
 arch/x86/kvm/x86.c              | 33 +++++++++++++++++++++++++++++++++
 2 files changed, 35 insertions(+)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4c4a3fefff57..57a25d8e8b0f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1793,6 +1793,8 @@ static inline bool kvm_is_supported_user_return_msr(u32 msr)
 u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
 u64 kvm_scale_tsc_l1(struct kvm_vcpu *vcpu, u64 tsc);
 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);
+void kvm_set_02_tsc_offset(struct kvm_vcpu *vcpu);
+void kvm_set_02_tsc_multiplier(struct kvm_vcpu *vcpu);
 
 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 84af1af7a2cc..1db6cfc2079f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2346,6 +2346,39 @@ u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
 }
 EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
 
+void kvm_set_02_tsc_offset(struct kvm_vcpu *vcpu)
+{
+	u64 l2_offset = static_call(kvm_x86_get_l2_tsc_offset)(vcpu);
+	u64 l2_multiplier = static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu);
+
+	if (l2_multiplier != kvm_default_tsc_scaling_ratio) {
+		vcpu->arch.tsc_offset = mul_s64_u64_shr(
+				(s64) vcpu->arch.l1_tsc_offset,
+				l2_multiplier,
+				kvm_tsc_scaling_ratio_frac_bits);
+	} else {
+		vcpu->arch.tsc_offset = vcpu->arch.l1_tsc_offset;
+	}
+
+	vcpu->arch.tsc_offset += l2_offset;
+}
+EXPORT_SYMBOL_GPL(kvm_set_02_tsc_offset);
+
+void kvm_set_02_tsc_multiplier(struct kvm_vcpu *vcpu)
+{
+	u64 l2_multiplier = static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu);
+
+	if (l2_multiplier != kvm_default_tsc_scaling_ratio) {
+		vcpu->arch.tsc_scaling_ratio = mul_u64_u64_shr(
+				vcpu->arch.l1_tsc_scaling_ratio,
+				l2_multiplier,
+				kvm_tsc_scaling_ratio_frac_bits);
+	} else {
+		vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
+	}
+}
+EXPORT_SYMBOL_GPL(kvm_set_02_tsc_multiplier);
+
 static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
 	vcpu->arch.l1_tsc_offset = offset;
-- 
2.17.1
