Message-ID: <05c98b5e6627ddb01bbedb08a2854d8c55bdc2d7.camel@redhat.com>
Date: Mon, 24 May 2021 20:49:28 +0300
From: Maxim Levitsky <mlevitsk@...hat.com>
To: Ilias Stamatis <ilstam@...zon.com>, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org, pbonzini@...hat.com
Cc: seanjc@...gle.com, vkuznets@...hat.com, wanpengli@...cent.com,
jmattson@...gle.com, joro@...tes.org, zamsden@...il.com,
mtosatti@...hat.com, dwmw@...zon.co.uk
Subject: Re: [PATCH v3 02/12] KVM: X86: Store L1's TSC scaling ratio in
'struct kvm_vcpu_arch'
On Fri, 2021-05-21 at 11:24 +0100, Ilias Stamatis wrote:
> Store L1's scaling ratio in the kvm_vcpu_arch struct like we already do
> for L1's TSC offset. This allows for easy save/restore when we enter and
> then exit the nested guest.
>
> Signed-off-by: Ilias Stamatis <ilstam@...zon.com>
> ---
> arch/x86/include/asm/kvm_host.h | 5 +++--
> arch/x86/kvm/vmx/vmx.c | 4 ++--
> arch/x86/kvm/x86.c | 6 ++++--
> 3 files changed, 9 insertions(+), 6 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 55efbacfc244..7dfc609eacd6 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -707,7 +707,7 @@ struct kvm_vcpu_arch {
> } st;
>
> u64 l1_tsc_offset;
> - u64 tsc_offset;
> + u64 tsc_offset; /* current tsc offset */
> u64 last_guest_tsc;
> u64 last_host_tsc;
> u64 tsc_offset_adjustment;
> @@ -721,7 +721,8 @@ struct kvm_vcpu_arch {
> u32 virtual_tsc_khz;
> s64 ia32_tsc_adjust_msr;
> u64 msr_ia32_power_ctl;
> - u64 tsc_scaling_ratio;
> + u64 l1_tsc_scaling_ratio;
> + u64 tsc_scaling_ratio; /* current scaling ratio */
>
> atomic_t nmi_queued; /* unprocessed asynchronous NMIs */
> unsigned nmi_pending; /* NMI queued after currently running handler */
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 4bceb5ca3a89..3e4dda8177bb 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -7453,10 +7453,10 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
> delta_tsc = 0;
>
> /* Convert to host delta tsc if tsc scaling is enabled */
> - if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio &&
> + if (vcpu->arch.l1_tsc_scaling_ratio != kvm_default_tsc_scaling_ratio &&
> delta_tsc && u64_shl_div_u64(delta_tsc,
> kvm_tsc_scaling_ratio_frac_bits,
> - vcpu->arch.tsc_scaling_ratio, &delta_tsc))
> + vcpu->arch.l1_tsc_scaling_ratio, &delta_tsc))
> return -ERANGE;
>
> /*
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index bbc4e04e67ad..6ab95ac188a5 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -2185,6 +2185,7 @@ static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
>
> /* Guest TSC same frequency as host TSC? */
> if (!scale) {
> + vcpu->arch.l1_tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
> vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
> return 0;
> }
> @@ -2211,7 +2212,7 @@ static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
> return -1;
> }
>
> - vcpu->arch.tsc_scaling_ratio = ratio;
> + vcpu->arch.l1_tsc_scaling_ratio = vcpu->arch.tsc_scaling_ratio = ratio;
> return 0;
> }
>
> @@ -2223,6 +2224,7 @@ static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
> /* tsc_khz can be zero if TSC calibration fails */
> if (user_tsc_khz == 0) {
> /* set tsc_scaling_ratio to a safe value */
> + vcpu->arch.l1_tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
> vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
> return -1;
> }
> @@ -2459,7 +2461,7 @@ static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
>
> static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
> {
> - if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
> + if (vcpu->arch.l1_tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
> WARN_ON(adjustment < 0);
> adjustment = kvm_scale_tsc(vcpu, (u64) adjustment);
> adjust_tsc_offset_guest(vcpu, adjustment);
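
For context, the reason keeping both fields makes the nested case easy is that
the effective L2 ratio is just L1's ratio multiplied with L2's ratio (in the
48-bit fixed-point format VMX uses), and a nested exit only has to copy
l1_tsc_scaling_ratio back into tsc_scaling_ratio. Below is a standalone,
user-space sketch of that pattern; everything in it apart from the two fields
added above (the trimmed-down struct, the helper names, the example numbers)
is made up purely for illustration, and the actual nested entry/exit plumbing
is what the later patches in this series add.

/*
 * Standalone illustration (not KVM code) of the save/restore pattern the
 * new l1_tsc_scaling_ratio field enables.  All names except the two
 * ratio fields are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS     48ULL                   /* VMX keeps a 48-bit fractional part */
#define DEFAULT_RATIO (1ULL << FRAC_BITS)     /* 1.0 in fixed point */

struct vcpu_arch {
        uint64_t l1_tsc_scaling_ratio;        /* L1's ratio, preserved across nested runs */
        uint64_t tsc_scaling_ratio;           /* ratio currently in effect */
};

/* Multiply two fixed-point ratios: (a * b) >> FRAC_BITS, via 128-bit math. */
static uint64_t mul_ratio(uint64_t a, uint64_t b)
{
        return (uint64_t)(((unsigned __int128)a * b) >> FRAC_BITS);
}

/* On nested VM-entry, the effective ratio is L1's ratio scaled by L2's. */
static void enter_l2(struct vcpu_arch *arch, uint64_t l2_ratio)
{
        arch->tsc_scaling_ratio = mul_ratio(arch->l1_tsc_scaling_ratio, l2_ratio);
}

/* On nested VM-exit, restore the ratio saved in the new field. */
static void exit_l2(struct vcpu_arch *arch)
{
        arch->tsc_scaling_ratio = arch->l1_tsc_scaling_ratio;
}

int main(void)
{
        struct vcpu_arch arch = {
                .l1_tsc_scaling_ratio = DEFAULT_RATIO / 2,    /* L1 runs at half host rate */
                .tsc_scaling_ratio    = DEFAULT_RATIO / 2,
        };

        enter_l2(&arch, DEFAULT_RATIO / 2);   /* L2 runs at half of L1's rate */
        printf("while in L2: %#llx\n", (unsigned long long)arch.tsc_scaling_ratio);

        exit_l2(&arch);                       /* back to exactly L1's ratio */
        printf("back in L1:  %#llx\n", (unsigned long long)arch.tsc_scaling_ratio);
        return 0;
}
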
Reviewed-by: Maxim Levitsky <mlevitsk@...hat.com>
Best regards,
Maxim Levitsky