Message-ID: <48cd9218-cc30-65b6-343c-804dea427e30@amd.com>
Date: Thu, 12 Nov 2020 10:19:40 -0600
From: Babu Moger <babu.moger@....com>
To: Paolo Bonzini <pbonzini@...hat.com>
Cc: junaids@...gle.com, wanpengli@...cent.com, kvm@...r.kernel.org,
joro@...tes.org, x86@...nel.org, linux-kernel@...r.kernel.org,
sean.j.christopherson@...el.com, mingo@...hat.com, bp@...en8.de,
hpa@...or.com, tglx@...utronix.de, vkuznets@...hat.com,
jmattson@...gle.com
Subject: Re: [PATCH 2/2] KVM:SVM: Mask SEV encryption bit from CR3 reserved bits

On 11/12/20 2:32 AM, Paolo Bonzini wrote:
> On 12/11/20 01:28, Babu Moger wrote:
>> Add support to the mask_cr3_rsvd_bits() callback to mask the
>> encryption bit from the CR3 value when SEV is enabled.
>>
>> Additionally, cache the encryption mask for quick access during
>> the check.
>>
>> Fixes: a780a3ea628268b2 ("KVM: X86: Fix reserved bits check for MOV to CR3")
>> Signed-off-by: Babu Moger <babu.moger@....com>
>> ---
>> arch/x86/kvm/svm/svm.c | 11 ++++++++++-
>> arch/x86/kvm/svm/svm.h | 3 +++
>> 2 files changed, 13 insertions(+), 1 deletion(-)
>>
>> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
>> index a491a47d7f5c..c2b1e52810c6 100644
>> --- a/arch/x86/kvm/svm/svm.c
>> +++ b/arch/x86/kvm/svm/svm.c
>> @@ -3741,6 +3741,7 @@ static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
>> static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
>> {
>> struct vcpu_svm *svm = to_svm(vcpu);
>> + struct kvm_cpuid_entry2 *best;
>> vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
>> boot_cpu_has(X86_FEATURE_XSAVE) &&
>> @@ -3771,6 +3772,12 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
>> if (nested && guest_cpuid_has(vcpu, X86_FEATURE_SVM))
>> kvm_request_apicv_update(vcpu->kvm, false,
>> APICV_INHIBIT_REASON_NESTED);
>> +
>> + best = kvm_find_cpuid_entry(vcpu, 0x8000001F, 0);
>> + if (best)
>> + svm->sev_enc_mask = ~(1UL << (best->ebx & 0x3f));
>> + else
>> + svm->sev_enc_mask = ~0UL;
>> }
>> static bool svm_has_wbinvd_exit(void)
>> @@ -4072,7 +4079,9 @@ static void enable_smi_window(struct kvm_vcpu *vcpu)
>> static unsigned long svm_mask_cr3_rsvd_bits(struct kvm_vcpu *vcpu, unsigned long cr3)
>> {
>> - return cr3;
>> + struct vcpu_svm *svm = to_svm(vcpu);
>> +
>> + return sev_guest(vcpu->kvm) ? (cr3 & svm->sev_enc_mask) : cr3;
>> }
>> static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int insn_len)
>> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
>> index 1d853fe4c778..57a36645a0e4 100644
>> --- a/arch/x86/kvm/svm/svm.h
>> +++ b/arch/x86/kvm/svm/svm.h
>> @@ -152,6 +152,9 @@ struct vcpu_svm {
>> u64 *avic_physical_id_cache;
>> bool avic_is_running;
>> + /* SEV Memory encryption mask */
>> + unsigned long sev_enc_mask;
>> +
>> /*
>> * Per-vcpu list of struct amd_svm_iommu_ir:
>> * This is used mainly to store interrupt remapping information used
>>
>
> Instead of adding a new callback, you can add a field to struct
> kvm_vcpu_arch:
>
> if (is_long_mode(vcpu) &&
> - (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 63)))
> + (cr3 & vcpu->arch.cr3_lm_rsvd_bits))
>
> Set it in kvm_vcpu_after_set_cpuid, and clear the memory encryption bit in
> kvm_x86_ops.vcpu_after_set_cpuid.
Yes. That should work. Will resubmit the patches. Thanks
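
Just to confirm I understood the suggestion, something along these lines
(untested sketch, the actual v2 may differ):

/* New field in struct kvm_vcpu_arch, cached per vCPU: */
	unsigned long cr3_lm_rsvd_bits;

/* Common code sets the default mask in kvm_vcpu_after_set_cpuid(): */
	vcpu->arch.cr3_lm_rsvd_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63);

/*
 * SVM then clears the memory encryption bit (CPUID 0x8000001F EBX[5:0])
 * from the mask in its vcpu_after_set_cpuid() callback:
 */
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x8000001F, 0);
	if (best)
		vcpu->arch.cr3_lm_rsvd_bits &= ~(1UL << (best->ebx & 0x3f));

/* ...and kvm_set_cr3() checks cr3 & vcpu->arch.cr3_lm_rsvd_bits as above. */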