Message-ID: <dac4aa8c-94d1-475e-ae97-20229bd9ade2@linux.intel.com>
Date: Thu, 18 Apr 2024 19:04:11 +0800
From: Binbin Wu <binbin.wu@...ux.intel.com>
To: isaku.yamahata@...el.com,
 Sean Christopherson <sean.j.christopherson@...el.com>
Cc: kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
 isaku.yamahata@...il.com, Paolo Bonzini <pbonzini@...hat.com>,
 erdemaktas@...gle.com, Sean Christopherson <seanjc@...gle.com>,
 Sagi Shahar <sagis@...gle.com>, Kai Huang <kai.huang@...el.com>,
 chen.bo@...el.com, hang.yuan@...el.com, tina.zhang@...el.com
Subject: Re: [PATCH v19 110/130] KVM: TDX: Handle TDX PV MMIO hypercall



On 4/18/2024 5:29 PM, Binbin Wu wrote:
>
>> +
>> +static int tdx_emulate_mmio(struct kvm_vcpu *vcpu)
>> +{
>> +    struct kvm_memory_slot *slot;
>> +    int size, write, r;
>> +    unsigned long val;
>> +    gpa_t gpa;
>> +
>> +    KVM_BUG_ON(vcpu->mmio_needed, vcpu->kvm);
>> +
>> +    size = tdvmcall_a0_read(vcpu);
>> +    write = tdvmcall_a1_read(vcpu);
>> +    gpa = tdvmcall_a2_read(vcpu);
>> +    val = write ? tdvmcall_a3_read(vcpu) : 0;
>> +
>> +    if (size != 1 && size != 2 && size != 4 && size != 8)
>> +        goto error;
>> +    if (write != 0 && write != 1)
>> +        goto error;
>> +
>> +    /* Strip the shared bit, allow MMIO with and without it set. */
> Based on the discussion at
> https://lore.kernel.org/all/ZcUO5sFEAIH68JIA@google.com/,
> do we still allow MMIO without the shared bit?
>
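
(If the conclusion there is that the shared bit must be set, a stricter
variant could look like the sketch below, purely as an illustration,
reusing the helpers this patch already uses:

	if (!(gpa & gfn_to_gpa(kvm_gfn_shared_mask(vcpu->kvm))))
		goto error;

i.e. reject the TDVMCALL instead of silently accepting a GPA without the
shared bit.)
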
>> +    gpa = gpa & ~gfn_to_gpa(kvm_gfn_shared_mask(vcpu->kvm));
>> +
>> +    if (size > 8u || ((gpa + size - 1) ^ gpa) & PAGE_MASK)
> "size > 8u" can be removed, since based on the check of size above, it 
> can't be greater than 8.
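
For illustration, the condition would then reduce to just the page-crossing
test:

	if (((gpa + size - 1) ^ gpa) & PAGE_MASK)
		goto error;
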
>
>
>> +        goto error;
>> +
>> +    slot = kvm_vcpu_gfn_to_memslot(vcpu, gpa_to_gfn(gpa));
>> +    if (slot && !(slot->flags & KVM_MEMSLOT_INVALID))
>> +        goto error;
>> +
>> +    if (!kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
> Should this check for write first?
>
> I checked handle_ept_misconfig() in VMX; it doesn't check for write
> first either.
>
> Functionally, it should be OK, since the guest will not read the
> address range of fast MMIO, so the read case will be filtered out by
> ioeventfd_write().
> But it takes a long path to reach ioeventfd_write().
> Wouldn't it be more efficient to check for write first?

I see now why handle_ept_misconfig() attempts the fast MMIO write without
checking: it was intended to keep fast MMIO fast, and in the EPT misconfig
case the read/write information is not readily available.

But in this patch we already have the read/write information, so maybe we
can check for write before attempting fast MMIO?
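
A minimal sketch of what I have in mind, illustration only, just putting
the write check in front of the existing call:

	/* Attempt fast MMIO only for writes; a read can never be satisfied
	 * by an ioeventfd, so skip the bus lookup in that case. */
	if (write && !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
		trace_kvm_fast_mmio(gpa);
		return 1;
	}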


>
>
>> +        trace_kvm_fast_mmio(gpa);
>> +        return 1;
>> +    }
>> +
>> +    if (write)
>> +        r = tdx_mmio_write(vcpu, gpa, size, val);
>> +    else
>> +        r = tdx_mmio_read(vcpu, gpa, size);
>> +    if (!r) {
>> +        /* Kernel completed device emulation. */
>> +        tdvmcall_set_return_code(vcpu, TDVMCALL_SUCCESS);
>> +        return 1;
>> +    }
>> +
>> +    /* Request the device emulation to userspace device model. */
>> +    vcpu->mmio_needed = 1;
>> +    vcpu->mmio_is_write = write;
>> +    vcpu->arch.complete_userspace_io = tdx_complete_mmio;
>> +
>> +    vcpu->run->mmio.phys_addr = gpa;
>> +    vcpu->run->mmio.len = size;
>> +    vcpu->run->mmio.is_write = write;
>> +    vcpu->run->exit_reason = KVM_EXIT_MMIO;
>> +
>> +    if (write) {
>> +        memcpy(vcpu->run->mmio.data, &val, size);
>> +    } else {
>> +        vcpu->mmio_fragments[0].gpa = gpa;
>> +        vcpu->mmio_fragments[0].len = size;
>> +        trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, size, gpa, NULL);
>> +    }
>> +    return 0;
>> +
>> +error:
>> +    tdvmcall_set_return_code(vcpu, TDVMCALL_INVALID_OPERAND);
>> +    return 1;
>> +}
>> +
>>   static int handle_tdvmcall(struct kvm_vcpu *vcpu)
>>   {
>>       if (tdvmcall_exit_type(vcpu))
>> @@ -1229,6 +1341,8 @@ static int handle_tdvmcall(struct kvm_vcpu *vcpu)
>>           return tdx_emulate_hlt(vcpu);
>>       case EXIT_REASON_IO_INSTRUCTION:
>>           return tdx_emulate_io(vcpu);
>> +    case EXIT_REASON_EPT_VIOLATION:
>> +        return tdx_emulate_mmio(vcpu);
>>       default:
>>           break;
>>       }
>> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
>> index 03950368d8db..d5b18cad9dcd 100644
>> --- a/arch/x86/kvm/x86.c
>> +++ b/arch/x86/kvm/x86.c
>> @@ -13975,6 +13975,7 @@ EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);
>>     EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_entry);
>>   EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
>> +EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_mmio);
>>   EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
>>   EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
>>   EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
>> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
>> index e27c22449d85..bc14e1f2610c 100644
>> --- a/virt/kvm/kvm_main.c
>> +++ b/virt/kvm/kvm_main.c
>> @@ -2689,6 +2689,7 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
>>         return NULL;
>>   }
>> +EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_memslot);
>>     bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
>>   {
>> @@ -5992,6 +5993,7 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
>>       r = __kvm_io_bus_read(vcpu, bus, &range, val);
>>       return r < 0 ? r : 0;
>>   }
>> +EXPORT_SYMBOL_GPL(kvm_io_bus_read);
>>     int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
>>                   int len, struct kvm_io_device *dev)
>
>

