Message-ID: <f4126aaae2cc90a2f5874702bff326c4a790ef43.camel@redhat.com>
Date: Fri, 22 Oct 2021 02:14:29 +0300
From: Maxim Levitsky <mlevitsk@...hat.com>
To: Paolo Bonzini <pbonzini@...hat.com>, linux-kernel@...r.kernel.org,
kvm@...r.kernel.org
Cc: fwilhelm@...gle.com, seanjc@...gle.com, oupton@...gle.com,
stable@...r.kernel.org
Subject: Re: [PATCH 5/8] KVM: x86: split the two parts of emulator_pio_in
On Wed, 2021-10-13 at 12:56 -0400, Paolo Bonzini wrote:
> emulator_pio_in handles both the case where the data is pending in
> vcpu->arch.pio.count, and the case where I/O has to be done via either
> an in-kernel device or a userspace exit. For SEV-ES we would like
> to split these, to identify clearly the moment at which the
> sev_pio_data is consumed. To this end, create two different
> functions: __emulator_pio_in fills in vcpu->arch.pio.count, while
> complete_emulator_pio_in clears it and releases vcpu->arch.pio.data.
>
> While at it, remove the void* argument also from emulator_pio_in_out.
s/remove the void* argument/remove the unused 'void* val' argument/ maybe?
>
> No functional change intended.
>
> Cc: stable@...r.kernel.org
> Fixes: 7ed9abfe8e9f ("KVM: SVM: Support string IO operations for an SEV-ES guest")
> Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
> ---
> arch/x86/kvm/x86.c | 42 +++++++++++++++++++++++-------------------
> 1 file changed, 23 insertions(+), 19 deletions(-)
>
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 8880dc36a2b4..07d9533b471d 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -6906,7 +6906,7 @@ static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
> }
>
> static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
> - unsigned short port, void *val,
> + unsigned short port,
> unsigned int count, bool in)
> {
> vcpu->arch.pio.port = port;
> @@ -6927,26 +6927,31 @@ static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
> return 0;
> }
>
> -static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
> - unsigned short port, void *val, unsigned int count)
> +static int __emulator_pio_in(struct kvm_vcpu *vcpu, int size,
> + unsigned short port, unsigned int count)
> {
> - int ret;
> -
> - if (vcpu->arch.pio.count)
> - goto data_avail;
> -
> + WARN_ON(vcpu->arch.pio.count);
> memset(vcpu->arch.pio_data, 0, size * count);
> + return emulator_pio_in_out(vcpu, size, port, count, true);
> +}
>
> - ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
> - if (ret) {
> -data_avail:
> - memcpy(val, vcpu->arch.pio_data, size * count);
> - trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data);
> - vcpu->arch.pio.count = 0;
> - return 1;
> - }
> +static void complete_emulator_pio_in(struct kvm_vcpu *vcpu, int size,
> + unsigned short port, void *val)
> +{
> + memcpy(val, vcpu->arch.pio_data, size * vcpu->arch.pio.count);
> + trace_kvm_pio(KVM_PIO_IN, port, size, vcpu->arch.pio.count, vcpu->arch.pio_data);
> + vcpu->arch.pio.count = 0;
> +}
>
> - return 0;
> +static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
> + unsigned short port, void *val, unsigned int count)
> +{
> + if (!vcpu->arch.pio.count && !__emulator_pio_in(vcpu, size, port, count))
> + return 0;
^^ Maybe I would add a comment here about the fact that the kernel completed
the emulation when returning here.
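Something along these lines perhaps (just a sketch, exact wording up to you):

	/*
	 * A zero return from __emulator_pio_in() means the I/O could not be
	 * handled by an in-kernel device and a KVM_EXIT_IO exit to userspace
	 * has been set up; otherwise the data is already in
	 * vcpu->arch.pio_data, so fall through and copy it out.
	 */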
> +
> + WARN_ON(count != vcpu->arch.pio.count);
> + complete_emulator_pio_in(vcpu, size, port, val);
> + return 1;
> }
>
> static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
> @@ -6965,12 +6970,11 @@ static int emulator_pio_out(struct kvm_vcpu *vcpu, int size,
>
> memcpy(vcpu->arch.pio_data, val, size * count);
> trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data);
> - ret = emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
> + ret = emulator_pio_in_out(vcpu, size, port, count, false);
> if (ret)
> vcpu->arch.pio.count = 0;
>
> return ret;
> -
> }
>
> static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
Reviewed-by: Maxim Levitsky <mlevitsk@...hat.com>
Best regards,
Maxim Levitsky