Message-ID: <c57e0bae-c4db-c19b-7727-3695cacd5ba9@redhat.com>
Date: Tue, 2 May 2017 18:24:13 +0200
From: Paolo Bonzini <pbonzini@...hat.com>
To: Brijesh Singh <brijesh.singh@....com>,
Radim Krčmář <rkrcmar@...hat.com>
Cc: joro@...tes.org, kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
tglx@...utronix.de, mingo@...hat.com, hpa@...or.com,
x86@...nel.org, Thomas.Lendacky@....com
Subject: Re: [PATCH] x86: kvm: Avoid guest page table walk when gpa_available is set

On 28/04/2017 21:15, Brijesh Singh wrote:
> Hi Radim,
>
>>
>> This will probably return false negatives when the vcpu->arch.gpa_val
>> couldn't be used anyway (possibly after a VM exit), so it is hard to
>> draw a conclusion.
>>
>> I would really like if we had a solution that passed the gpa inside
>> function parameters.
>>
>> (Btw. cr2 can also be a virtual address, so we can call it gpa directly)
>>
>
> I've tried the patch below and it seems to work fine. It does not
> consider the PIO case; as you rightly pointed out, PIO should trigger
> #NPF relatively rarely, and so far in my runs I have not seen PIO
> causing #NPF. If this sounds like an acceptable approach, I can submit
> v2 with these changes and remove the additional gpa_val.

PIO can certainly cause #NPF, albeit rarely, so this is not a viable approach.
Paolo
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 13c132b..c040e38 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -4662,17 +4662,17 @@ static int emulator_read_write_onepage(unsigned long addr, void *val,
> */
> if (vcpu->arch.gpa_available &&
> emulator_can_use_gpa(ctxt) &&
> - vcpu_is_mmio_gpa(vcpu, addr, exception->address, write) &&
> (addr & ~PAGE_MASK) == (exception->address & ~PAGE_MASK)) {
> gpa = exception->address;
> - goto mmio;
> + ret = vcpu_is_mmio_gpa(vcpu, addr, gpa, write);
> + } else {
> + dump_stack();
> + ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
> +
> + if (ret < 0)
> + return X86EMUL_PROPAGATE_FAULT;
> }
>
> - ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
> -
> - if (ret < 0)
> - return X86EMUL_PROPAGATE_FAULT;
> -
> /* For APIC access vmexit */
> if (ret)
> goto mmio;
> @@ -7056,11 +7056,11 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
> return r;
> }
>
> -static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
> +static inline int complete_emulated_io(struct kvm_vcpu *vcpu, unsigned long cr2)
> {
> int r;
> vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
> - r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
> + r = x86_emulate_instruction(vcpu, cr2, EMULTYPE_NO_DECODE, NULL, 0);
> srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
> if (r != EMULATE_DONE)
> return 0;
> @@ -7071,7 +7071,7 @@ static int complete_emulated_pio(struct kvm_vcpu *vcpu)
> {
> BUG_ON(!vcpu->arch.pio.count);
>
> - return complete_emulated_io(vcpu);
> + return complete_emulated_io(vcpu, 0);
> }
> /*
> @@ -7097,6 +7097,7 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
> struct kvm_run *run = vcpu->run;
> struct kvm_mmio_fragment *frag;
> unsigned len;
> + gpa_t gpa;
>
> BUG_ON(!vcpu->mmio_needed);
>
> @@ -7106,6 +7107,12 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
> if (!vcpu->mmio_is_write)
> memcpy(frag->data, run->mmio.data, len);
>
> + /*
> + * Let's use the GPA from the previous guest page table walk to avoid
> + * yet another guest page table walk when completing the MMIO page fault.
> + */
> + gpa = frag->gpa;
> +
> if (frag->len <= 8) {
> /* Switch to the next fragment. */
> frag++;
> @@ -7124,7 +7131,7 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
> if (vcpu->mmio_is_write)
> return 1;
> vcpu->mmio_read_completed = 1;
> - return complete_emulated_io(vcpu);
> + return complete_emulated_io(vcpu, gpa);
> }
>
> run->exit_reason = KVM_EXIT_MMIO;
>
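
For context, here is a minimal, self-contained toy sketch of the idea Radim suggested and the patch above implements: record the GPA when the MMIO exit is generated and pass it down through the completion path into the emulator, so the guest page tables do not have to be walked again. The types and names below are simplified stand-ins, not the actual kvm/x86 code.

/* Toy model only -- simplified stand-ins for the kvm/x86 structures used
 * in the patch above; any C compiler can build it to see the flow. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gpa_t;

struct mmio_fragment {
	gpa_t gpa;		/* GPA recorded when the #NPF was handled */
	unsigned int len;
};

struct toy_vcpu {
	struct mmio_fragment frag;	/* current MMIO fragment */
	int mmio_read_completed;
};

/* Stand-in for x86_emulate_instruction(): with a non-zero gpa it can skip
 * the GVA->GPA translation. */
static int emulate_instruction(struct toy_vcpu *vcpu, gpa_t gpa)
{
	(void)vcpu;
	if (gpa)
		printf("emulate with cached gpa 0x%llx, no page table walk\n",
		       (unsigned long long)gpa);
	else
		printf("no cached gpa, would walk the guest page tables\n");
	return 1;
}

/* Stand-in for complete_emulated_io(): the gpa is now a parameter rather
 * than a field cached on the vcpu that may have gone stale. */
static int complete_emulated_io(struct toy_vcpu *vcpu, gpa_t gpa)
{
	return emulate_instruction(vcpu, gpa);
}

/* Stand-in for complete_emulated_mmio(): reuse the GPA recorded in the
 * fragment when the MMIO exit was generated. */
static int complete_emulated_mmio(struct toy_vcpu *vcpu)
{
	gpa_t gpa = vcpu->frag.gpa;

	vcpu->mmio_read_completed = 1;
	return complete_emulated_io(vcpu, gpa);
}

int main(void)
{
	struct toy_vcpu vcpu = { .frag = { .gpa = 0xfee00000ULL, .len = 4 } };

	complete_emulated_mmio(&vcpu);
	return 0;
}

In the real patch the equivalent of emulate_instruction(vcpu, gpa) is the x86_emulate_instruction() call that receives the cached address through its cr2 argument; the PIO completion path, which has no such address, passes 0.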