Message-ID: <05d41219-6c0c-8851-dab6-24f9c76aed57@redhat.com>
Date: Tue, 30 Jul 2019 13:20:13 +0200
From: Paolo Bonzini <pbonzini@...hat.com>
To: Anup Patel <Anup.Patel@....com>,
Palmer Dabbelt <palmer@...ive.com>,
Paul Walmsley <paul.walmsley@...ive.com>,
Radim K <rkrcmar@...hat.com>
Cc: Daniel Lezcano <daniel.lezcano@...aro.org>,
Thomas Gleixner <tglx@...utronix.de>,
Atish Patra <Atish.Patra@....com>,
Alistair Francis <Alistair.Francis@....com>,
Damien Le Moal <Damien.LeMoal@....com>,
Christoph Hellwig <hch@...radead.org>,
Anup Patel <anup@...infault.org>,
"kvm@...r.kernel.org" <kvm@...r.kernel.org>,
"linux-riscv@...ts.infradead.org" <linux-riscv@...ts.infradead.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
Subject: Re: [RFC PATCH 08/16] RISC-V: KVM: Handle MMIO exits for VCPU
On 29/07/19 13:57, Anup Patel wrote:
> +static ulong get_insn(struct kvm_vcpu *vcpu)
> +{
> +	ulong __sepc = vcpu->arch.guest_context.sepc;
> +	ulong __hstatus, __sstatus, __vsstatus;
> +#ifdef CONFIG_RISCV_ISA_C
> +	ulong rvc_mask = 3, tmp;
> +#endif
> +	ulong flags, val;
> +
> +	local_irq_save(flags);
> +
> +	__vsstatus = csr_read(CSR_VSSTATUS);
> +	__sstatus = csr_read(CSR_SSTATUS);
> +	__hstatus = csr_read(CSR_HSTATUS);
> +
> +	csr_write(CSR_VSSTATUS, __vsstatus | SR_MXR);
> +	csr_write(CSR_SSTATUS, vcpu->arch.guest_context.sstatus | SR_MXR);
> +	csr_write(CSR_HSTATUS, vcpu->arch.guest_context.hstatus | HSTATUS_SPRV);
> +
> +#ifndef CONFIG_RISCV_ISA_C
> +	asm ("\n"
> +#ifdef CONFIG_64BIT
> +		STR(LWU) " %[insn], (%[addr])\n"
> +#else
> +		STR(LW) " %[insn], (%[addr])\n"
> +#endif
> +		: [insn] "=&r" (val) : [addr] "r" (__sepc));
> +#else
> +	asm ("and %[tmp], %[addr], 2\n"
> +		"bnez %[tmp], 1f\n"
> +#ifdef CONFIG_64BIT
> +		STR(LWU) " %[insn], (%[addr])\n"
> +#else
> +		STR(LW) " %[insn], (%[addr])\n"
> +#endif
> +		"and %[tmp], %[insn], %[rvc_mask]\n"
> +		"beq %[tmp], %[rvc_mask], 2f\n"
> +		"sll %[insn], %[insn], %[xlen_minus_16]\n"
> +		"srl %[insn], %[insn], %[xlen_minus_16]\n"
> +		"j 2f\n"
> +		"1:\n"
> +		"lhu %[insn], (%[addr])\n"
> +		"and %[tmp], %[insn], %[rvc_mask]\n"
> +		"bne %[tmp], %[rvc_mask], 2f\n"
> +		"lhu %[tmp], 2(%[addr])\n"
> +		"sll %[tmp], %[tmp], 16\n"
> +		"add %[insn], %[insn], %[tmp]\n"
> +		"2:"
> +		: [vsstatus] "+&r" (__vsstatus), [insn] "=&r" (val),
> +		  [tmp] "=&r" (tmp)
> +		: [addr] "r" (__sepc), [rvc_mask] "r" (rvc_mask),
> +		  [xlen_minus_16] "i" (__riscv_xlen - 16));
> +#endif
> +
> +	csr_write(CSR_HSTATUS, __hstatus);
> +	csr_write(CSR_SSTATUS, __sstatus);
> +	csr_write(CSR_VSSTATUS, __vsstatus);
> +
> +	local_irq_restore(flags);
> +
> +	return val;
> +}
> +

This also needs exception fixups, because the guest can race against the
host and modify its page tables concurrently with the vmexit, so the
instruction load above can fault.  (How effective such a race is in
practice depends, of course, on how the TLB is implemented in hardware,
but you need to do the safe thing anyway.)
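
For concreteness, a rough sketch of the kind of fixup meant here, modeled
on the __get_user() extable pattern in arch/riscv.  The helper name
get_insn_safe, the fault out-parameter and the exact fixup flow are made
up, and it assumes the HS-mode trap handler is extended to consult the
kernel exception table for faults taken while HSTATUS_SPRV is set:

/* Hypothetical sketch, not the actual patch. */
static ulong get_insn_safe(struct kvm_vcpu *vcpu, bool *fault)
{
	ulong addr = vcpu->arch.guest_context.sepc;
	ulong val = 0, err = 0, tmp;

	/* CSR setup/restore (MXR, SPRV) as in get_insn() above ... */

	asm ("1:\n"
	     /* RV64 shown; RV32 would use lw instead of lwu */
	     "	lwu %[insn], (%[addr])\n"
	     "2:\n"
	     "	.section .fixup, \"ax\"\n"
	     "	.balign 4\n"
	     "3:\n"
	     "	li %[err], 1\n"		/* report the fault ... */
	     "	li %[insn], 0\n"	/* ... and return a dummy insn */
	     "	jump 2b, %[tmp]\n"
	     "	.previous\n"
	     "	.section __ex_table, \"a\"\n"
	     "	.balign " RISCV_SZPTR "\n"
	     "	" RISCV_PTR " 1b, 3b\n"
	     "	.previous"
	     : [err] "+r" (err), [insn] "=&r" (val), [tmp] "=&r" (tmp)
	     : [addr] "r" (addr));

	*fault = (err != 0);
	return val;
}

With something along these lines, a concurrent guest page-table change
just makes the fetch fail, and the caller can forward the fault back to
the guest instead of taking an unhandled fault in the host.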
Paolo