Message-ID: <2555fdfe-8979-3aac-6bf1-a9bab11a79e2@arm.com>
Date: Tue, 18 Apr 2023 15:33:42 +0100
From: Ryan Roberts <ryan.roberts@....com>
To: Ard Biesheuvel <ardb@...nel.org>, linux-kernel@...r.kernel.org
Cc: linux-arm-kernel@...ts.infradead.org,
Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>, Marc Zyngier <maz@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Anshuman Khandual <anshuman.khandual@....com>,
Kees Cook <keescook@...omium.org>
Subject: Re: [PATCH v3 56/60] arm64: kvm: Limit HYP VA and host S2 range to 48 bits when LPA2 is in effect

On 07/03/2023 14:05, Ard Biesheuvel wrote:
> The KVM code needs more work to support 5 level paging with LPA2, so for
> the time being, limit KVM to 48 bit addressing on 4k and 16k pagesize
> configurations. This can be reverted once the LPA2 support for KVM is
> merged.
Don't you still have a problem where a user's memory could be backed by physical
memory above 48 bits, which it then tries to map into a KVM VM? How do you
protect against that? I think KVM needs to be disabled entirely when the kernel
is using LPA2, until KVM explicitly supports LPA2 too?
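
Something along these lines is what I had in mind - a rough, untested sketch
only. The helper and the exact init hook are hypothetical (the arm64 KVM init
path has been moving around), and the real predicate would need to check that
52-bit addressing is actually in use rather than merely configured:

	/*
	 * Hypothetical sketch: refuse to initialise KVM while the host
	 * kernel runs with LPA2, until KVM gains LPA2 support itself.
	 */
	static bool kvm_host_uses_lpa2(void)
	{
		/* placeholder predicate; would want a runtime check */
		return IS_ENABLED(CONFIG_ARM64_LPA2);
	}

	/* then, early in KVM's arch init path: */
	if (kvm_host_uses_lpa2()) {
		kvm_info("KVM disabled: host uses LPA2, not yet supported by KVM\n");
		return -ENODEV;
	}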
>
> Signed-off-by: Ard Biesheuvel <ardb@...nel.org>
> ---
> arch/arm64/kvm/hyp/nvhe/mem_protect.c | 2 ++
> arch/arm64/kvm/mmu.c | 5 ++++-
> arch/arm64/kvm/va_layout.c | 9 +++++----
> 3 files changed, 11 insertions(+), 5 deletions(-)
>
> diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> index 552653fa18be34b2..e00b87ed4a8400f6 100644
> --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> @@ -128,6 +128,8 @@ static void prepare_host_vtcr(void)
> /* The host stage 2 is id-mapped, so use parange for T0SZ */
> parange = kvm_get_parange(id_aa64mmfr0_el1_sys_val);
> phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange);
> + if (IS_ENABLED(CONFIG_ARM64_LPA2) && phys_shift > 48)
> + phys_shift = 48; // not implemented yet
>
> host_mmu.arch.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
> id_aa64mmfr1_el1_sys_val, phys_shift);
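
nit: this might read more naturally as a clamp via min() (equivalent, assuming
phys_shift is a u32), and checkpatch will probably complain about the C++-style
comment:

	if (IS_ENABLED(CONFIG_ARM64_LPA2))
		phys_shift = min(phys_shift, 48U); /* LPA2 not yet supported in KVM */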
> diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> index 4e7c0f9a9c286c09..2ad9e6f1e101e52d 100644
> --- a/arch/arm64/kvm/mmu.c
> +++ b/arch/arm64/kvm/mmu.c
> @@ -661,7 +661,8 @@ static int get_user_mapping_size(struct kvm *kvm, u64 addr)
> {
> struct kvm_pgtable pgt = {
> .pgd = (kvm_pteref_t)kvm->mm->pgd,
> - .ia_bits = vabits_actual,
> + .ia_bits = IS_ENABLED(CONFIG_ARM64_LPA2) ? 48
> + : vabits_actual,
> .start_level = (KVM_PGTABLE_MAX_LEVELS -
> ARM64_HW_PGTABLE_LEVELS(pgt.ia_bits)),
> .mm_ops = &kvm_user_mm_ops,
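
For reference, pinning ia_bits to 48 here also pins start_level. A quick worked
example with a 4k granule (PAGE_SHIFT == 12), assuming KVM_PGTABLE_MAX_LEVELS
== 4:

	ia_bits     = 48
	levels      = ARM64_HW_PGTABLE_LEVELS(48)
	            = (48 - 4) / (12 - 3) = 4
	start_level = 4 - 4 = 0

i.e. a full 4-level walk starting at level 0.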
> @@ -1703,6 +1704,8 @@ int __init kvm_mmu_init(u32 *hyp_va_bits)
> idmap_bits = 48;
> kernel_bits = vabits_actual;
> *hyp_va_bits = max(idmap_bits, kernel_bits);
> + if (IS_ENABLED(CONFIG_ARM64_LPA2))
> + *hyp_va_bits = 48; // LPA2 is not yet supported in KVM
>
> kvm_debug("Using %u-bit virtual addresses at EL2\n", *hyp_va_bits);
> kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
> diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
> index 341b67e2f2514e55..ac87d0c39c38f7d9 100644
> --- a/arch/arm64/kvm/va_layout.c
> +++ b/arch/arm64/kvm/va_layout.c
> @@ -59,12 +59,13 @@ static void init_hyp_physvirt_offset(void)
> */
> __init void kvm_compute_layout(void)
> {
> + u64 vabits = IS_ENABLED(CONFIG_ARM64_LPA2) ? 48 : vabits_actual; // not yet
> phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
> u64 hyp_va_msb;
>
> /* Where is my RAM region? */
> - hyp_va_msb = idmap_addr & BIT(vabits_actual - 1);
> - hyp_va_msb ^= BIT(vabits_actual - 1);
> + hyp_va_msb = idmap_addr & BIT(vabits - 1);
> + hyp_va_msb ^= BIT(vabits - 1);
>
> tag_lsb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
> (u64)(high_memory - 1));
> @@ -72,10 +73,10 @@ __init void kvm_compute_layout(void)
> va_mask = GENMASK_ULL(tag_lsb - 1, 0);
> tag_val = hyp_va_msb;
>
> - if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && tag_lsb != (vabits_actual - 1) &&
> + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && tag_lsb != (vabits - 1) &&
> !kaslr_disabled_cmdline()) {
> /* We have some free bits to insert a random tag. */
> - tag_val |= get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
> + tag_val |= get_random_long() & GENMASK_ULL(vabits - 2, tag_lsb);
> }
> tag_val >>= tag_lsb;
>
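
As an aside, forcing vabits to 48 also shrinks the window available for the
random hyp VA tag. An illustrative example (tag_lsb is derived from the DRAM
span at runtime, so the value below is made up):

	tag_lsb  = 38                      /* example value only */
	va_mask  = GENMASK_ULL(37, 0)      /* VA bits passed through */
	tag bits = GENMASK_ULL(46, 38)     /* vabits - 2 == 46 */

whereas with vabits_actual == 52 the randomised tag could have extended up to
bit 50.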