Message-ID: <CAC_TJvcCcv0ENHfpMHBzNMJVtBpjAKqJ_aO_HtKk2uBw+O9Y9A@mail.gmail.com>
Date: Thu, 24 Feb 2022 09:28:24 -0800
From: Kalesh Singh <kaleshsingh@...gle.com>
To: Fuad Tabba <tabba@...gle.com>
Cc: Will Deacon <will@...nel.org>, Marc Zyngier <maz@...nel.org>,
Quentin Perret <qperret@...gle.com>,
Suren Baghdasaryan <surenb@...gle.com>,
"Cc: Android Kernel" <kernel-team@...roid.com>,
James Morse <james.morse@....com>,
Alexandru Elisei <alexandru.elisei@....com>,
Suzuki K Poulose <suzuki.poulose@....com>,
Catalin Marinas <catalin.marinas@....com>,
Mark Rutland <mark.rutland@....com>,
Mark Brown <broonie@...nel.org>,
Masami Hiramatsu <mhiramat@...nel.org>,
Peter Collingbourne <pcc@...gle.com>,
"Madhavan T. Venkataraman" <madvenka@...ux.microsoft.com>,
Andrew Walbran <qwandor@...gle.com>,
Andrew Scull <ascull@...gle.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Ard Biesheuvel <ardb@...nel.org>,
"moderated list:ARM64 PORT (AARCH64 ARCHITECTURE)"
<linux-arm-kernel@...ts.infradead.org>,
kvmarm@...ts.cs.columbia.edu, LKML <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH v3 2/8] KVM: arm64: Introduce pkvm_alloc_private_va_range()
On Thu, Feb 24, 2022 at 4:26 AM Fuad Tabba <tabba@...gle.com> wrote:
>
> Hi Kalesh,
>
> I really like how this makes the code cleaner in general. A couple of
> small nits below.
>
> On Thu, Feb 24, 2022 at 5:17 AM 'Kalesh Singh' via kernel-team
> <kernel-team@...roid.com> wrote:
> >
> > pkvm_alloc_private_va_range() can be used to reserve private VA ranges
> > in the pKVM nVHE hypervisor. Also update __pkvm_create_private_mapping()
> > to allow specifying an alignment for the private VA mapping.
> >
> > These will be used to implement stack guard pages for pKVM nVHE hypervisor
> > (in a subsequent patch in the series).
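
To make the intended use concrete, a rough sketch of the calling
pattern (hypothetical caller, error handling simplified; the real
in-tree user is __pkvm_create_private_mapping() further down in this
patch):

	unsigned long va = pkvm_alloc_private_va_range(size, align);

	if (IS_ERR((void *)va))
		return PTR_ERR((void *)va);
	/* va is 'align'-aligned; [va, va + PAGE_ALIGN(size)) is reserved */
	err = __pkvm_create_mappings(va, size, phys, prot);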
> >
> > Credits to Quentin Perret <qperret@...gle.com> for the idea of moving
> > private VA allocation out of __pkvm_create_private_mapping().
> >
> > Signed-off-by: Kalesh Singh <kaleshsingh@...gle.com>
> > ---
> >
> > Changes in v3:
> > - Handle null ptr in IS_ERR_OR_NULL checks, per Mark
> >
> > Changes in v2:
> > - Allow specifying an alignment for the private VA allocations, per Marc
> >
> > arch/arm64/kvm/hyp/include/nvhe/mm.h | 3 +-
> > arch/arm64/kvm/hyp/nvhe/hyp-main.c | 5 +--
> > arch/arm64/kvm/hyp/nvhe/mm.c | 51 ++++++++++++++++++----------
> > arch/arm64/kvm/mmu.c | 2 +-
> > 4 files changed, 40 insertions(+), 21 deletions(-)
> >
> > diff --git a/arch/arm64/kvm/hyp/include/nvhe/mm.h b/arch/arm64/kvm/hyp/include/nvhe/mm.h
> > index 2d08510c6cc1..05d06ad00347 100644
> > --- a/arch/arm64/kvm/hyp/include/nvhe/mm.h
> > +++ b/arch/arm64/kvm/hyp/include/nvhe/mm.h
> > @@ -20,7 +20,8 @@ int pkvm_cpu_set_vector(enum arm64_hyp_spectre_vector slot);
> > int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
> > int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot);
> > unsigned long __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
> > - enum kvm_pgtable_prot prot);
> > + size_t align, enum kvm_pgtable_prot prot);
>
> Minor nit: the indentation of this line does not match what it was
> before, i.e., it's not lined up with the other function parameters,
> and it still goes over 80 characters.
Ack
>
> > +unsigned long pkvm_alloc_private_va_range(size_t size, size_t align);
> >
> > static inline void hyp_vmemmap_range(phys_addr_t phys, unsigned long size,
> > unsigned long *start, unsigned long *end)
> > diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
> > index 5e2197db0d32..96b2312a0f1d 100644
> > --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
> > +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
> > @@ -158,9 +158,10 @@ static void handle___pkvm_create_private_mapping(struct kvm_cpu_context *host_ct
> > {
> > DECLARE_REG(phys_addr_t, phys, host_ctxt, 1);
> > DECLARE_REG(size_t, size, host_ctxt, 2);
> > - DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 3);
> > + DECLARE_REG(size_t, align, host_ctxt, 3);
> > + DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 4);
> >
> > - cpu_reg(host_ctxt, 1) = __pkvm_create_private_mapping(phys, size, prot);
> > + cpu_reg(host_ctxt, 1) = __pkvm_create_private_mapping(phys, size, align, prot);
> > }
> >
> > static void handle___pkvm_prot_finalize(struct kvm_cpu_context *host_ctxt)
> > diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c
> > index 526a7d6fa86f..f35468ec639d 100644
> > --- a/arch/arm64/kvm/hyp/nvhe/mm.c
> > +++ b/arch/arm64/kvm/hyp/nvhe/mm.c
> > @@ -37,26 +37,46 @@ static int __pkvm_create_mappings(unsigned long start, unsigned long size,
> > return err;
> > }
> >
> > -unsigned long __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
> > - enum kvm_pgtable_prot prot)
> > +/*
> > + * Allocates a private VA range above __io_map_base.
> > + *
> > + * @size: The size of the VA range to reserve.
> > + * @align: The required alignment for the allocation.
> > + */
> > +unsigned long pkvm_alloc_private_va_range(size_t size, size_t align)
> > {
> > - unsigned long addr;
> > - int err;
> > + unsigned long base, addr;
> >
> > hyp_spin_lock(&pkvm_pgd_lock);
> >
> > - size = PAGE_ALIGN(size + offset_in_page(phys));
> > - addr = __io_map_base;
> > - __io_map_base += size;
> > + addr = ALIGN(__io_map_base, align);
> > +
> > + /* The allocated size is always a multiple of PAGE_SIZE */
> > + base = addr + PAGE_ALIGN(size);
> >
> > /* Are we overflowing on the vmemmap ? */
> > - if (__io_map_base > __hyp_vmemmap) {
> > - __io_map_base -= size;
> > + if (base > __hyp_vmemmap)
> > addr = (unsigned long)ERR_PTR(-ENOMEM);
> > + else
> > + __io_map_base = base;
> > +
> > + hyp_spin_unlock(&pkvm_pgd_lock);
> > +
> > + return addr;
> > +}
> > +
> > +unsigned long __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
> > + size_t align, enum kvm_pgtable_prot prot)
> > +{
> > + unsigned long addr;
> > + int err;
> > +
> > + size += offset_in_page(phys);
>
> Same as in the previous patch: the old code page-aligned the size
> here, but this change does not. However, looking at the callers and
> callees this seems fine, since the size is aligned where needed.
This is now handled by pkvm_alloc_private_va_range(), so the caller doesn't need to:
...
/* The allocated size is always a multiple of PAGE_SIZE */
base = addr + PAGE_ALIGN(size);
...
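
To make the rounding concrete, here's a standalone userspace model of
the arithmetic (PAGE_SIZE, the macros, and the addresses below are
stand-ins for the kernel's, not code from the patch):

	#include <stdio.h>

	#define PAGE_SIZE	0x1000UL
	#define ALIGN(x, a)	(((x) + ((a) - 1)) & ~((a) - 1))
	#define PAGE_ALIGN(x)	ALIGN(x, PAGE_SIZE)
	#define offset_in_page(p)	((p) & (PAGE_SIZE - 1))

	int main(void)
	{
		unsigned long io_map_base = 0x80000000UL;
		unsigned long phys = 0x40001234UL;
		unsigned long size = 0x100;

		/* __pkvm_create_private_mapping(): fold in sub-page offset */
		size += offset_in_page(phys);		/* 0x334 */

		/* pkvm_alloc_private_va_range(): aligned base, whole pages */
		unsigned long addr = ALIGN(io_map_base, PAGE_SIZE);
		unsigned long base = addr + PAGE_ALIGN(size);	/* +0x1000 */

		printf("reserved %#lx..%#lx, mapping returned at %#lx\n",
		       addr, base, addr + offset_in_page(phys));
		return 0;
	}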
Thanks,
Kalesh
>
> Thanks,
> /fuad
>
> > + addr = pkvm_alloc_private_va_range(size, align);
> > + if (IS_ERR((void *)addr))
> > goto out;
> > - }
> >
> > - err = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, size, phys, prot);
> > + err = __pkvm_create_mappings(addr, size, phys, prot);
> > if (err) {
> > addr = (unsigned long)ERR_PTR(err);
> > goto out;
> > @@ -64,8 +84,6 @@ unsigned long __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
> >
> > addr = addr + offset_in_page(phys);
> > out:
> > - hyp_spin_unlock(&pkvm_pgd_lock);
> > -
> > return addr;
> > }
> >
> > @@ -152,11 +170,10 @@ int hyp_map_vectors(void)
> > return 0;
> >
> > phys = __hyp_pa(__bp_harden_hyp_vecs);
> > - bp_base = (void *)__pkvm_create_private_mapping(phys,
> > - __BP_HARDEN_HYP_VECS_SZ,
> > - PAGE_HYP_EXEC);
> > + bp_base = (void *)__pkvm_create_private_mapping(phys, __BP_HARDEN_HYP_VECS_SZ,
> > + PAGE_SIZE, PAGE_HYP_EXEC);
> > if (IS_ERR_OR_NULL(bp_base))
> > - return PTR_ERR(bp_base);
> > + return bp_base ? PTR_ERR(bp_base) : -ENOMEM;
> >
> > __hyp_bp_vect_base = bp_base;
> >
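
(A note on the v3 change above: IS_ERR_OR_NULL() is also true for a
NULL pointer, but PTR_ERR(NULL) evaluates to 0, which a caller would
mistake for success, so the ternary maps the NULL case to -ENOMEM
explicitly. As a sketch, the pattern is equivalent to a hypothetical
helper like:

	/* hypothetical helper, not part of the patch */
	static inline int err_or_nomem(const void *ptr)
	{
		return ptr ? PTR_ERR(ptr) : -ENOMEM;
	}
)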
> > diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> > index fc09536c8197..298e6d8439ef 100644
> > --- a/arch/arm64/kvm/mmu.c
> > +++ b/arch/arm64/kvm/mmu.c
> > @@ -505,7 +505,7 @@ int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
> >
> > if (!kvm_host_owns_hyp_mappings()) {
> > addr = kvm_call_hyp_nvhe(__pkvm_create_private_mapping,
> > - phys_addr, size, prot);
> > + phys_addr, size, align, prot);
> > if (IS_ERR_OR_NULL((void *)addr))
> > return addr ? PTR_ERR((void *)addr) : -ENOMEM;
> > *haddr = addr;
> > --
> > 2.35.1.473.g83b2b277ed-goog
> >
> > --
> > To unsubscribe from this group and stop receiving emails from it, send an email to kernel-team+unsubscribe@...roid.com.
> >