Message-ID: <CA+EHjTzU=LG7bV9520yJNH0jRgcZa=C3JZNr6LHSmHq28Kjeqw@mail.gmail.com>
Date: Tue, 29 Mar 2022 09:50:55 +0100
From: Fuad Tabba <tabba@...gle.com>
To: Kalesh Singh <kaleshsingh@...gle.com>
Cc: will@...nel.org, maz@...nel.org, qperret@...gle.com,
surenb@...gle.com, kernel-team@...roid.com,
James Morse <james.morse@....com>,
Alexandru Elisei <alexandru.elisei@....com>,
Suzuki K Poulose <suzuki.poulose@....com>,
Catalin Marinas <catalin.marinas@....com>,
Mark Rutland <mark.rutland@....com>,
Mark Brown <broonie@...nel.org>,
Masami Hiramatsu <mhiramat@...nel.org>,
Peter Collingbourne <pcc@...gle.com>,
"Madhavan T. Venkataraman" <madvenka@...ux.microsoft.com>,
Stephen Boyd <swboyd@...omium.org>,
Andrew Walbran <qwandor@...gle.com>,
Andrew Scull <ascull@...gle.com>,
linux-arm-kernel@...ts.infradead.org, kvmarm@...ts.cs.columbia.edu,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH v6 3/8] KVM: arm64: Add guard pages for KVM nVHE
hypervisor stack

Hi Kalesh,

On Mon, Mar 14, 2022 at 8:03 PM Kalesh Singh <kaleshsingh@...gle.com> wrote:
>
> Map the stack pages in the flexible private VA range and allocate
> guard pages below the stack as unbacked VA space. The stack is aligned
> so that any valid stack address has bit PAGE_SHIFT set to 1 - this is
> used for overflow detection (implemented in a subsequent patch in the
> series).
>
> Signed-off-by: Kalesh Singh <kaleshsingh@...gle.com>

Tested-by: Fuad Tabba <tabba@...gle.com>
Reviewed-by: Fuad Tabba <tabba@...gle.com>

Thanks,
/fuad
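
Since the bit trick described in the commit message is only used for
detection later in the series, here is a minimal sketch of the idea for
anyone following along (hyp_stack_overflowed() is a hypothetical helper
for illustration, not something this patch adds): with the stack page
mapped at hyp_addr + PAGE_SIZE and the guard page at hyp_addr left
unbacked, a stack pointer that is still on the stack has bit PAGE_SHIFT
set, while one that has overflowed into the guard page has it clear.

	/*
	 * Illustrative only -- the real check is added by a subsequent
	 * patch in this series. Assumes PAGE_SIZE/PAGE_SHIFT from
	 * <asm/page.h>; since PAGE_SIZE == BIT(PAGE_SHIFT), the AND
	 * below tests bit PAGE_SHIFT of the stack pointer.
	 */
	static inline bool hyp_stack_overflowed(unsigned long sp)
	{
		return !(sp & PAGE_SIZE);
	}
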
> ---
>
> Changes in v6:
> - Update call to hyp_alloc_private_va_range() (return val and params)
>
> Changes in v5:
> - Use a single allocation for stack and guard pages to ensure they
> are contiguous, per Marc
>
> Changes in v4:
> - Replace IS_ERR_OR_NULL check with IS_ERR check now that
> hyp_alloc_private_va_range() returns an error for null
> pointer, per Fuad
> - Format comments to < 80 cols, per Fuad
>
> Changes in v3:
> - Handle null ptr in IS_ERR_OR_NULL checks, per Mark
>
>
> arch/arm64/include/asm/kvm_asm.h | 1 +
> arch/arm64/include/asm/kvm_mmu.h | 3 +++
> arch/arm64/kvm/arm.c | 39 +++++++++++++++++++++++++++++---
> arch/arm64/kvm/mmu.c | 4 ++--
> 4 files changed, 42 insertions(+), 5 deletions(-)
>
> diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
> index d5b0386ef765..2e277f2ed671 100644
> --- a/arch/arm64/include/asm/kvm_asm.h
> +++ b/arch/arm64/include/asm/kvm_asm.h
> @@ -169,6 +169,7 @@ struct kvm_nvhe_init_params {
> unsigned long tcr_el2;
> unsigned long tpidr_el2;
> unsigned long stack_hyp_va;
> + unsigned long stack_pa;
> phys_addr_t pgd_pa;
> unsigned long hcr_el2;
> unsigned long vttbr;
> diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
> index 3cc9aa25f510..967a9ea9a47b 100644
> --- a/arch/arm64/include/asm/kvm_mmu.h
> +++ b/arch/arm64/include/asm/kvm_mmu.h
> @@ -116,6 +116,9 @@ alternative_cb_end
> #include <asm/cacheflush.h>
> #include <asm/mmu_context.h>
>
> +extern struct kvm_pgtable *hyp_pgtable;
> +extern struct mutex kvm_hyp_pgd_mutex;
> +
> void kvm_update_va_mask(struct alt_instr *alt,
> __le32 *origptr, __le32 *updptr, int nr_inst);
> void kvm_compute_layout(void);
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index 4dca6ffd03d4..72be7e695d8d 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -1538,7 +1538,6 @@ static void cpu_prepare_hyp_mode(int cpu)
> tcr |= (idmap_t0sz & GENMASK(TCR_TxSZ_WIDTH - 1, 0)) << TCR_T0SZ_OFFSET;
> params->tcr_el2 = tcr;
>
> - params->stack_hyp_va = kern_hyp_va(per_cpu(kvm_arm_hyp_stack_page, cpu) + PAGE_SIZE);
> params->pgd_pa = kvm_mmu_get_httbr();
> if (is_protected_kvm_enabled())
> params->hcr_el2 = HCR_HOST_NVHE_PROTECTED_FLAGS;
> @@ -1987,14 +1986,48 @@ static int init_hyp_mode(void)
> * Map the Hyp stack pages
> */
> for_each_possible_cpu(cpu) {
> + struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
> char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
> - err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE,
> - PAGE_HYP);
> + unsigned long hyp_addr;
>
> + /*
> + * Allocate a contiguous HYP private VA range for the stack
> + * and guard page. The allocation is also aligned based on
> + * the order of its size.
> + */
> + err = hyp_alloc_private_va_range(PAGE_SIZE * 2, &hyp_addr);
> + if (err) {
> + kvm_err("Cannot allocate hyp stack guard page\n");
> + goto out_err;
> + }
> +
> + /*
> + * Since the stack grows downwards, map the stack to the page
> + * at the higher address and leave the lower guard page
> + * unbacked.
> + *
> + * Any valid stack address now has the PAGE_SHIFT bit as 1
> + * and addresses corresponding to the guard page have the
> + * PAGE_SHIFT bit as 0 - this is used for overflow detection.
> + */
> + mutex_lock(&kvm_hyp_pgd_mutex);
> + err = kvm_pgtable_hyp_map(hyp_pgtable, hyp_addr + PAGE_SIZE,
> + PAGE_SIZE, __pa(stack_page), PAGE_HYP);
> + mutex_unlock(&kvm_hyp_pgd_mutex);
> if (err) {
> kvm_err("Cannot map hyp stack\n");
> goto out_err;
> }
> +
> + /*
> + * Save the stack PA in nvhe_init_params. This will be needed
> + * to recreate the stack mapping in protected nVHE mode.
> + * __hyp_pa() won't do the right thing there, since the stack
> + * has been mapped in the flexible private VA space.
> + */
> + params->stack_pa = __pa(stack_page);
> +
> + params->stack_hyp_va = hyp_addr + (2 * PAGE_SIZE);
> }
>
> for_each_possible_cpu(cpu) {
> diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> index 7326d683c500..9bfc6d8f3c49 100644
> --- a/arch/arm64/kvm/mmu.c
> +++ b/arch/arm64/kvm/mmu.c
> @@ -22,8 +22,8 @@
>
> #include "trace.h"
>
> -static struct kvm_pgtable *hyp_pgtable;
> -static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
> +struct kvm_pgtable *hyp_pgtable;
> +DEFINE_MUTEX(kvm_hyp_pgd_mutex);
>
> static unsigned long hyp_idmap_start;
> static unsigned long hyp_idmap_end;
> --
> 2.35.1.723.g4982287a31-goog
>
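
For completeness, the per-CPU hyp VA layout that the init_hyp_mode() hunk
above sets up looks roughly like this (a reviewer's sketch based on the
patch, not part of it):

	/*
	 *  hyp_addr + 2 * PAGE_SIZE --> params->stack_hyp_va
	 *                               (initial SP; the stack grows down)
	 *  hyp_addr + PAGE_SIZE     --> stack page, backed by __pa(stack_page)
	 *  hyp_addr                 --> guard page, intentionally unbacked
	 */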