[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <628463d5-b9df-51bd-8e1f-b0a41a928d85@redhat.com>
Date: Mon, 5 Jun 2023 19:43:27 +0800
From: Shaoqin Huang <shahuang@...hat.com>
To: Kristina Martsenko <kristina.martsenko@....com>,
linux-arm-kernel@...ts.infradead.org, kvmarm@...ts.linux.dev
Cc: Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>, Marc Zyngier <maz@...nel.org>,
Oliver Upton <oliver.upton@...ux.dev>,
James Morse <james.morse@....com>,
Suzuki K Poulose <suzuki.poulose@....com>,
Zenghui Yu <yuzenghui@...wei.com>,
Mark Rutland <mark.rutland@....com>,
Mark Brown <broonie@...nel.org>,
Luis Machado <luis.machado@....com>,
Vladimir Murzin <vladimir.murzin@....com>,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2 07/11] arm64: mops: handle MOPS exceptions
Hi Kristina,
On 5/9/23 22:22, Kristina Martsenko wrote:
> The memory copy/set instructions added as part of FEAT_MOPS can take an
> exception (e.g. page fault) part-way through their execution and resume
> execution afterwards.
>
> If however the task is re-scheduled and execution resumes on a different
> CPU, then the CPU may take a new type of exception to indicate this.
> This is because the architecture allows two options (Option A and Option
> B) to implement the instructions and a heterogeneous system can have
> different implementations between CPUs.
>
> In this case the OS has to reset the registers and restart execution
> from the prologue instruction. The algorithm for doing this is provided
> as part of the Arm ARM.
What is the Arm ARM? I don't quite understand it.
>
> Add an exception handler for the new exception and wire it up for
> userspace tasks.
>
> Reviewed-by: Catalin Marinas <catalin.marinas@....com>
> Signed-off-by: Kristina Martsenko <kristina.martsenko@....com>
> ---
> arch/arm64/include/asm/esr.h | 11 ++++++-
> arch/arm64/include/asm/exception.h | 1 +
> arch/arm64/kernel/entry-common.c | 11 +++++++
> arch/arm64/kernel/traps.c | 52 ++++++++++++++++++++++++++++++
> 4 files changed, 74 insertions(+), 1 deletion(-)
>
> diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
> index 8487aec9b658..ca954f566861 100644
> --- a/arch/arm64/include/asm/esr.h
> +++ b/arch/arm64/include/asm/esr.h
> @@ -47,7 +47,7 @@
> #define ESR_ELx_EC_DABT_LOW (0x24)
> #define ESR_ELx_EC_DABT_CUR (0x25)
> #define ESR_ELx_EC_SP_ALIGN (0x26)
> -/* Unallocated EC: 0x27 */
> +#define ESR_ELx_EC_MOPS (0x27)
> #define ESR_ELx_EC_FP_EXC32 (0x28)
> /* Unallocated EC: 0x29 - 0x2B */
> #define ESR_ELx_EC_FP_EXC64 (0x2C)
> @@ -356,6 +356,15 @@
> #define ESR_ELx_SME_ISS_ZA_DISABLED 3
> #define ESR_ELx_SME_ISS_ZT_DISABLED 4
>
> +/* ISS field definitions for MOPS exceptions */
> +#define ESR_ELx_MOPS_ISS_MEM_INST (UL(1) << 24)
> +#define ESR_ELx_MOPS_ISS_FROM_EPILOGUE (UL(1) << 18)
> +#define ESR_ELx_MOPS_ISS_WRONG_OPTION (UL(1) << 17)
> +#define ESR_ELx_MOPS_ISS_OPTION_A (UL(1) << 16)
> +#define ESR_ELx_MOPS_ISS_DESTREG(esr) (((esr) & (UL(0x1f) << 10)) >> 10)
> +#define ESR_ELx_MOPS_ISS_SRCREG(esr) (((esr) & (UL(0x1f) << 5)) >> 5)
> +#define ESR_ELx_MOPS_ISS_SIZEREG(esr) (((esr) & (UL(0x1f) << 0)) >> 0)
> +
> #ifndef __ASSEMBLY__
> #include <asm/types.h>
>
> diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
> index e73af709cb7a..72e83af0135f 100644
> --- a/arch/arm64/include/asm/exception.h
> +++ b/arch/arm64/include/asm/exception.h
> @@ -77,6 +77,7 @@ void do_el0_svc(struct pt_regs *regs);
> void do_el0_svc_compat(struct pt_regs *regs);
> void do_el0_fpac(struct pt_regs *regs, unsigned long esr);
> void do_el1_fpac(struct pt_regs *regs, unsigned long esr);
> +void do_el0_mops(struct pt_regs *regs, unsigned long esr);
> void do_serror(struct pt_regs *regs, unsigned long esr);
> void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags);
>
> diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
> index 3af3c01c93a6..a8ec174e5b0e 100644
> --- a/arch/arm64/kernel/entry-common.c
> +++ b/arch/arm64/kernel/entry-common.c
> @@ -611,6 +611,14 @@ static void noinstr el0_bti(struct pt_regs *regs)
> exit_to_user_mode(regs);
> }
>
> +static void noinstr el0_mops(struct pt_regs *regs, unsigned long esr)
> +{
> + enter_from_user_mode(regs);
> + local_daif_restore(DAIF_PROCCTX);
> + do_el0_mops(regs, esr);
> + exit_to_user_mode(regs);
> +}
> +
> static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
> {
> enter_from_user_mode(regs);
> @@ -688,6 +696,9 @@ asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
> case ESR_ELx_EC_BTI:
> el0_bti(regs);
> break;
> + case ESR_ELx_EC_MOPS:
> + el0_mops(regs, esr);
> + break;
> case ESR_ELx_EC_BREAKPT_LOW:
> case ESR_ELx_EC_SOFTSTP_LOW:
> case ESR_ELx_EC_WATCHPT_LOW:
> diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
> index 4bb1b8f47298..32dc692bffd3 100644
> --- a/arch/arm64/kernel/traps.c
> +++ b/arch/arm64/kernel/traps.c
> @@ -514,6 +514,57 @@ void do_el1_fpac(struct pt_regs *regs, unsigned long esr)
> die("Oops - FPAC", regs, esr);
> }
>
> +void do_el0_mops(struct pt_regs *regs, unsigned long esr)
> +{
> + bool wrong_option = esr & ESR_ELx_MOPS_ISS_WRONG_OPTION;
> + bool option_a = esr & ESR_ELx_MOPS_ISS_OPTION_A;
> + int dstreg = ESR_ELx_MOPS_ISS_DESTREG(esr);
> + int srcreg = ESR_ELx_MOPS_ISS_SRCREG(esr);
> + int sizereg = ESR_ELx_MOPS_ISS_SIZEREG(esr);
> + unsigned long dst, src, size;
> +
> + dst = pt_regs_read_reg(regs, dstreg);
> + src = pt_regs_read_reg(regs, srcreg);
> + size = pt_regs_read_reg(regs, sizereg);
> +
> + /*
> + * Put the registers back in the original format suitable for a
> + * prologue instruction, using the generic return routine from the
> + * Arm ARM (DDI 0487I.a) rules CNTMJ and MWFQH.
> + */
> + if (esr & ESR_ELx_MOPS_ISS_MEM_INST) {
> + /* SET* instruction */
> + if (option_a ^ wrong_option) {
> + /* Format is from Option A; forward set */
> + pt_regs_write_reg(regs, dstreg, dst + size);
> + pt_regs_write_reg(regs, sizereg, -size);
> + }
> + } else {
> + /* CPY* instruction */
> + if (!(option_a ^ wrong_option)) {
> + /* Format is from Option B */
> + if (regs->pstate & PSR_N_BIT) {
> + /* Backward copy */
> + pt_regs_write_reg(regs, dstreg, dst - size);
> + pt_regs_write_reg(regs, srcreg, src - size);
> + }
> + } else {
> + /* Format is from Option A */
> + if (size & BIT(63)) {
> + /* Forward copy */
> + pt_regs_write_reg(regs, dstreg, dst + size);
> + pt_regs_write_reg(regs, srcreg, src + size);
> + pt_regs_write_reg(regs, sizereg, -size);
> + }
> + }
> + }
> +
> + if (esr & ESR_ELx_MOPS_ISS_FROM_EPILOGUE)
> + regs->pc -= 8;
> + else
> + regs->pc -= 4;
> +}
> +
> #define __user_cache_maint(insn, address, res) \
> if (address >= TASK_SIZE_MAX) { \
> res = -EFAULT; \
> @@ -824,6 +875,7 @@ static const char *esr_class_str[] = {
> [ESR_ELx_EC_DABT_LOW] = "DABT (lower EL)",
> [ESR_ELx_EC_DABT_CUR] = "DABT (current EL)",
> [ESR_ELx_EC_SP_ALIGN] = "SP Alignment",
> + [ESR_ELx_EC_MOPS] = "MOPS",
> [ESR_ELx_EC_FP_EXC32] = "FP (AArch32)",
> [ESR_ELx_EC_FP_EXC64] = "FP (AArch64)",
> [ESR_ELx_EC_SERROR] = "SError",
--
Shaoqin
Powered by blists - more mailing lists