Message-ID: <Z6zWSXzKctkpyH7-@arm.com>
Date: Wed, 12 Feb 2025 17:11:37 +0000
From: Catalin Marinas <catalin.marinas@....com>
To: Tong Tiangen <tongtiangen@...wei.com>
Cc: Mark Rutland <mark.rutland@....com>,
Jonathan Cameron <Jonathan.Cameron@...wei.com>,
Mauro Carvalho Chehab <mchehab+huawei@...nel.org>,
Will Deacon <will@...nel.org>,
Andrew Morton <akpm@...ux-foundation.org>,
James Morse <james.morse@....com>,
Robin Murphy <robin.murphy@....com>,
Andrey Konovalov <andreyknvl@...il.com>,
Dmitry Vyukov <dvyukov@...gle.com>,
Vincenzo Frascino <vincenzo.frascino@....com>,
Michael Ellerman <mpe@...erman.id.au>,
Nicholas Piggin <npiggin@...il.com>,
Andrey Ryabinin <ryabinin.a.a@...il.com>,
Alexander Potapenko <glider@...gle.com>,
Christophe Leroy <christophe.leroy@...roup.eu>,
"Aneesh Kumar K.V" <aneesh.kumar@...nel.org>,
"Naveen N. Rao" <naveen.n.rao@...ux.ibm.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>, x86@...nel.org,
"H. Peter Anvin" <hpa@...or.com>,
Madhavan Srinivasan <maddy@...ux.ibm.com>,
linux-arm-kernel@...ts.infradead.org, linux-mm@...ck.org,
linuxppc-dev@...ts.ozlabs.org, linux-kernel@...r.kernel.org,
kasan-dev@...glegroups.com, wangkefeng.wang@...wei.com,
Guohanjun <guohanjun@...wei.com>
Subject: Re: [PATCH v13 4/5] arm64: support copy_mc_[user]_highpage()
On Mon, Dec 09, 2024 at 10:42:56AM +0800, Tong Tiangen wrote:
> The kernel already supports several scenarios that can tolerate memory
> errors while copying a page[1~5], all of which are implemented via
> copy_mc_[user]_highpage(). arm64 should support this mechanism as well.
>
> Due to MTE, arm64 needs its own architecture-specific implementation of
> copy_mc_[user]_highpage(); the macros __HAVE_ARCH_COPY_MC_HIGHPAGE and
> __HAVE_ARCH_COPY_MC_USER_HIGHPAGE have been added to control it.
>
> Add a new helper, copy_mc_page(), which provides a page copy that is safe
> against hardware memory errors. Its logic is the same as copy_page(); the
> main difference is that the ldp instructions in copy_mc_page() carry the
> fixup type EX_TYPE_KACCESS_ERR_ZERO_MEM_ERR, so the shared logic is
> extracted into copy_page_template.S. The fixup of MOPS instructions is not
> handled for now.
Could we not add the exception table entry permanently and simply ignore it
if we are not on the do_sea() path? That would save some code duplication.
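Something along these lines is what I have in mind (only a rough sketch;
the name fixup_mem_err_exception() and the from_sea parameter are made up
for illustration, and the actual fixup_exception() plumbing on the SEA path
may look different):

bool fixup_mem_err_exception(struct pt_regs *regs, bool from_sea)
{
	const struct exception_table_entry *ex;

	ex = search_exception_tables(instruction_pointer(regs));
	if (!ex)
		return false;

	/*
	 * The entry is always present on the ldp in copy_page(), but it
	 * is only honoured when we got here via do_sea(), i.e. the fault
	 * really is a hardware memory error.
	 */
	if (ex->type == EX_TYPE_KACCESS_ERR_ZERO_MEM_ERR && !from_sea)
		return false;

	regs->pc = get_ex_fixup(ex);
	return true;
}

That way copy_page() and copy_mc_page() could potentially share the same
body instead of duplicating it via copy_page_template.S.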
> diff --git a/arch/arm64/lib/copy_mc_page.S b/arch/arm64/lib/copy_mc_page.S
> new file mode 100644
> index 000000000000..51564828c30c
> --- /dev/null
> +++ b/arch/arm64/lib/copy_mc_page.S
> @@ -0,0 +1,37 @@
> +/* SPDX-License-Identifier: GPL-2.0-only */
> +
> +#include <linux/linkage.h>
> +#include <linux/const.h>
> +#include <asm/assembler.h>
> +#include <asm/page.h>
> +#include <asm/cpufeature.h>
> +#include <asm/alternative.h>
> +#include <asm/asm-extable.h>
> +#include <asm/asm-uaccess.h>
> +
> +/*
> + * Copy a page from src to dest (both are page aligned) with memory error safe
> + *
> + * Parameters:
> + * x0 - dest
> + * x1 - src
> + * Returns:
> + * x0 - Return 0 if copy success, or -EFAULT if anything goes wrong
> + * while copying.
> + */
> + .macro ldp1 reg1, reg2, ptr, val
> + KERNEL_MEM_ERR(9998f, ldp \reg1, \reg2, [\ptr, \val])
> + .endm
> +
> +SYM_FUNC_START(__pi_copy_mc_page)
> +#include "copy_page_template.S"
> +
> + mov x0, #0
> + ret
> +
> +9998: mov x0, #-EFAULT
> + ret
> +
> +SYM_FUNC_END(__pi_copy_mc_page)
> +SYM_FUNC_ALIAS(copy_mc_page, __pi_copy_mc_page)
> +EXPORT_SYMBOL(copy_mc_page)
[...]
> diff --git a/arch/arm64/lib/copy_page_template.S b/arch/arm64/lib/copy_page_template.S
> new file mode 100644
> index 000000000000..f96c7988c93d
> --- /dev/null
> +++ b/arch/arm64/lib/copy_page_template.S
> @@ -0,0 +1,70 @@
> +/* SPDX-License-Identifier: GPL-2.0-only */
> +/*
> + * Copyright (C) 2012 ARM Ltd.
> + */
> +
> +/*
> + * Copy a page from src to dest (both are page aligned)
> + *
> + * Parameters:
> + * x0 - dest
> + * x1 - src
> + */
> +
> +#ifdef CONFIG_AS_HAS_MOPS
> + .arch_extension mops
> +alternative_if_not ARM64_HAS_MOPS
> + b .Lno_mops
> +alternative_else_nop_endif
> +
> + mov x2, #PAGE_SIZE
> + cpypwn [x0]!, [x1]!, x2!
> + cpymwn [x0]!, [x1]!, x2!
> + cpyewn [x0]!, [x1]!, x2!
> + ret
> +.Lno_mops:
> +#endif
[...]
So if we have FEAT_MOPS, the machine check won't work?
Kristina is going to post MOPS support for the uaccess routines soon.
You can see how they are wired up and do something similar here.
But I'd prefer if we had the same code, only the exception table entry
treated differently. Similarly for the MTE tag copying.
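FWIW, the sort of thing I'd expect the MOPS side to eventually look like
(purely a sketch reusing the KERNEL_MEM_ERR() macro from this series;
whether one fixup per instruction is enough, and how a partially completed
copy is handled, will have to follow whatever the uaccess MOPS support
ends up doing):

alternative_if_not ARM64_HAS_MOPS
	b	.Lno_mops
alternative_else_nop_endif

	mov	x2, #PAGE_SIZE
	/* give each MOPS insn a memory-error fixup landing on 9998f */
	KERNEL_MEM_ERR(9998f, cpypwn	[x0]!, [x1]!, x2!)
	KERNEL_MEM_ERR(9998f, cpymwn	[x0]!, [x1]!, x2!)
	KERNEL_MEM_ERR(9998f, cpyewn	[x0]!, [x1]!, x2!)
	mov	x0, #0			// report success
	ret
.Lno_mops:

with 9998f returning -EFAULT as in the ldp case.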
--
Catalin