Message-ID: <20250719043740.4548-5-jeremy.linton@arm.com>
Date: Fri, 18 Jul 2025 23:37:36 -0500
From: Jeremy Linton <jeremy.linton@arm.com>
To: linux-trace-kernel@vger.kernel.org
Cc: linux-perf-users@vger.kernel.org,
	mhiramat@kernel.org,
	oleg@redhat.com,
	peterz@infradead.org,
	acme@kernel.org,
	namhyung@kernel.org,
	mark.rutland@arm.com,
	alexander.shishkin@linux.intel.com,
	jolsa@kernel.org,
	irogers@google.com,
	adrian.hunter@intel.com,
	kan.liang@linux.intel.com,
	thiago.bauermann@linaro.org,
	broonie@kernel.org,
	yury.khrustalev@arm.com,
	kristina.martsenko@arm.com,
	liaochang1@huawei.com,
	catalin.marinas@arm.com,
	will@kernel.org,
	linux-arm-kernel@lists.infradead.org,
	linux-kernel@vger.kernel.org,
	Jeremy Linton <jeremy.linton@arm.com>
Subject: [PATCH v4 4/8] arm64: uaccess: Add additional userspace GCS accessors

Uprobes need more advanced read, push, and pop userspace GCS
functionality. Implement those operations using the existing gcsstr()
helper and copy_from_user().

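For illustration, a uretprobe-style consumer might use the push helper
roughly as follows (a hypothetical sketch, not part of this patch;
'ra' stands in for the return address being mirrored):

	int err = 0;

	if (task_gcs_el0_enabled(current)) {
		/* Mirror the return address onto the userspace GCS. */
		push_user_gcs(ra, &err);
		if (err)
			force_sig(SIGSEGV);	/* hypothetical error handling */
	}

The matching unwind side would call pop_user_gcs(&err) and compare the
popped value against the expected return address.
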
It's important to note that GCS pages can be read by normal
instructions, but the hardware validates that pages targeted by
GCS-specific operations have the GCS attribute set. We don't validate
this in load_user_gcs() because it would require stabilizing the VMA
across a read that may fault.

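For reference, such a check would look roughly like the sketch below
(hypothetical, not part of this patch). Even then the result is racy
unless the mmap lock is held across the subsequent read, and that read
can fault, which is exactly what load_user_gcs() avoids:

	/* Hypothetical VMA check, not performed by load_user_gcs(). */
	static bool gcs_vma_check(unsigned long addr)
	{
		struct vm_area_struct *vma;
		bool ok = false;

		mmap_read_lock(current->mm);
		vma = vma_lookup(current->mm, addr);
		if (vma && (vma->vm_flags & VM_SHADOW_STACK))
			ok = true;
		mmap_read_unlock(current->mm);
		return ok;
	}
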
Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
---
 arch/arm64/include/asm/gcs.h | 52 ++++++++++++++++++++++++++++++++++++
 1 file changed, 52 insertions(+)

diff --git a/arch/arm64/include/asm/gcs.h b/arch/arm64/include/asm/gcs.h
index e3b360c9dba4..f2fc8173fee3 100644
--- a/arch/arm64/include/asm/gcs.h
+++ b/arch/arm64/include/asm/gcs.h
@@ -116,6 +116,45 @@ static inline void put_user_gcs(unsigned long val, unsigned long __user *addr,
 	uaccess_ttbr0_disable();
 }
 
+/*
+ * Unlike put_user_gcs() above, the use of copy_from_user() may provide
+ * an opening for non-GCS pages to be used to source data. Therefore this
+ * should only be used in contexts where that is acceptable.
+ */
+static inline u64 load_user_gcs(unsigned long __user *addr, int *err)
+{
+	unsigned long ret;
+	u64 load = 0;
+
+	gcsb_dsync();
+	ret = copy_from_user(&load, addr, sizeof(load));
+	if (ret != 0)
+		*err = -EFAULT;
+	return load;
+}
+
+static inline void push_user_gcs(unsigned long val, int *err)
+{
+	u64 gcspr = read_sysreg_s(SYS_GCSPR_EL0);
+
+	gcspr -= sizeof(u64);
+	put_user_gcs(val, (unsigned long __user *)gcspr, err);
+	if (!*err)
+		write_sysreg_s(gcspr, SYS_GCSPR_EL0);
+}
+
+static inline u64 pop_user_gcs(int *err)
+{
+	u64 gcspr = read_sysreg_s(SYS_GCSPR_EL0);
+	u64 read_val;
+
+	read_val = load_user_gcs((unsigned long __user *)gcspr, err);
+	if (!*err)
+		write_sysreg_s(gcspr + sizeof(u64), SYS_GCSPR_EL0);
+
+	return read_val;
+}
+
 #else
 
 static inline bool task_gcs_el0_enabled(struct task_struct *task)
@@ -126,6 +165,10 @@ static inline bool task_gcs_el0_enabled(struct task_struct *task)
 static inline void gcs_set_el0_mode(struct task_struct *task) { }
 static inline void gcs_free(struct task_struct *task) { }
 static inline void gcs_preserve_current_state(void) { }
+static inline void put_user_gcs(unsigned long val, unsigned long __user *addr,
+				int *err) { }
+static inline void push_user_gcs(unsigned long val, int *err) { }
+
 static inline unsigned long gcs_alloc_thread_stack(struct task_struct *tsk,
 						   const struct kernel_clone_args *args)
 {
@@ -136,6 +179,15 @@ static inline int gcs_check_locked(struct task_struct *task,
 					   unsigned long gcs_el0_locked)
 {
 	return 0;
 }
 
+static inline u64 load_user_gcs(unsigned long __user *addr, int *err)
+{
+	*err = -EFAULT;
+	return 0;
+}
+static inline u64 pop_user_gcs(int *err)
+{
+	return 0;
+}
 #endif
--
2.50.1