Message-Id: <20200406210022.32265-1-msuchanek@suse.de>
Date: Mon, 6 Apr 2020 23:00:22 +0200
From: Michal Suchanek <msuchanek@...e.de>
To: linuxppc-dev@...ts.ozlabs.org, Nicholas Piggin <npiggin@...il.com>
Cc: Michal Suchanek <msuchanek@...e.de>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Jiri Olsa <jolsa@...hat.com>,
Namhyung Kim <namhyung@...nel.org>,
Michael Ellerman <mpe@...erman.id.au>,
Benjamin Herrenschmidt <benh@...nel.crashing.org>,
Paul Mackerras <paulus@...ba.org>,
linux-kernel@...r.kernel.org,
Christophe Leroy <christophe.leroy@....fr>
Subject: [PATCH] powerpc/perf: consolidate perf_callchain_user_64 and perf_callchain_user_32
perf_callchain_user_64 and perf_callchain_user_32 are nearly identical.
Consolidate into one function with thin wrappers.
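For readers who want to see the resulting shape outside the kernel tree, here is a
minimal stand-alone sketch of the same pattern (user-space C; the read_word*() names
are illustrative only, and memcpy() stands in for probe_user_read(), so this is not
the patch's actual code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* One size-parameterized reader, analogous to __read_user_stack(). */
static int read_word(const void *ptr, void *ret, size_t size)
{
	/* Reject pointers that are not naturally aligned for the word size. */
	if ((uintptr_t)ptr & (size - 1))
		return -1;
	memcpy(ret, ptr, size);		/* stands in for probe_user_read() */
	return 0;
}

/* Thin fixed-width wrappers, analogous to read_user_stack_32()/_64(). */
static int read_word_32(const uint32_t *ptr, uint32_t *ret)
{
	return read_word(ptr, ret, sizeof(*ret));
}

static int read_word_64(const uint64_t *ptr, uint64_t *ret)
{
	return read_word(ptr, ret, sizeof(*ret));
}

int main(void)
{
	uint32_t w32 = 0x12345678, out32;
	uint64_t w64 = 0x1122334455667788ULL, out64;

	if (!read_word_32(&w32, &out32))
		printf("32-bit read: 0x%x\n", out32);
	if (!read_word_64(&w64, &out64))
		printf("64-bit read: 0x%llx\n", (unsigned long long)out64);
	return 0;
}

Keeping the wrappers means each caller still gets a prototype typed for its own word
size, while the shared checks and the read itself live in a single place.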
Suggested-by: Nicholas Piggin <npiggin@...il.com>
Signed-off-by: Michal Suchanek <msuchanek@...e.de>
---
arch/powerpc/perf/callchain.h | 24 +++++++++++++++++++++++-
arch/powerpc/perf/callchain_32.c | 21 ++-------------------
arch/powerpc/perf/callchain_64.c | 14 ++++----------
3 files changed, 29 insertions(+), 30 deletions(-)
diff --git a/arch/powerpc/perf/callchain.h b/arch/powerpc/perf/callchain.h
index 7a2cb9e1181a..7540bb71cb60 100644
--- a/arch/powerpc/perf/callchain.h
+++ b/arch/powerpc/perf/callchain.h
@@ -2,7 +2,7 @@
#ifndef _POWERPC_PERF_CALLCHAIN_H
#define _POWERPC_PERF_CALLCHAIN_H
-int read_user_stack_slow(void __user *ptr, void *buf, int nb);
+int read_user_stack_slow(const void __user *ptr, void *buf, int nb);
void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs);
void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
@@ -16,4 +16,26 @@ static inline bool invalid_user_sp(unsigned long sp)
return (!sp || (sp & mask) || (sp > top));
}
+/*
+ * On 32-bit we just access the address and let hash_page create a
+ * HPTE if necessary, so there is no need to fall back to reading
+ * the page tables. Since this is called at interrupt level,
+ * do_page_fault() won't treat a DSI as a page fault.
+ */
+static inline int __read_user_stack(const void __user *ptr, void *ret,
+ size_t size)
+{
+ int rc;
+
+ if ((unsigned long)ptr > TASK_SIZE - size ||
+ ((unsigned long)ptr & (size - 1)))
+ return -EFAULT;
+ rc = probe_user_read(ret, ptr, size);
+
+ if (rc && IS_ENABLED(CONFIG_PPC64))
+ return read_user_stack_slow(ptr, ret, size);
+
+ return rc;
+}
+
#endif /* _POWERPC_PERF_CALLCHAIN_H */
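(Aside for readers unfamiliar with the idiom: the (size - 1) mask in
__read_user_stack() above only works because size is a power of two, so it rejects
pointers that are not naturally aligned for the word size, and the TASK_SIZE - size
comparison ensures the whole word still fits below TASK_SIZE. A stand-alone
illustration, using a made-up FAKE_TASK_SIZE rather than the kernel's TASK_SIZE:

#include <stdint.h>
#include <stdio.h>

#define FAKE_TASK_SIZE 0x100000000ULL	/* illustrative stand-in for TASK_SIZE */

/* Mirrors the bounds + alignment test in __read_user_stack(). */
static int stack_ptr_ok(uint64_t ptr, size_t size)
{
	if (ptr > FAKE_TASK_SIZE - size)	/* word must end below the limit */
		return 0;
	if (ptr & (size - 1))			/* natural alignment for this word size */
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", stack_ptr_ok(0x1000, 8));	/* 1: aligned and in range */
	printf("%d\n", stack_ptr_ok(0x1004, 8));	/* 0: not 8-byte aligned */
	printf("%d\n", stack_ptr_ok(0xfffffffcULL, 8));	/* 0: runs past the limit */
	return 0;
}
)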
diff --git a/arch/powerpc/perf/callchain_32.c b/arch/powerpc/perf/callchain_32.c
index 8aa951003141..1b4621f177e8 100644
--- a/arch/powerpc/perf/callchain_32.c
+++ b/arch/powerpc/perf/callchain_32.c
@@ -31,26 +31,9 @@
#endif /* CONFIG_PPC64 */
-/*
- * On 32-bit we just access the address and let hash_page create a
- * HPTE if necessary, so there is no need to fall back to reading
- * the page tables. Since this is called at interrupt level,
- * do_page_fault() won't treat a DSI as a page fault.
- */
-static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
+static int read_user_stack_32(const unsigned int __user *ptr, unsigned int *ret)
{
- int rc;
-
- if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
- ((unsigned long)ptr & 3))
- return -EFAULT;
-
- rc = probe_user_read(ret, ptr, sizeof(*ret));
-
- if (IS_ENABLED(CONFIG_PPC64) && rc)
- return read_user_stack_slow(ptr, ret, 4);
-
- return rc;
+ return __read_user_stack(ptr, ret, sizeof(*ret));
}
/*
diff --git a/arch/powerpc/perf/callchain_64.c b/arch/powerpc/perf/callchain_64.c
index df1ffd8b20f2..55bbc25a54ed 100644
--- a/arch/powerpc/perf/callchain_64.c
+++ b/arch/powerpc/perf/callchain_64.c
@@ -24,7 +24,7 @@
* interrupt context, so if the access faults, we read the page tables
* to find which page (if any) is mapped and access it directly.
*/
-int read_user_stack_slow(void __user *ptr, void *buf, int nb)
+int read_user_stack_slow(const void __user *ptr, void *buf, int nb)
{
int ret = -EFAULT;
pgd_t *pgdir;
@@ -65,16 +65,10 @@ int read_user_stack_slow(void __user *ptr, void *buf, int nb)
return ret;
}
-static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
+static int read_user_stack_64(const unsigned long __user *ptr,
+ unsigned long *ret)
{
- if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned long) ||
- ((unsigned long)ptr & 7))
- return -EFAULT;
-
- if (!probe_user_read(ret, ptr, sizeof(*ret)))
- return 0;
-
- return read_user_stack_slow(ptr, ret, 8);
+ return __read_user_stack(ptr, ret, sizeof(*ret));
}
/*
--
2.23.0