Message-ID: <20140228143316.211d4034@gandalf.local.home>
Date: Fri, 28 Feb 2014 14:33:16 -0500
From: Steven Rostedt <rostedt@...dmis.org>
To: "H. Peter Anvin" <hpa@...or.com>
Cc: Peter Zijlstra <peterz@...radead.org>,
Vince Weaver <vincent.weaver@...ne.edu>,
Linux Kernel <linux-kernel@...r.kernel.org>,
Ingo Molnar <mingo@...hat.com>, Jiri Olsa <jolsa@...hat.com>,
Arnaldo Carvalho de Melo <acme@...stprotocols.net>
Subject: [PATCH] x86: Rename copy_from_user_nmi() to copy_from_user_trace()
[ H. Peter, Here's the rename patch. I did not include your update. You
can add that first and then massage this patch on top. But this isn't
critical for mainline or stable, whereas I believe your patch is. ]
The tracing utilities sometimes need to read from userspace (for example,
when doing user stack tracing), and to do this they have a special
copy_from_user() variant called copy_from_user_nmi(). Since tracers can
call this function from outside of NMI context, the "_nmi" suffix is a
misnomer and "_trace" is a better name.
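For reference, here is a minimal sketch of how a stack tracer typically uses
this helper; the struct stack_frame layout and the read_user_frame() wrapper
below are illustrative only and are not part of this patch:

	/*
	 * Illustrative only: copy one user stack frame with the renamed
	 * helper.  copy_from_user_trace() returns the number of bytes
	 * that could NOT be copied, so 0 means success.
	 */
	struct stack_frame {
		struct stack_frame __user	*next_frame;
		unsigned long			return_address;
	};

	static unsigned long read_user_frame(struct stack_frame *frame,
					     const void __user *fp)
	{
		frame->next_frame     = NULL;
		frame->return_address = 0;

		return copy_from_user_trace(frame, fp, sizeof(*frame));
	}

A caller would loop, following frame->next_frame, until the copy returns
non-zero or the frame pointer is no longer a valid user address.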
Signed-off-by: Steven Rostedt <rostedt@...dmis.org>
---
arch/x86/include/asm/perf_event.h | 2 +-
arch/x86/include/asm/uaccess.h | 2 +-
arch/x86/kernel/cpu/perf_event.c | 4 ++--
arch/x86/kernel/cpu/perf_event_intel_ds.c | 2 +-
arch/x86/kernel/cpu/perf_event_intel_lbr.c | 2 +-
arch/x86/lib/usercopy.c | 10 ++++++----
arch/x86/oprofile/backtrace.c | 4 ++--
7 files changed, 14 insertions(+), 12 deletions(-)
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 8249df4..7bf4b25 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -274,6 +274,6 @@ static inline void perf_check_microcode(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif
-#define arch_perf_out_copy_user copy_from_user_nmi
+#define arch_perf_out_copy_user copy_from_user_trace
#endif /* _ASM_X86_PERF_EVENT_H */
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 8ec57c0..d734baf 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -515,7 +515,7 @@ struct __large_struct { unsigned long buf[100]; };
__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
extern unsigned long
-copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
+copy_from_user_trace(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 8e13293..7eda4af 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1988,7 +1988,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
frame.next_frame = 0;
frame.return_address = 0;
- bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
+ bytes = copy_from_user_trace(&frame, fp, sizeof(frame));
if (bytes != 0)
break;
@@ -2040,7 +2040,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
frame.next_frame = NULL;
frame.return_address = 0;
- bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
+ bytes = copy_from_user_trace(&frame, fp, sizeof(frame));
if (bytes != 0)
break;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index ae96cfa..95d76c6 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -788,7 +788,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
u8 *buf = this_cpu_read(insn_buffer);
size = ip - to; /* Must fit our buffer, see above */
- bytes = copy_from_user_nmi(buf, (void __user *)to, size);
+ bytes = copy_from_user_trace(buf, (void __user *)to, size);
if (bytes != 0)
return 0;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index d82d155..0505ada 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -490,7 +490,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
return X86_BR_NONE;
/* may fail if text not present */
- bytes = copy_from_user_nmi(buf, (void __user *)from, size);
+ bytes = copy_from_user_trace(buf, (void __user *)from, size);
if (bytes != 0)
return X86_BR_NONE;
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index ddf9ecb..131ff16e 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -11,11 +11,13 @@
#include <linux/sched.h>
/*
- * We rely on the nested NMI work to allow atomic faults from the NMI path; the
- * nested NMI paths are careful to preserve CR2.
+ * Used by tracing, which needs to restore the state of the cr2 register
+ * if the copy triggers a page fault. That's because tracing can happen
+ * between the time a normal page fault occurs and the time cr2 is read
+ * by its handler.
*/
unsigned long
-copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
+copy_from_user_trace(void *to, const void __user *from, unsigned long n)
{
unsigned long ret;
@@ -33,4 +35,4 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
return ret;
}
-EXPORT_SYMBOL_GPL(copy_from_user_nmi);
+EXPORT_SYMBOL_GPL(copy_from_user_trace);
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
index 5d04be5..7831650 100644
--- a/arch/x86/oprofile/backtrace.c
+++ b/arch/x86/oprofile/backtrace.c
@@ -46,7 +46,7 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
struct stack_frame_ia32 *fp;
unsigned long bytes;
- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
+ bytes = copy_from_user_trace(bufhead, head, sizeof(bufhead));
if (bytes != 0)
return NULL;
@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
struct stack_frame bufhead[2];
unsigned long bytes;
- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
+ bytes = copy_from_user_trace(bufhead, head, sizeof(bufhead));
if (bytes != 0)
return NULL;
--
1.8.1.4