[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <154126101356.14912.1379688281618914741.stgit@devbox>
Date: Sun, 4 Nov 2018 01:03:34 +0900
From: Masami Hiramatsu <mhiramat@...nel.org>
To: Steven Rostedt <rostedt@...dmis.org>
Cc: Masami Hiramatsu <mhiramat@...nel.org>,
linux-kernel@...r.kernel.org
Subject: [RFC PATCH] tracing/kprobes: Avoid parsing symbol+offset when updating arguments
Introduce a symbol_offset data structure to avoid re-parsing the
symbol+offset string when updating arguments.
For kprobe events, "@symbol+offset" is supported; such an argument must
be updated when a new module is loaded, because the @symbol address can
only be resolved at that point. Currently kprobe events save the
"symbol+offset" string and parse it repeatedly on every update, which
is inefficient.
This introduces the symbol_offset data structure, which stores the
offset and the symbol separately, so that we no longer need to parse
the string on each update.
Signed-off-by: Masami Hiramatsu <mhiramat@...nel.org>
---
kernel/trace/trace_probe.c | 49 +++++++++++++++++++++++++-------------------
kernel/trace/trace_probe.h | 7 +++++-
2 files changed, 34 insertions(+), 22 deletions(-)
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index bd30e9398d2a..0a3af7d6e133 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -201,6 +201,26 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t,
return ret;
}
+static struct symbol_offset *allocate_symbol_offset(char *sym_offs_str)
+{
+ int ret;
+ long offset = 0;
+ struct symbol_offset *sof;
+
+ ret = traceprobe_split_symbol_offset(sym_offs_str, &offset);
+ if (ret)
+ return ERR_PTR(ret);
+
+ sof = kzalloc(sizeof(struct symbol_offset) + strlen(sym_offs_str) + 1,
+ GFP_KERNEL);
+ if (!sof)
+ return ERR_PTR(-ENOMEM);
+
+ sof->offset = offset;
+ strcpy(sof->symbol, sym_offs_str);
+ return sof;
+}
+
/* Recursive argument parser */
static int
parse_probe_arg(char *arg, const struct fetch_type *type,
@@ -253,9 +273,9 @@ parse_probe_arg(char *arg, const struct fetch_type *type,
/* Preserve symbol for updating */
code->op = FETCH_NOP_SYMBOL;
- code->data = kstrdup(arg + 1, GFP_KERNEL);
- if (!code->data)
- return -ENOMEM;
+ code->symoffs = allocate_symbol_offset(arg + 1);
+ if (IS_ERR(code->symoffs))
+ return PTR_ERR(code->symoffs);
if (++code == end)
return -E2BIG;
@@ -483,7 +503,7 @@ int traceprobe_parse_probe_arg(char *arg, ssize_t *size,
if (ret) {
for (code = tmp; code < tmp + FETCH_INSN_MAX; code++)
if (code->op == FETCH_NOP_SYMBOL)
- kfree(code->data);
+ kfree(code->symoffs);
}
kfree(tmp);
@@ -513,7 +533,7 @@ void traceprobe_free_probe_arg(struct probe_arg *arg)
while (code && code->op != FETCH_OP_END) {
if (code->op == FETCH_NOP_SYMBOL)
- kfree(code->data);
+ kfree(code->symoffs);
code++;
}
kfree(arg->code);
@@ -525,31 +545,18 @@ void traceprobe_free_probe_arg(struct probe_arg *arg)
int traceprobe_update_arg(struct probe_arg *arg)
{
struct fetch_insn *code = arg->code;
- long offset;
- char *tmp;
- char c;
- int ret = 0;
while (code && code->op != FETCH_OP_END) {
if (code->op == FETCH_NOP_SYMBOL) {
if (code[1].op != FETCH_OP_IMM)
return -EINVAL;
- tmp = strpbrk(code->data, "+-");
- if (tmp)
- c = *tmp;
- ret = traceprobe_split_symbol_offset(code->data,
- &offset);
- if (ret)
- return ret;
-
code[1].immediate =
- (unsigned long)kallsyms_lookup_name(code->data);
- if (tmp)
- *tmp = c;
+ (unsigned long)kallsyms_lookup_name(
+ code->symoffs->symbol);
if (!code[1].immediate)
return -ENOENT;
- code[1].immediate += offset;
+ code[1].immediate += code->symoffs->offset;
}
code++;
}
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index 974afc1a3e73..942424d05427 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -103,6 +103,11 @@ enum fetch_op {
FETCH_NOP_SYMBOL, /* Unresolved Symbol holder */
};
+struct symbol_offset {
+ long offset;
+ char symbol[];
+};
+
struct fetch_insn {
enum fetch_op op;
union {
@@ -117,7 +122,7 @@ struct fetch_insn {
unsigned char rshift;
};
unsigned long immediate;
- void *data;
+ struct symbol_offset *symoffs;
};
};
Powered by blists - more mailing lists