Message-ID: <1444826502-49291-30-git-send-email-wangnan0@huawei.com>
Date: Wed, 14 Oct 2015 12:41:40 +0000
From: Wang Nan <wangnan0@...wei.com>
To: <acme@...nel.org>, <ast@...mgrid.com>, <brendan.d.gregg@...il.com>
CC: <a.p.zijlstra@...llo.nl>, <daniel@...earbox.net>,
<dsahern@...il.com>, <hekuang@...wei.com>, <jolsa@...nel.org>,
<lizefan@...wei.com>, <masami.hiramatsu.pt@...achi.com>,
<namhyung@...nel.org>, <paulus@...ba.org>,
<linux-kernel@...r.kernel.org>, <pi3orama@....com>,
<xiakaixu@...wei.com>, Wang Nan <wangnan0@...wei.com>,
Arnaldo Carvalho de Melo <acme@...hat.com>
Subject: [PATCH 29/31] perf tools: Add API to apply config to BPF map

bpf__apply_config() is introduced as the core API to apply config
options to all BPF objects. This patch also does the real work of
setting up events for BPF_MAP_TYPE_PERF_EVENT_ARRAY maps by inserting
the file descriptors of an evsel into the BPF map.

This patch is required because we are unable to apply all BPF config
options during parsing. Events in BPF_MAP_TYPE_PERF_EVENT_ARRAY maps
are an example: at parsing time the fds of the events are not ready
yet.
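
For illustration only (not part of this patch): a minimal standalone
sketch of the core step performed for each BPF_MAP_TYPE_PERF_EVENT_ARRAY
map. The map is indexed by CPU, and each slot receives the perf event
file descriptor opened on that CPU. The map fd and the array of event
fds are assumed to be already available here.

  #include <errno.h>
  #include <linux/bpf.h>          /* BPF_ANY */
  #include <bpf/bpf.h>            /* bpf_map_update_elem() */

  static int fill_event_array(int map_fd, int *event_fds,
                              unsigned int nr_cpus)
  {
          unsigned int cpu;

          for (cpu = 0; cpu < nr_cpus; cpu++) {
                  /* key: CPU index, value: perf event fd for that CPU */
                  if (bpf_map_update_elem(map_fd, &cpu,
                                          &event_fds[cpu], BPF_ANY))
                          return -errno;
          }
          return 0;
  }
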
Signed-off-by: Wang Nan <wangnan0@...wei.com>
Signed-off-by: He Kuang <hekuang@...wei.com>
Cc: Arnaldo Carvalho de Melo <acme@...hat.com>
Cc: Alexei Starovoitov <ast@...mgrid.com>
Cc: Brendan Gregg <brendan.d.gregg@...il.com>
Cc: Daniel Borkmann <daniel@...earbox.net>
Cc: David Ahern <dsahern@...il.com>
Cc: He Kuang <hekuang@...wei.com>
Cc: Jiri Olsa <jolsa@...nel.org>
Cc: Kaixu Xia <xiakaixu@...wei.com>
Cc: Masami Hiramatsu <masami.hiramatsu.pt@...achi.com>
Cc: Namhyung Kim <namhyung@...nel.org>
Cc: Paul Mackerras <paulus@...ba.org>
Cc: Peter Zijlstra <a.p.zijlstra@...llo.nl>
Cc: Zefan Li <lizefan@...wei.com>
Cc: pi3orama@....com
Link: http://lkml.kernel.org/n/ebpf-tmg65cm1zaf1zxs7zmvxmxp4@git.kernel.org
---
tools/perf/util/bpf-loader.c | 109 +++++++++++++++++++++++++++++++++++++++++++
tools/perf/util/bpf-loader.h | 15 ++++++
2 files changed, 124 insertions(+)
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index b92c2f7..9d661c0 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -7,6 +7,7 @@
#include <linux/bpf.h>
#include <bpf/libbpf.h>
+#include <bpf/bpf.h>
#include <linux/err.h>
#include "perf.h"
#include "debug.h"
@@ -767,6 +768,107 @@ int bpf__config_obj(struct bpf_object *obj,
return -ENODEV;
}
+static int
+bpf__apply_config_map(struct bpf_map *map)
+{
+ struct bpf_map_priv *priv;
+ struct bpf_map_def def;
+ const char *name;
+ int err, map_fd;
+
+ name = bpf_map__get_name(map);
+ err = bpf_map__get_private(map, (void **)&priv);
+ if (err) {
+ pr_debug("ERROR: failed to get private field from map %s\n",
+ name);
+ return err;
+ }
+ if (!priv) {
+ pr_debug("INFO: nothing to config for map %s\n", name);
+ return 0;
+ }
+
+ map_fd = bpf_map__get_fd(map);
+ if (map_fd < 0) {
+ pr_debug("ERROR: failed to get fd from map %s\n", name);
+ return map_fd;
+ }
+
+ err = bpf_map__get_def(map, &def);
+ if (err) {
+ pr_debug("ERROR: failed to retrive map def from map %s\n",
+ name);
+ return err;
+ }
+
+ if (priv->evsel) {
+ struct xyarray *xy = priv->evsel->fd;
+ unsigned int cpus, i;
+
+ if (!xy) {
+ pr_debug("ERROR: event is not ready for map %s\n", name);
+ return -EINVAL;
+ }
+
+ if (xy->row_size / xy->entry_size != 1) {
+ pr_debug("ERROR: Dimension of target event is incorrect for map %s\n",
+ name);
+ return -EINVAL;
+ }
+
+ cpus = xy->entries / (xy->row_size / xy->entry_size);
+ if (cpus > def.max_entries) {
+ pr_debug("ERROR: map %s needs to be enlarge to %d for its event\n",
+ name, cpus);
+ return -EINVAL;
+ } else if (cpus < def.max_entries)
+ pr_debug("WARNING: map %s has more entries than required\n",
+ name);
+
+ for (i = 0; i < cpus; i++) {
+ int *evt_fd = xyarray__entry(xy, i, 0);
+
+ err = bpf_map_update_elem(map_fd, &i, evt_fd,
+ BPF_ANY);
+
+ if (err) {
+ pr_debug("ERROR: failed to insert fd %d to %s[%d]\n",
+ *evt_fd, name, i);
+ return -errno;
+ }
+ }
+ }
+ return 0;
+}
+
+static int
+bpf__apply_config_object(struct bpf_object *obj)
+{
+ struct bpf_map *map;
+ int err;
+
+ bpf_map__for_each(map, obj) {
+ err = bpf__apply_config_map(map);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+int bpf__apply_config(void)
+{
+ struct bpf_object *obj, *tmp;
+ int err;
+
+ bpf_object__for_each_safe(obj, tmp) {
+ err = bpf__apply_config_object(obj);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
#define bpf__strerror_head(err, buf, size) \
char sbuf[STRERR_BUFSIZE], *emsg;\
if (!size)\
@@ -822,3 +924,10 @@ int bpf__strerror_config_obj(struct bpf_object *obj __maybe_unused,
bpf__strerror_end(buf, size);
return 0;
}
+
+int bpf__strerror_apply_config(int err, char *buf, size_t size)
+{
+ bpf__strerror_head(err, buf, size);
+ bpf__strerror_end(buf, size);
+ return 0;
+}
diff --git a/tools/perf/util/bpf-loader.h b/tools/perf/util/bpf-loader.h
index 4c99b21..3a93ba3 100644
--- a/tools/perf/util/bpf-loader.h
+++ b/tools/perf/util/bpf-loader.h
@@ -53,6 +53,8 @@ int bpf__strerror_config_obj(struct bpf_object *obj,
const char *key, struct bpf_config_val *val,
struct perf_evlist *evlist,
int err, char *buf, size_t size);
+int bpf__apply_config(void);
+int bpf__strerror_apply_config(int err, char *buf, size_t size);
#else
static inline struct bpf_object *
bpf__prepare_load(const char *filename __maybe_unused,
@@ -93,6 +95,12 @@ bpf__config_obj(struct bpf_object *obj __maybe_unused,
}
static inline int
+bpf__apply_config(void)
+{
+ return 0;
+}
+
+static inline int
__bpf_strerror(char *buf, size_t size)
{
if (!size)
@@ -129,5 +137,12 @@ bpf__strerror_config_obj(struct bpf_object *obj __maybe_unused,
{
return __bpf_strerror(buf, size);
}
+
+static inline int
+bpf__strerror_apply_config(int err __maybe_unused,
+ char *buf, size_t size)
+{
+ return __bpf_strerror(buf, size);
+}
#endif
#endif
--
1.8.3.4
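
For illustration only (again not part of the patch): a hedged sketch of
how a caller such as 'perf record' might invoke the new API once the
evlist has been opened and the event fds exist. The helper name
record__apply_bpf_config() is hypothetical.

  #include <stdio.h>              /* BUFSIZ */
  #include "util/debug.h"         /* pr_err() */
  #include "util/bpf-loader.h"    /* bpf__apply_config() and friends */

  static int record__apply_bpf_config(void)
  {
          char errbuf[BUFSIZ];
          int err = bpf__apply_config();  /* walk all loaded BPF objects */

          if (err) {
                  /* translate the error into a human readable message */
                  bpf__strerror_apply_config(err, errbuf, sizeof(errbuf));
                  pr_err("ERROR: Apply config to BPF failed: %s\n", errbuf);
                  return err;
          }
          return 0;
  }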