Message-Id: <20190116142119.8358-6-quentin.monnet@netronome.com>
Date: Wed, 16 Jan 2019 14:21:15 +0000
From: Quentin Monnet <quentin.monnet@...ronome.com>
To: Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>
Cc: netdev@...r.kernel.org, oss-drivers@...ronome.com,
Quentin Monnet <quentin.monnet@...ronome.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Jesper Dangaard Brouer <brouer@...hat.com>,
Stanislav Fomichev <sdf@...gle.com>
Subject: [PATCH bpf-next v4 5/9] tools: bpftool: add probes for eBPF map types
Add new probes for eBPF map types, to detect which ones are available on
the system. Try creating one map of each type, and see if the kernel
complains.
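For illustration, here is a minimal sketch of how a caller might use the
new libbpf helper (not part of the patch; it assumes libbpf and the bpf
UAPI headers are reachable as <bpf/libbpf.h> and <linux/bpf.h>, and passes
ifindex 0 to probe the host kernel rather than an offload device):

    #include <stdio.h>
    #include <linux/bpf.h>     /* enum bpf_map_type */
    #include <bpf/libbpf.h>    /* bpf_probe_map_type() */

    int main(void)
    {
            /* ifindex 0: probe the running kernel, not a device */
            if (bpf_probe_map_type(BPF_MAP_TYPE_HASH, 0))
                    printf("eBPF map_type hash is available\n");
            else
                    printf("eBPF map_type hash is NOT available\n");
            return 0;
    }

With this patch, bpftool's probe_map_type() does essentially the same for
every entry of map_type_name[].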
Sample output:
# bpftool feature probe kernel
...
Scanning eBPF map types...
eBPF map_type hash is available
eBPF map_type array is available
eBPF map_type prog_array is available
...
# bpftool --json --pretty feature probe kernel
{
...
"map_types": {
"have_hash_map_type": true,
"have_array_map_type": true,
"have_prog_array_map_type": true,
...
}
}
v3:
- Use a switch listing all enum values for setting type-specific map
  parameters, so that gcc complains at compile time (-Wswitch-enum) if new
  map types are added to the kernel but libbpf is not updated (the pattern
  is illustrated in the short sketch after this changelog).
v2:
- Move probes from bpftool to libbpf.
- Remove C-style macros output from this patch.
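A short generic illustration of the -Wswitch-enum pattern mentioned in the
v3 note above (a sketch, not code from this patch):

    enum example_type { EX_A, EX_B, EX_C };

    static int example_needs_flag(enum example_type t)
    {
            switch (t) {    /* built with -Wswitch-enum */
            case EX_B:
                    return 1;
            case EX_A:
            case EX_C:
            default:        /* default does not silence -Wswitch-enum */
                    return 0;
            }
    }

If a new EX_D value is later added to the enum, gcc emits a warning roughly
like "enumeration value 'EX_D' not handled in switch", even though a
default label is present. This is what keeps the probe code in sync with
new kernel map types.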
Signed-off-by: Quentin Monnet <quentin.monnet@...ronome.com>
---
tools/bpf/bpftool/feature.c | 26 +++++++++++
tools/bpf/bpftool/main.h | 3 ++
tools/bpf/bpftool/map.c | 4 +-
tools/lib/bpf/libbpf.h | 1 +
tools/lib/bpf/libbpf.map | 1 +
tools/lib/bpf/libbpf_probes.c | 84 +++++++++++++++++++++++++++++++++++
6 files changed, 118 insertions(+), 1 deletion(-)
diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c
index 037252486033..3d53306bb66d 100644
--- a/tools/bpf/bpftool/feature.c
+++ b/tools/bpf/bpftool/feature.c
@@ -438,6 +438,26 @@ static void probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types)
print_bool_feature(feat_name, plain_desc, res);
}
+static void probe_map_type(enum bpf_map_type map_type)
+{
+ const char *plain_comment = "eBPF map_type ";
+ char feat_name[128], plain_desc[128];
+ size_t maxlen;
+ bool res;
+
+ res = bpf_probe_map_type(map_type, 0);
+
+ maxlen = sizeof(plain_desc) - strlen(plain_comment) - 1;
+ if (strlen(map_type_name[map_type]) > maxlen) {
+ p_info("map type name too long");
+ return;
+ }
+
+ sprintf(feat_name, "have_%s_map_type", map_type_name[map_type]);
+ sprintf(plain_desc, "%s%s", plain_comment, map_type_name[map_type]);
+ print_bool_feature(feat_name, plain_desc, res);
+}
+
static int do_probe(int argc, char **argv)
{
enum probe_component target = COMPONENT_UNSPEC;
@@ -507,6 +527,12 @@ static int do_probe(int argc, char **argv)
for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++)
probe_prog_type(i, supported_types);
+ print_end_then_start_section("map_types",
+ "Scanning eBPF map types...");
+
+ for (i = BPF_MAP_TYPE_UNSPEC + 1; i < map_type_name_size; i++)
+ probe_map_type(i);
+
exit_close_json:
if (json_output) {
/* End current "section" of probes */
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index 5cfc6601de9b..d7dd84d3c660 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -75,6 +75,9 @@ static const char * const prog_type_name[] = {
[BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector",
};
+extern const char * const map_type_name[];
+extern const size_t map_type_name_size;
+
enum bpf_obj_type {
BPF_OBJ_UNKNOWN,
BPF_OBJ_PROG,
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index 2037e3dc864b..b73985589929 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -21,7 +21,7 @@
#include "json_writer.h"
#include "main.h"
-static const char * const map_type_name[] = {
+const char * const map_type_name[] = {
[BPF_MAP_TYPE_UNSPEC] = "unspec",
[BPF_MAP_TYPE_HASH] = "hash",
[BPF_MAP_TYPE_ARRAY] = "array",
@@ -48,6 +48,8 @@ static const char * const map_type_name[] = {
[BPF_MAP_TYPE_STACK] = "stack",
};
+const size_t map_type_name_size = ARRAY_SIZE(map_type_name);
+
static bool map_is_per_cpu(__u32 type)
{
return type == BPF_MAP_TYPE_PERCPU_HASH ||
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 8e63821109ab..72385f6f9415 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -365,6 +365,7 @@ bpf_prog_linfo__lfind(const struct bpf_prog_linfo *prog_linfo,
*/
LIBBPF_API bool bpf_probe_prog_type(enum bpf_prog_type prog_type,
__u32 ifindex);
+LIBBPF_API bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex);
#ifdef __cplusplus
} /* extern "C" */
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 6355e4c80a86..c08f4c726e8e 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -56,6 +56,7 @@ LIBBPF_0.0.1 {
bpf_object__unpin_maps;
bpf_object__unpin_programs;
bpf_perf_event_read_simple;
+ bpf_probe_map_type;
bpf_probe_prog_type;
bpf_prog_attach;
bpf_prog_detach;
diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
index f08c33fa8dc9..e3d57632a975 100644
--- a/tools/lib/bpf/libbpf_probes.c
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -93,3 +93,87 @@ bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex)
return errno != EINVAL && errno != EOPNOTSUPP;
}
+
+bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
+{
+ int key_size, value_size, max_entries, map_flags;
+ struct bpf_create_map_attr attr = {};
+ int fd = -1, fd_inner;
+
+ key_size = sizeof(__u32);
+ value_size = sizeof(__u32);
+ max_entries = 1;
+ map_flags = 0;
+
+ switch (map_type) {
+ case BPF_MAP_TYPE_STACK_TRACE:
+ value_size = sizeof(__u64);
+ break;
+ case BPF_MAP_TYPE_LPM_TRIE:
+ key_size = sizeof(__u64);
+ value_size = sizeof(__u64);
+ map_flags = BPF_F_NO_PREALLOC;
+ break;
+ case BPF_MAP_TYPE_CGROUP_STORAGE:
+ case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
+ key_size = sizeof(struct bpf_cgroup_storage_key);
+ value_size = sizeof(__u64);
+ max_entries = 0;
+ break;
+ case BPF_MAP_TYPE_QUEUE:
+ case BPF_MAP_TYPE_STACK:
+ key_size = 0;
+ break;
+ case BPF_MAP_TYPE_UNSPEC:
+ case BPF_MAP_TYPE_HASH:
+ case BPF_MAP_TYPE_ARRAY:
+ case BPF_MAP_TYPE_PROG_ARRAY:
+ case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
+ case BPF_MAP_TYPE_PERCPU_HASH:
+ case BPF_MAP_TYPE_PERCPU_ARRAY:
+ case BPF_MAP_TYPE_CGROUP_ARRAY:
+ case BPF_MAP_TYPE_LRU_HASH:
+ case BPF_MAP_TYPE_LRU_PERCPU_HASH:
+ case BPF_MAP_TYPE_ARRAY_OF_MAPS:
+ case BPF_MAP_TYPE_HASH_OF_MAPS:
+ case BPF_MAP_TYPE_DEVMAP:
+ case BPF_MAP_TYPE_SOCKMAP:
+ case BPF_MAP_TYPE_CPUMAP:
+ case BPF_MAP_TYPE_XSKMAP:
+ case BPF_MAP_TYPE_SOCKHASH:
+ case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
+ default:
+ break;
+ }
+
+ if (map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
+ map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
+ /* TODO: probe for device, once libbpf has a function to create
+ * map-in-map for offload
+ */
+ if (ifindex)
+ return false;
+
+ fd_inner = bpf_create_map(BPF_MAP_TYPE_HASH,
+ sizeof(__u32), sizeof(__u32), 1, 0);
+ if (fd_inner < 0)
+ return false;
+ fd = bpf_create_map_in_map(map_type, NULL, sizeof(__u32),
+ fd_inner, 1, 0);
+ close(fd_inner);
+ } else {
+ /* Note: No other restriction on map type probes for offload */
+ attr.map_type = map_type;
+ attr.key_size = key_size;
+ attr.value_size = value_size;
+ attr.max_entries = max_entries;
+ attr.map_flags = map_flags;
+ attr.map_ifindex = ifindex;
+
+ fd = bpf_create_map_xattr(&attr);
+ }
+ if (fd >= 0)
+ close(fd);
+
+ return fd >= 0;
+}
--
2.17.1