Message-Id: <20200831224933.2129891-1-brho@google.com>
Date: Mon, 31 Aug 2020 18:49:33 -0400
From: Barret Rhoden <brho@...gle.com>
To: Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Martin KaFai Lau <kafai@...com>,
Song Liu <songliubraving@...com>, Yonghong Song <yhs@...com>,
Andrii Nakryiko <andriin@...com>,
John Fastabend <john.fastabend@...il.com>,
KP Singh <kpsingh@...omium.org>
Cc: netdev@...r.kernel.org, bpf@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [RFC PATCH] libbpf: Support setting map max_entries at runtime

The max_entries for a BPF map may depend on runtime parameters.
Currently, we need to know the maximum value at BPF compile time. For
instance, if you want an array map with NR_CPUS entries, you would hard
code your architecture's largest value for CONFIG_NR_CPUS. This wastes
memory at runtime.
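
For illustration (nothing below is part of this patch), this is roughly what
such a definition has to look like today; MAX_NR_CPUS and its value are made
up for the example and stand in for "the largest CONFIG_NR_CPUS we expect to
run on":

/* Status quo sketch: max_entries must be a compile-time constant, so the
 * array is sized for the worst case rather than for the CPUs actually
 * present at runtime.
 */
#define MAX_NR_CPUS 8192	/* example value, not a real kernel constant */

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, MAX_NR_CPUS);
	__type(key, u32);
	__type(value, struct cpu_data);
} cpu_blobs SEC(".maps");
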
For the NR_CPUS case, one could use a PERCPU map type, but those maps are
limited in functionality. For instance, BPF programs can only access
their own PERCPU part of the map, and the maps are not mmappable.
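
As an aside (again, not part of this patch), a sketch of what a plain ARRAY
map allows that a PERCPU_ARRAY does not: any CPU can look up any other CPU's
entry. It assumes the usual bpf_helpers.h includes plus the cpu_blobs map and
struct cpu_data from the examples in this message; the "counter" field is
invented for the example.

SEC("tracepoint/sched/sched_switch")
int count_switches(void *ctx)
{
	__u32 cpu0 = 0;
	struct cpu_data *d;

	/* read/update CPU 0's blob from whichever CPU we are running on */
	d = bpf_map_lookup_elem(&cpu_blobs, &cpu0);
	if (d)
		__sync_fetch_and_add(&d->counter, 1);
	return 0;
}
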
This commit allows the use of sentinel values in BPF map definitions,
which libbpf patches at runtime.

For starters, we support NUM_POSSIBLE_CPUS: e.g.

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, NUM_POSSIBLE_CPUS);
	__type(key, u32);
	__type(value, struct cpu_data);
} cpu_blobs SEC(".maps");
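
As a usage sketch (not part of this patch), a loader can confirm the adjusted
size after opening the object. The object file name "cpu_map.bpf.o" is made
up for the example; the libbpf calls are the existing ones:

#include <stdio.h>
#include <bpf/libbpf.h>

int main(void)
{
	struct bpf_object *obj;
	struct bpf_map *map;

	/* With this patch, the NUM_POSSIBLE_CPUS sentinel is resolved while
	 * the object is opened (parse_btf_map_def runs during open), so the
	 * definition libbpf reports already reflects the runtime CPU count.
	 */
	obj = bpf_object__open_file("cpu_map.bpf.o", NULL);
	if (libbpf_get_error(obj))
		return 1;

	map = bpf_object__find_map_by_name(obj, "cpu_blobs");
	if (!map)
		return 1;

	printf("max_entries = %u (possible CPUs: %d)\n",
	       bpf_map__def(map)->max_entries, libbpf_num_possible_cpus());

	bpf_object__close(obj);
	return 0;
}
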
This can be extended to other runtime-dependent values, such as the
maximum number of threads (/proc/sys/kernel/threads-max).
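
For instance, a hypothetical THREADS_MAX sentinel (not part of this patch;
the helper below is invented for illustration) could be resolved the same
way, by reading the runtime limit from procfs before patching the map
definition:

#include <stdio.h>
#include <linux/types.h>

/* Sketch only: read /proc/sys/kernel/threads-max; returns 0 on failure so
 * the caller can fall back to leaving max_entries untouched.
 */
static __u32 get_threads_max(void)
{
	unsigned long val = 0;
	FILE *f = fopen("/proc/sys/kernel/threads-max", "r");

	if (!f)
		return 0;
	if (fscanf(f, "%lu", &val) != 1)
		val = 0;
	fclose(f);
	return (__u32)val;
}
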
Signed-off-by: Barret Rhoden <brho@...gle.com>
---
 tools/lib/bpf/bpf_helpers.h |  4 ++++
 tools/lib/bpf/libbpf.c      | 40 ++++++++++++++++++++++++++++++-------
 tools/lib/bpf/libbpf.h      |  4 ++++
 3 files changed, 41 insertions(+), 7 deletions(-)

diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index f67dce2af802..38b431d85ac6 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -74,6 +74,10 @@ enum libbpf_tristate {
 	TRI_MODULE = 2,
 };
 
+enum libbpf_max_entries {
+	NUM_POSSIBLE_CPUS = (unsigned int)-1,
+};
+
 #define __kconfig __attribute__((section(".kconfig")))
 
 #endif
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 11e4725b8b1c..7d0e9792e015 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -1868,36 +1868,55 @@ resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
  * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF
  * type definition, while using only sizeof(void *) space in ELF data section.
  */
-static bool get_map_field_int(const char *map_name, const struct btf *btf,
-			      const struct btf_member *m, __u32 *res)
+static struct btf_array *get_map_field_arr_info(const char *map_name,
+						 const struct btf *btf,
+						 const struct btf_member *m)
 {
 	const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
 	const char *name = btf__name_by_offset(btf, m->name_off);
-	const struct btf_array *arr_info;
 	const struct btf_type *arr_t;
 
 	if (!btf_is_ptr(t)) {
 		pr_warn("map '%s': attr '%s': expected PTR, got %u.\n",
 			map_name, name, btf_kind(t));
-		return false;
+		return NULL;
 	}
 
 	arr_t = btf__type_by_id(btf, t->type);
 	if (!arr_t) {
 		pr_warn("map '%s': attr '%s': type [%u] not found.\n",
 			map_name, name, t->type);
-		return false;
+		return NULL;
 	}
 	if (!btf_is_array(arr_t)) {
 		pr_warn("map '%s': attr '%s': expected ARRAY, got %u.\n",
 			map_name, name, btf_kind(arr_t));
-		return false;
+		return NULL;
 	}
-	arr_info = btf_array(arr_t);
+	return btf_array(arr_t);
+}
+
+static bool get_map_field_int(const char *map_name, const struct btf *btf,
+			      const struct btf_member *m, __u32 *res)
+{
+	const struct btf_array *arr_info;
+
+	arr_info = get_map_field_arr_info(map_name, btf, m);
+	if (arr_info == NULL)
+		return false;
 	*res = arr_info->nelems;
 	return true;
 }
 
+static void set_map_field_int(const char *map_name, const struct btf *btf,
+			      const struct btf_member *m, __u32 val)
+{
+	struct btf_array *arr_info;
+
+	arr_info = get_map_field_arr_info(map_name, btf, m);
+	arr_info->nelems = val;
+}
+
 static int build_map_pin_path(struct bpf_map *map, const char *path)
 {
 	char buf[PATH_MAX];
@@ -1951,6 +1970,13 @@ static int parse_btf_map_def(struct bpf_object *obj,
 				return -EINVAL;
 			pr_debug("map '%s': found max_entries = %u.\n",
 				 map->name, map->def.max_entries);
+			if (map->def.max_entries == NUM_POSSIBLE_CPUS) {
+				map->def.max_entries = libbpf_num_possible_cpus();
+				set_map_field_int(map->name, obj->btf, m,
+						  map->def.max_entries);
+				pr_debug("map '%s': adjusting max_entries = %u.\n",
+					 map->name, map->def.max_entries);
+			}
 		} else if (strcmp(name, "map_flags") == 0) {
 			if (!get_map_field_int(map->name, obj->btf, m,
 					       &map->def.map_flags))
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 334437af3014..42cba5bb1b04 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -717,6 +717,10 @@ enum libbpf_tristate {
 	TRI_MODULE = 2,
 };
 
+enum libbpf_max_entries {
+	NUM_POSSIBLE_CPUS = -1,
+};
+
 #ifdef __cplusplus
 } /* extern "C" */
 #endif
--
2.28.0.402.g5ffc5be6b7-goog