[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20200521191804.3448912-1-kafai@fb.com>
Date: Thu, 21 May 2020 12:18:04 -0700
From: Martin KaFai Lau <kafai@...com>
To: <bpf@...r.kernel.org>
CC: Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>, <kernel-team@...com>,
<netdev@...r.kernel.org>, Andrey Ignatov <rdna@...com>
Subject: [PATCH bpf-next 2/3] bpf: Relax the max_entries check for inner map
This patch relaxes the max_entries check for most of the inner map types
during an update to the outer map. The max_entries of those map types
are only used at runtime. By doing this, an inner map with a different
size can be updated into the outer map at runtime.
The max_entries of arraymap and xskmap are used statically
at verification time to generate the inline code, so they
are excluded from this patch.
Cc: Andrey Ignatov <rdna@...com>
Signed-off-by: Martin KaFai Lau <kafai@...com>
---
include/linux/bpf.h | 12 ++++++++++++
include/linux/bpf_types.h | 6 ++++--
kernel/bpf/map_in_map.c | 3 ++-
3 files changed, 18 insertions(+), 3 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 1e20b9911d48..1488d2aa41f2 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -99,6 +99,18 @@ struct bpf_map_memory {
/* Cannot be used as an inner map */
#define BPF_MAP_CAP_NO_INNER_MAP (1 << 0)
+/* When a prog has used map-in-map, the verifier requires
+ * an inner-map as a template to verify the access operations
+ * on the outer and inner map. For some inner map-types,
+ * the verifier uses the inner_map's max_entries statically
+ * (e.g. to generate inline code). If this verification
+ * time usage of max_entries applies to an inner map-type,
+ * then at runtime only an inner map with the same
+ * max_entries can be updated into this outer map.
+ *
+ * Please see bpf_map_meta_equal() for details.
+ */
+#define BPF_MAP_CAP_NO_DYNAMIC_INNER_MAP_SIZE (1 << 1)
struct bpf_map {
/* The first two cachelines with read-mostly members of which some
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index 652f17d646dd..4b350f9ad486 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -76,7 +76,8 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_LSM, lsm,
#endif /* CONFIG_BPF_LSM */
#endif
-BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops, 0)
+BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops,
+ BPF_MAP_CAP_NO_DYNAMIC_INNER_MAP_SIZE)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops, 0)
/* prog_array->aux->{type,jited} is a runtime binding.
* Doing static check alone in the verifier is not enough,
@@ -114,7 +115,8 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKHASH, sock_hash_ops, 0)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops, 0)
#if defined(CONFIG_XDP_SOCKETS)
-BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops, 0)
+BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops,
+ BPF_MAP_CAP_NO_DYNAMIC_INNER_MAP_SIZE)
#endif
#ifdef CONFIG_INET
BPF_MAP_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, reuseport_array_ops, 0)
diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
index 6e1286ad7b76..bee1fcfd64f2 100644
--- a/kernel/bpf/map_in_map.c
+++ b/kernel/bpf/map_in_map.c
@@ -77,7 +77,8 @@ bool bpf_map_meta_equal(const struct bpf_map *meta0,
meta0->key_size == meta1->key_size &&
meta0->value_size == meta1->value_size &&
meta0->map_flags == meta1->map_flags &&
- meta0->max_entries == meta1->max_entries;
+ (meta0->max_entries == meta1->max_entries ||
+ !(meta0->capability & BPF_MAP_CAP_NO_DYNAMIC_INNER_MAP_SIZE));
}
void *bpf_map_fd_get_ptr(struct bpf_map *map,
--
2.24.1
Powered by blists - more mailing lists