Message-ID: <05f4a0a349468b106362e1b96e2aaf970fcafb79.1496189155.git.kafai@fb.com>
Date: Tue, 30 May 2017 17:08:31 -0700
From: Martin KaFai Lau <kafai@...com>
To: <netdev@...r.kernel.org>
CC: Alexei Starovoitov <ast@...com>,
Daniel Borkmann <daniel@...earbox.net>, <kernel-team@...com>
Subject: [PATCH net-next 2/8] bpf: Introduce bpf_map ID

This patch generates a unique ID for each created bpf_map.
The approach is similar to the earlier patch for bpf_prog ID.

It is worth noting that the bpf_map ID and the bpf_prog ID are in
two independent ID spaces and both have the same valid range:
[1, INT_MAX).
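
To illustrate the allocation scheme (not part of this patch): below is a
minimal, hypothetical userspace sketch of cyclic ID allocation, mirroring
the semantics of idr_alloc_cyclic() used in the diff -- the search for a
free ID starts just after the most recently assigned one and wraps around,
so a freed ID is not reused immediately. The table size is shrunk from
INT_MAX so the wraparound is visible; all names here are made up:

#include <stdio.h>

#define MAX_SLOTS 8			/* stand-in for INT_MAX - 1 */

static void *slots[MAX_SLOTS + 1];	/* index 0 unused; IDs are 1..MAX_SLOTS */
static int next_id = 1;

static int id_alloc_cyclic(void *ptr)
{
	int i, id;

	for (i = 0; i < MAX_SLOTS; i++) {
		id = next_id;
		next_id = (next_id % MAX_SLOTS) + 1;	/* wrap within [1, MAX_SLOTS] */
		if (!slots[id]) {
			slots[id] = ptr;
			return id;
		}
	}
	return -1;	/* all IDs in use, like -ENOSPC */
}

static void id_free(int id)
{
	slots[id] = NULL;
}

int main(void)
{
	int dummy;
	int a = id_alloc_cyclic(&dummy);	/* gets 1 */
	int b = id_alloc_cyclic(&dummy);	/* gets 2 */

	id_free(a);
	/* cyclic: ID 1 is free again, but the next allocation gets 3 */
	printf("a=%d b=%d next=%d\n", a, b, id_alloc_cyclic(&dummy));
	return 0;
}
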
Signed-off-by: Martin KaFai Lau <kafai@...com>
Acked-by: Alexei Starovoitov <ast@...com>
Acked-by: Daniel Borkmann <daniel@...earbox.net>
---
 include/linux/bpf.h  |  1 +
 kernel/bpf/syscall.c | 34 +++++++++++++++++++++++++++++++++-
 2 files changed, 34 insertions(+), 1 deletion(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index c2793a732edc..ea78d87cbc3e 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -46,6 +46,7 @@ struct bpf_map {
 	u32 max_entries;
 	u32 map_flags;
 	u32 pages;
+	u32 id;
 	struct user_struct *user;
 	const struct bpf_map_ops *ops;
 	struct work_struct work;
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 697bdb6ceceb..a6fe034dbc09 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -27,6 +27,8 @@
 DEFINE_PER_CPU(int, bpf_prog_active);
 DEFINE_IDR(prog_idr);
 DEFINE_SPINLOCK(prog_idr_lock);
+DEFINE_IDR(map_idr);
+DEFINE_SPINLOCK(map_idr_lock);
 
 int sysctl_unprivileged_bpf_disabled __read_mostly;
 
@@ -117,6 +119,29 @@ static void bpf_map_uncharge_memlock(struct bpf_map *map)
 	free_uid(user);
 }
 
+static int bpf_map_alloc_id(struct bpf_map *map)
+{
+	int id;
+
+	spin_lock_bh(&map_idr_lock);
+	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
+	if (id > 0)
+		map->id = id;
+	spin_unlock_bh(&map_idr_lock);
+
+	if (WARN_ON_ONCE(!id))
+		return -ENOSPC;
+
+	return id > 0 ? 0 : id;
+}
+
+static void bpf_map_free_id(struct bpf_map *map)
+{
+	spin_lock_bh(&map_idr_lock);
+	idr_remove(&map_idr, map->id);
+	spin_unlock_bh(&map_idr_lock);
+}
+
 /* called from workqueue */
 static void bpf_map_free_deferred(struct work_struct *work)
 {
@@ -141,6 +166,7 @@ static void bpf_map_put_uref(struct bpf_map *map)
 void bpf_map_put(struct bpf_map *map)
 {
 	if (atomic_dec_and_test(&map->refcnt)) {
+		bpf_map_free_id(map);
 		INIT_WORK(&map->work, bpf_map_free_deferred);
 		schedule_work(&map->work);
 	}
@@ -239,14 +265,20 @@ static int map_create(union bpf_attr *attr)
 	if (err)
 		goto free_map_nouncharge;
 
+	err = bpf_map_alloc_id(map);
+	if (err)
+		goto free_map;
+
 	err = bpf_map_new_fd(map);
 	if (err < 0)
 		/* failed to allocate fd */
-		goto free_map;
+		goto free_id;
 
 	trace_bpf_map_create(map, err);
 	return err;
 
+free_id:
+	bpf_map_free_id(map);
 free_map:
 	bpf_map_uncharge_memlock(map);
 free_map_nouncharge:

--
2.9.3