Message-Id: <f6ba33aaee9137cee7930abea99cf7cea8104d01.1425310176.git.tom.zanussi@linux.intel.com>
Date:	Mon,  2 Mar 2015 10:00:57 -0600
From:	Tom Zanussi <tom.zanussi@...ux.intel.com>
To:	rostedt@...dmis.org
Cc:	masami.hiramatsu.pt@...achi.com, namhyung@...nel.org,
	andi@...stfloor.org, ast@...mgrid.com,
	linux-kernel@...r.kernel.org,
	Tom Zanussi <tom.zanussi@...ux.intel.com>
Subject: [PATCH 04/15] bpf: Export bpf map functionality as tracing_map_*

Make the bpf map implementation available outside of kernel/bpf.  As a
general-purpose map implementation for tracing, it should be available
to any tracing facility that can make use of it, not just the bpf
syscall.

Create a set of exported tracing_map_* functions and have the bpf
implementation use them.

Signed-off-by: Tom Zanussi <tom.zanussi@...ux.intel.com>
---
 include/linux/bpf.h  |   7 +++
 kernel/bpf/syscall.c | 173 ++++++++++++++++++++++++++++++++++++++++++++++-----
 2 files changed, 163 insertions(+), 17 deletions(-)
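
For illustration only (this example is not part of the patch): a minimal,
hypothetical sketch of how an in-kernel tracing facility might drive the
exported tracing_map_* API, assuming a BPF_MAP_TYPE_HASH map keyed by a
u64 event id with a u64 hit count as the value.  The trace_count_map*
names below are made up for the example.

#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/types.h>

static struct bpf_map *trace_count_map;	/* hypothetical example map */

static int trace_count_map_init(void)
{
	union bpf_attr attr = {};

	/* unused attr fields stay zeroed, as tracing_map_create() expects */
	attr.map_type	 = BPF_MAP_TYPE_HASH;
	attr.key_size	 = sizeof(u64);
	attr.value_size	 = sizeof(u64);
	attr.max_entries = 1024;

	trace_count_map = tracing_map_create(&attr);
	if (IS_ERR(trace_count_map))
		return PTR_ERR(trace_count_map);

	return 0;
}

static void trace_count_map_hit(u64 event_id)
{
	union bpf_attr attr = {};
	u64 count = 0;

	/* read the current count (if any), then write back count + 1;
	 * a real facility would need to deal with concurrent updaters
	 */
	if (tracing_map_lookup_elem(trace_count_map, &event_id, &count))
		count = 0;
	count++;

	attr.flags = BPF_ANY;	/* create the element or update it in place */
	tracing_map_update_elem(trace_count_map, &event_id, &count, &attr);
}

static void trace_count_map_exit(void)
{
	tracing_map_destroy(trace_count_map);
}

The attr is zero-initialized before setting only the fields each call uses,
since tracing_map_create() and tracing_map_update_elem() trust kernel
callers to have cleared the unused fields.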

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index bbfceb7..900405bf 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -142,4 +142,11 @@ extern struct bpf_func_proto bpf_map_lookup_elem_proto;
 extern struct bpf_func_proto bpf_map_update_elem_proto;
 extern struct bpf_func_proto bpf_map_delete_elem_proto;
 
+struct bpf_map *tracing_map_create(union bpf_attr *attr);
+void tracing_map_destroy(struct bpf_map *map);
+int tracing_map_update_elem(struct bpf_map *map, void *key, void *value,
+			    union bpf_attr *attr);
+int tracing_map_lookup_elem(struct bpf_map *map, void *key, void *uvalue);
+int tracing_map_delete_elem(struct bpf_map *map, void *key);
+int tracing_map_get_next_key(struct bpf_map *map, void *key, void *next_key);
 #endif /* _LINUX_BPF_H */
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 088ac0b..cac8df6 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -63,11 +63,24 @@ void bpf_map_put(struct bpf_map *map)
 	}
 }
 
+/**
+ * tracing_map_destroy - Destroy (release) a bpf_map instance
+ * @map: The bpf_map to destroy
+ *
+ * Release a reference to a bpf_map.  This decrements the map's
+ * refcount and, once it reaches zero, schedules the map for
+ * deletion.  The caller must not use the map after this call.
+ */
+void tracing_map_destroy(struct bpf_map *map)
+{
+	bpf_map_put(map);
+}
+
 static int bpf_map_release(struct inode *inode, struct file *filp)
 {
 	struct bpf_map *map = filp->private_data;
 
-	bpf_map_put(map);
+	tracing_map_destroy(map);
 	return 0;
 }
 
@@ -84,6 +97,32 @@ static const struct file_operations bpf_map_fops = {
 		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
 
 #define BPF_MAP_CREATE_LAST_FIELD max_entries
+
+/**
+ * tracing_map_create - Create a bpf_map instance
+ * @attr: The bpf_attr for the operation
+ *
+ * Create a bpf_map of the type specified by attr.
+ *
+ * Note that it's assumed that unused attr fields have been zeroed and
+ * that kernel code can be trusted to have done so before calling this
+ * function.
+ *
+ * Return: the created map on success, an ERR_PTR-encoded error otherwise
+ */
+struct bpf_map *tracing_map_create(union bpf_attr *attr)
+{
+	struct bpf_map *map;
+
+	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
+	map = find_and_alloc_map(attr);
+	if (!IS_ERR(map))
+		atomic_set(&map->refcnt, 1);
+
+	return map;
+}
+EXPORT_SYMBOL_GPL(tracing_map_create);
+
 /* called via syscall */
 static int map_create(union bpf_attr *attr)
 {
@@ -94,13 +133,10 @@ static int map_create(union bpf_attr *attr)
 	if (err)
 		return -EINVAL;
 
-	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
-	map = find_and_alloc_map(attr);
+	map = tracing_map_create(attr);
 	if (IS_ERR(map))
 		return PTR_ERR(map);
 
-	atomic_set(&map->refcnt, 1);
-
 	err = anon_inode_getfd("bpf-map", &bpf_map_fops, map, O_RDWR | O_CLOEXEC);
 
 	if (err < 0)
@@ -140,6 +176,40 @@ static void __user *u64_to_ptr(__u64 val)
 	return (void __user *) (unsigned long) val;
 }
 
+/**
+ * tracing_map_lookup_elem - Perform a bpf_map lookup
+ * @map: The bpf_map to perform the lookup on
+ * @key: The key to look up
+ * @uvalue: The caller-owned memory to be filled in with the found value
+ *
+ * Look up the specified key in the bpf_map and if found copy the
+ * value into the uvalue specified by the caller.  The uvalue buffer
+ * must be at least map->value_size bytes.
+ *
+ * Return: 0 on success, -ENOENT if key not found
+ */
+int tracing_map_lookup_elem(struct bpf_map *map, void *key, void *uvalue)
+{
+	int err = -ENOENT;
+	void *value;
+
+	rcu_read_lock();
+
+	value = map->ops->map_lookup_elem(map, key);
+	if (!value)
+		goto err_unlock;
+
+	memcpy(uvalue, value, map->value_size);
+
+	err = 0;
+
+err_unlock:
+	rcu_read_unlock();
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(tracing_map_lookup_elem);
+
 /* last field in 'union bpf_attr' used by this command */
 #define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value
 
@@ -190,6 +260,39 @@ err_put:
 	return err;
 }
 
+/**
+ * tracing_map_update_elem - Update or add a bpf_map element
+ * @map: The bpf_map to update
+ * @key: The key to update or add
+ * @value: The value to update or add
+ * @attr: The bpf_attr for the operation
+ *
+ * Update or add a value for the specified key in the bpf_map.  Both
+ * the key and value are caller-owned memory; the map stores its own
+ * copies of them.
+ *
+ * Note that it's assumed that unused attr fields have been zeroed and
+ * that kernel code can be trusted to have done so before calling this
+ * function.
+ *
+ * Return: 0 on success, a negative errno otherwise
+ */
+int tracing_map_update_elem(struct bpf_map *map, void *key, void *value,
+			    union bpf_attr *attr)
+{
+	int err;
+
+	/* eBPF programs that use maps run under rcu_read_lock(), and
+	 * all map accessors rely on that fact, so do the same here
+	 */
+	rcu_read_lock();
+	err = map->ops->map_update_elem(map, key, value, attr->flags);
+	rcu_read_unlock();
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(tracing_map_update_elem);
+
 #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
 
 static int map_update_elem(union bpf_attr *attr)
@@ -227,12 +330,7 @@ static int map_update_elem(union bpf_attr *attr)
 	if (copy_from_user(value, uvalue, map->value_size) != 0)
 		goto free_value;
 
-	/* eBPF program that use maps are running under rcu_read_lock(),
-	 * therefore all map accessors rely on this fact, so do the same here
-	 */
-	rcu_read_lock();
-	err = map->ops->map_update_elem(map, key, value, attr->flags);
-	rcu_read_unlock();
+	err = tracing_map_update_elem(map, key, value, attr);
 
 free_value:
 	kfree(value);
@@ -243,6 +341,27 @@ err_put:
 	return err;
 }
 
+/**
+ * tracing_map_delete_elem - Delete an element from a bpf_map
+ * @map: The bpf_map to delete the element from
+ * @key: The key to delete
+ *
+ * Delete the bpf_map element with the specified key.
+ *
+ * Return: 0 on success, -ENOENT if key not found
+ */
+int tracing_map_delete_elem(struct bpf_map *map, void *key)
+{
+	int err;
+
+	rcu_read_lock();
+	err = map->ops->map_delete_elem(map, key);
+	rcu_read_unlock();
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(tracing_map_delete_elem);
+
 #define BPF_MAP_DELETE_ELEM_LAST_FIELD key
 
 static int map_delete_elem(union bpf_attr *attr)
@@ -270,9 +389,7 @@ static int map_delete_elem(union bpf_attr *attr)
 	if (copy_from_user(key, ukey, map->key_size) != 0)
 		goto free_key;
 
-	rcu_read_lock();
-	err = map->ops->map_delete_elem(map, key);
-	rcu_read_unlock();
+	err = tracing_map_delete_elem(map, key);
 
 free_key:
 	kfree(key);
@@ -281,6 +398,30 @@ err_put:
 	return err;
 }
 
+/**
+ * tracing_map_get_next_key - Get the next key in the bpf_map
+ * @map: The bpf_map to get the next key from
+ * @key: The key to look up
+ * @next_key: The caller-owned memory to be filled in with the next key
+ *
+ * Look up the specified key in the bpf_map and if found copy the next
+ * key into the next_key specified by the caller.  If the key isn't
+ * found, just copy in the first key found.
+ *
+ * Return: 0 on success, -ENOENT if there is no next key
+ */
+int tracing_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
+{
+	int err;
+
+	rcu_read_lock();
+	err = map->ops->map_get_next_key(map, key, next_key);
+	rcu_read_unlock();
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(tracing_map_get_next_key);
+
 /* last field in 'union bpf_attr' used by this command */
 #define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key
 
@@ -315,9 +456,7 @@ static int map_get_next_key(union bpf_attr *attr)
 	if (!next_key)
 		goto free_key;
 
-	rcu_read_lock();
-	err = map->ops->map_get_next_key(map, key, next_key);
-	rcu_read_unlock();
+	err = tracing_map_get_next_key(map, key, next_key);
 	if (err)
 		goto free_next_key;
 
-- 
1.9.3

