Message-ID: <20200408232523.2675550-1-yhs@fb.com>
Date: Wed, 8 Apr 2020 16:25:23 -0700
From: Yonghong Song <yhs@...com>
To: Andrii Nakryiko <andriin@...com>, <bpf@...r.kernel.org>,
Martin KaFai Lau <kafai@...com>, <netdev@...r.kernel.org>
CC: Alexei Starovoitov <ast@...com>,
Daniel Borkmann <daniel@...earbox.net>, <kernel-team@...com>
Subject: [RFC PATCH bpf-next 03/16] bpf: provide a way for targets to register themselves
Here, a target refers to a particular kernel data structure
we want to dump. For example, it can be all task_structs in
the current pid namespace, or all open files for all
task_structs in the current pid namespace.
Each target is identified by the following information:
  target_rel_path <=== path relative to /sys/kernel/bpfdump
  target_proto    <=== kernel function prototype which represents
                       the bpf program signature for this target
  seq_ops         <=== seq_ops for seq_file operations
  seq_priv_size   <=== seq_file private data size
  target_feature  <=== target-specific feature which needs
                       handling outside seq_ops.
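For illustration, a target could register itself with these
parameters as in the sketch below; task_seq_ops, struct
task_seq_priv_data and the "bpfdump__task" prototype name are
hypothetical stand-ins, not something this patch defines:

  /* Hypothetical sketch: a "task" target registering at init time. */
  static int __init task_dump_init(void)
  {
          return bpf_dump_reg_target("task", "bpfdump__task",
                                     &task_seq_ops,
                                     sizeof(struct task_seq_priv_data),
                                     0);
  }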
The target relative path is a directory path relative to /sys/kernel/bpfdump/.
For example, it could be:
  task                  <=== all tasks
  task/file             <=== all open files under all tasks
  ipv6_route            <=== all ipv6_routes
  tcp6/sk_local_storage <=== all tcp6 socket local storages
  foo/bar/tar           <=== all tar's in bar in foo
The "target_feature" is mostly used for reusing existing seq_ops.
For example, for /proc/net/<> stats, the "net" namespace is often
stored in file private data. The target_feature enables bpf based
dumper to set "net" properly for itself before calling shared
seq_ops.
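As a hedged sketch of such reuse (the tcp6_seq_ops name and the
BPFDUMP_FEATURE_NETNS flag below are assumptions for illustration,
not defined by this patch):

  /* Illustrative only: reuse existing seq_ops and ask the bpfdump
   * core, via a hypothetical BPFDUMP_FEATURE_NETNS target_feature,
   * to set the "net" namespace in the seq_file private data before
   * the shared seq_ops run.
   */
  bpf_dump_reg_target("tcp6", "bpfdump__tcp6", &tcp6_seq_ops,
                      sizeof(struct seq_net_private),
                      BPFDUMP_FEATURE_NETNS);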
bpf_dump_reg_target() is implemented so targets can register
themselves. Currently, modules are not supported, so there is
no bpf_dump_unreg_target(). The main reason is that BTF is not
yet available for modules.
Since a target might call bpf_dump_reg_target() before the
bpfdump mount point is created, __bpfdump_init() may be called
from bpf_dump_reg_target() as well.
The file-based dumpers will be regular files under the
specific target directory. For example:
  task/my1      <=== dumper "my1" iterates through all tasks
  task/file/my2 <=== dumper "my2" iterates through all open files
                     under all tasks
Signed-off-by: Yonghong Song <yhs@...com>
---
include/linux/bpf.h | 4 +
kernel/bpf/dump.c | 190 +++++++++++++++++++++++++++++++++++++++++++-
2 files changed, 193 insertions(+), 1 deletion(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index fd2b2322412d..53914bec7590 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1109,6 +1109,10 @@ struct bpf_link *bpf_link_get_from_fd(u32 ufd);
int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname, int flags);
+int bpf_dump_reg_target(const char *target, const char *target_proto,
+ const struct seq_operations *seq_ops,
+ u32 seq_priv_size, u32 target_feature);
+
int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
diff --git a/kernel/bpf/dump.c b/kernel/bpf/dump.c
index e0c33486e0e7..45528846557f 100644
--- a/kernel/bpf/dump.c
+++ b/kernel/bpf/dump.c
@@ -12,6 +12,173 @@
#include <linux/filter.h>
#include <linux/bpf.h>
+struct bpfdump_target_info {
+ struct list_head list;
+ const char *target;
+ const char *target_proto;
+ struct dentry *dir_dentry;
+ const struct seq_operations *seq_ops;
+ u32 seq_priv_size;
+ u32 target_feature;
+};
+
+struct bpfdump_targets {
+ struct list_head dumpers;
+ struct mutex dumper_mutex;
+};
+
+/* registered dump targets */
+static struct bpfdump_targets dump_targets;
+
+static struct dentry *bpfdump_dentry;
+
+static struct dentry *bpfdump_add_dir(const char *name, struct dentry *parent,
+ const struct inode_operations *i_ops,
+ void *data);
+static int __bpfdump_init(void);
+
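+/* The inode's i_private holds registration info allocated at
+ * creation time; free it when the file is unlinked.
+ */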
+static int dumper_unlink(struct inode *dir, struct dentry *dentry)
+{
+ kfree(d_inode(dentry)->i_private);
+ return simple_unlink(dir, dentry);
+}
+
+static const struct inode_operations bpf_dir_iops = {
+ .lookup = simple_lookup,
+ .unlink = dumper_unlink,
+};
+
+int bpf_dump_reg_target(const char *target,
+ const char *target_proto,
+ const struct seq_operations *seq_ops,
+ u32 seq_priv_size, u32 target_feature)
+{
+ struct bpfdump_target_info *tinfo, *ptinfo;
+ struct dentry *dentry, *parent;
+ const char *lastslash;
+ bool existed = false;
+ int err, parent_len;
+
+ if (!bpfdump_dentry) {
+ err = __bpfdump_init();
+ if (err)
+ return err;
+ }
+
+ tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
+ if (!tinfo)
+ return -ENOMEM;
+
+ tinfo->target = target;
+ tinfo->target_proto = target_proto;
+ tinfo->seq_ops = seq_ops;
+ tinfo->seq_priv_size = seq_priv_size;
+ tinfo->target_feature = target_feature;
+ INIT_LIST_HEAD(&tinfo->list);
+
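+ /* The last path component names the new target itself; any
+ * prefix before it must match an already-registered parent
+ * target.
+ */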
+ lastslash = strrchr(target, '/');
+ if (!lastslash) {
+ parent = bpfdump_dentry;
+ } else {
+ parent_len = (unsigned long)lastslash - (unsigned long)target;
+
+ mutex_lock(&dump_targets.dumper_mutex);
+ list_for_each_entry(ptinfo, &dump_targets.dumpers, list) {
+ if (strlen(ptinfo->target) == parent_len &&
+ strncmp(ptinfo->target, target, parent_len) == 0) {
+ existed = true;
+ break;
+ }
+ }
+ mutex_unlock(&dump_targets.dumper_mutex);
+ if (!existed) {
+ err = -ENOENT;
+ goto free_tinfo;
+ }
+
+ parent = ptinfo->dir_dentry;
+ target = lastslash + 1;
+ }
+ dentry = bpfdump_add_dir(target, parent, &bpf_dir_iops, tinfo);
+ if (IS_ERR(dentry)) {
+ err = PTR_ERR(dentry);
+ goto free_tinfo;
+ }
+
+ tinfo->dir_dentry = dentry;
+
+ mutex_lock(&dump_targets.dumper_mutex);
+ list_add(&tinfo->list, &dump_targets.dumpers);
+ mutex_unlock(&dump_targets.dumper_mutex);
+ return 0;
+
+free_tinfo:
+ kfree(tinfo);
+ return err;
+}
+
+static struct dentry *
+bpfdump_create_dentry(const char *name, umode_t mode, struct dentry *parent,
+ void *data, const struct inode_operations *i_ops,
+ const struct file_operations *f_ops)
+{
+ struct inode *dir, *inode;
+ struct dentry *dentry;
+ int err;
+
+ dir = d_inode(parent);
+
+ inode_lock(dir);
+ dentry = lookup_one_len(name, parent, strlen(name));
+ if (IS_ERR(dentry))
+ goto unlock;
+
+ if (d_really_is_positive(dentry)) {
+ err = -EEXIST;
+ goto dentry_put;
+ }
+
+ inode = new_inode(dir->i_sb);
+ if (!inode) {
+ err = -ENOMEM;
+ goto dentry_put;
+ }
+
+ inode->i_ino = get_next_ino();
+ inode->i_mode = mode;
+ inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_private = data;
+
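+ /* A directory's link count also covers ".", and the parent
+ * gains a link for "..".
+ */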
+ if (S_ISDIR(mode)) {
+ inode->i_op = i_ops;
+ inode->i_fop = f_ops;
+ inc_nlink(inode);
+ inc_nlink(dir);
+ } else {
+ inode->i_fop = f_ops;
+ }
+
+ d_instantiate(dentry, inode);
+ dget(dentry);
+ inode_unlock(dir);
+ return dentry;
+
+dentry_put:
+ dput(dentry);
+ dentry = ERR_PTR(err);
+unlock:
+ inode_unlock(dir);
+ return dentry;
+}
+
+static struct dentry *
+bpfdump_add_dir(const char *name, struct dentry *parent,
+ const struct inode_operations *i_ops, void *data)
+{
+ return bpfdump_create_dentry(name, S_IFDIR | 0755, parent,
+ data, i_ops, &simple_dir_operations);
+}
+
static void bpfdump_free_inode(struct inode *inode)
{
kfree(inode->i_private);
@@ -58,8 +225,10 @@ static struct file_system_type fs_type = {
.kill_sb = kill_litter_super,
};
-static int __init bpfdump_init(void)
+static int __bpfdump_init(void)
{
+ struct vfsmount *mount;
+ int mount_count;
int ret;
ret = sysfs_create_mount_point(kernel_kobj, "bpfdump");
@@ -70,10 +239,29 @@ static int __init bpfdump_init(void)
if (ret)
goto remove_mount;
+ /* get a reference to mount so we can populate targets
+ * at init time.
+ */
+ ret = simple_pin_fs(&fs_type, &mount, &mount_count);
+ if (ret)
+ goto remove_mount;
+
+ bpfdump_dentry = mount->mnt_root;
+
+ INIT_LIST_HEAD(&dump_targets.dumpers);
+ mutex_init(&dump_targets.dumper_mutex);
return 0;
remove_mount:
sysfs_remove_mount_point(kernel_kobj, "bpfdump");
return ret;
}
+
+static int __init bpfdump_init(void)
+{
+ if (bpfdump_dentry)
+ return 0;
+
+ return __bpfdump_init();
+}
core_initcall(bpfdump_init);
--
2.24.1