[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20190828072250.29828-2-jakub@cloudflare.com>
Date: Wed, 28 Aug 2019 09:22:39 +0200
From: Jakub Sitnicki <jakub@...udflare.com>
To: bpf@...r.kernel.org, netdev@...r.kernel.org
Cc: kernel-team@...udflare.com, Lorenz Bauer <lmb@...udflare.com>,
Marek Majkowski <marek@...udflare.com>
Subject: [RFCv2 bpf-next 01/12] flow_dissector: Extract attach/detach/query helpers
Move generic parts of callbacks for querying, attaching, and detaching a
single BPF program for reuse by other BPF program types.
A subsequent patch makes use of the extracted routines.
Reviewed-by: Lorenz Bauer <lmb@...udflare.com>
Signed-off-by: Jakub Sitnicki <jakub@...udflare.com>
---
include/linux/bpf.h | 8 +++++
net/core/filter.c | 73 +++++++++++++++++++++++++++++++++++++++
net/core/flow_dissector.c | 65 ++++++----------------------------
3 files changed, 92 insertions(+), 54 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 5b9d22338606..b301e0c03a8c 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -23,6 +23,7 @@ struct sock;
struct seq_file;
struct btf;
struct btf_type;
+struct mutex;
extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
@@ -1145,4 +1146,11 @@ static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
}
#endif /* CONFIG_INET */
+int bpf_prog_query_one(struct bpf_prog __rcu **pprog,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+int bpf_prog_attach_one(struct bpf_prog __rcu **pprog, struct mutex *lock,
+ struct bpf_prog *prog, u32 flags);
+int bpf_prog_detach_one(struct bpf_prog __rcu **pprog, struct mutex *lock);
+
#endif /* _LINUX_BPF_H */
diff --git a/net/core/filter.c b/net/core/filter.c
index 0c1059cdad3d..a498fbaa2d50 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -8668,6 +8668,79 @@ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
return ret;
}
+/**
+ * bpf_prog_query_one - handle BPF_PROG_QUERY for a single-program attach point
+ * @pprog: RCU-protected slot that may hold the attached program
+ * @attr: syscall attributes (read: query.query_flags, query.prog_cnt,
+ *        query.prog_ids)
+ * @uattr: syscall attributes in user memory (written: query.attach_flags,
+ *         query.prog_cnt, and one prog id if requested)
+ *
+ * Reports whether a program is attached at @pprog and, if so, its id.
+ * attach_flags is always reported as 0; a single-program attach point
+ * carries no per-program flags.
+ *
+ * Return: 0 on success, -EINVAL if query_flags is set, -EFAULT if a
+ * copy to user space fails.
+ */
+int bpf_prog_query_one(struct bpf_prog __rcu **pprog,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
+ u32 prog_id, prog_cnt = 0, flags = 0;
+ struct bpf_prog *attached;
+
+ if (attr->query.query_flags)
+ return -EINVAL;
+
+ /* Snapshot the attached program's id under RCU; the program may be
+  * detached (and freed) as soon as the read side is left, so only the
+  * id is carried out of the critical section.
+  */
+ rcu_read_lock();
+ attached = rcu_dereference(*pprog);
+ if (attached) {
+ prog_cnt = 1;
+ prog_id = attached->aux->id;
+ }
+ rcu_read_unlock();
+
+ if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
+ return -EFAULT;
+ if (copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
+ return -EFAULT;
+
+ /* Done if the caller did not ask for ids, supplied no buffer, or
+  * nothing is attached. prog_id is only valid when prog_cnt != 0.
+  */
+ if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)
+ return 0;
+
+ if (copy_to_user(prog_ids, &prog_id, sizeof(u32)))
+ return -EFAULT;
+
+ return 0;
+}
+
+/**
+ * bpf_prog_attach_one - attach a program to a single-program attach point
+ * @pprog: RCU-protected slot to attach into
+ * @lock: mutex serializing updates of @pprog
+ * @prog: program to attach; on success the slot takes over the caller's
+ *        reference (the caller must not put it)
+ * @flags: attach flags from the syscall; must be 0
+ *
+ * Return: 0 on success, -EINVAL if @flags is non-zero, -EEXIST if a
+ * program is already attached.
+ */
+int bpf_prog_attach_one(struct bpf_prog __rcu **pprog, struct mutex *lock,
+ struct bpf_prog *prog, u32 flags)
+{
+ struct bpf_prog *attached;
+
+ if (flags)
+ return -EINVAL;
+
+ mutex_lock(lock);
+ attached = rcu_dereference_protected(*pprog,
+ lockdep_is_held(lock));
+ if (attached) {
+ /* Only one BPF program can be attached at a time */
+ mutex_unlock(lock);
+ return -EEXIST;
+ }
+ rcu_assign_pointer(*pprog, prog);
+ mutex_unlock(lock);
+
+ return 0;
+}
+
+/**
+ * bpf_prog_detach_one - detach the program from a single-program attach point
+ * @pprog: RCU-protected slot to detach from
+ * @lock: mutex serializing updates of @pprog
+ *
+ * Clears the slot and drops the reference the slot held on the program.
+ *
+ * NOTE(review): this clears the pointer before bpf_prog_put(), the
+ * reverse of the flow_dissector code this replaces (which put first,
+ * then cleared) — presumably intentional, but worth confirming the
+ * patch is meant to be a pure code move with no ordering change.
+ *
+ * Return: 0 on success, -ENOENT if nothing was attached.
+ */
+int bpf_prog_detach_one(struct bpf_prog __rcu **pprog, struct mutex *lock)
+{
+ struct bpf_prog *attached;
+
+ mutex_lock(lock);
+ attached = rcu_dereference_protected(*pprog,
+ lockdep_is_held(lock));
+ if (!attached) {
+ mutex_unlock(lock);
+ return -ENOENT;
+ }
+ RCU_INIT_POINTER(*pprog, NULL);
+ bpf_prog_put(attached);
+ mutex_unlock(lock);
+
+ return 0;
+}
+
#ifdef CONFIG_INET
struct sk_reuseport_kern {
struct sk_buff *skb;
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 9741b593ea53..c51602158906 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -73,80 +73,37 @@ EXPORT_SYMBOL(skb_flow_dissector_init);
int skb_flow_dissector_prog_query(const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
- __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
- u32 prog_id, prog_cnt = 0, flags = 0;
- struct bpf_prog *attached;
struct net *net;
-
- if (attr->query.query_flags)
- return -EINVAL;
+ int ret;
net = get_net_ns_by_fd(attr->query.target_fd);
if (IS_ERR(net))
return PTR_ERR(net);
- rcu_read_lock();
- attached = rcu_dereference(net->flow_dissector_prog);
- if (attached) {
- prog_cnt = 1;
- prog_id = attached->aux->id;
- }
- rcu_read_unlock();
+ /* Generic single-program query logic now lives in bpf_prog_query_one();
+  * only the netns lookup/refcounting stays flow_dissector-specific.
+  */
+ ret = bpf_prog_query_one(&net->flow_dissector_prog, attr, uattr);
put_net(net);
-
- if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
- return -EFAULT;
- if (copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
- return -EFAULT;
-
- if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)
- return 0;
-
- if (copy_to_user(prog_ids, &prog_id, sizeof(u32)))
- return -EFAULT;
-
- return 0;
+ return ret;
}
int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr,
struct bpf_prog *prog)
{
- struct bpf_prog *attached;
- struct net *net;
+ struct net *net = current->nsproxy->net_ns;
- net = current->nsproxy->net_ns;
- mutex_lock(&flow_dissector_mutex);
- attached = rcu_dereference_protected(net->flow_dissector_prog,
- lockdep_is_held(&flow_dissector_mutex));
- if (attached) {
- /* Only one BPF program can be attached at a time */
- mutex_unlock(&flow_dissector_mutex);
- return -EEXIST;
- }
- rcu_assign_pointer(net->flow_dissector_prog, prog);
- mutex_unlock(&flow_dissector_mutex);
- return 0;
+ /* Attach to the current netns; generic single-program attach
+  * (flags check, -EEXIST on occupied slot) is in bpf_prog_attach_one().
+  */
+ return bpf_prog_attach_one(&net->flow_dissector_prog,
+ &flow_dissector_mutex, prog,
+ attr->attach_flags);
}
int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
{
- struct bpf_prog *attached;
- struct net *net;
+ struct net *net = current->nsproxy->net_ns;
- net = current->nsproxy->net_ns;
- mutex_lock(&flow_dissector_mutex);
- attached = rcu_dereference_protected(net->flow_dissector_prog,
- lockdep_is_held(&flow_dissector_mutex));
- if (!attached) {
- mutex_unlock(&flow_dissector_mutex);
- return -ENOENT;
- }
- bpf_prog_put(attached);
- RCU_INIT_POINTER(net->flow_dissector_prog, NULL);
- mutex_unlock(&flow_dissector_mutex);
- return 0;
+ /* Delegate to the generic helper. NOTE(review): the helper clears the
+  * RCU pointer before bpf_prog_put(), whereas the removed code put the
+  * prog first — confirm this ordering change is intended by the move.
+  */
+ return bpf_prog_detach_one(&net->flow_dissector_prog,
+ &flow_dissector_mutex);
}
+
/**
* skb_flow_get_be16 - extract be16 entity
* @skb: sk_buff to extract from
--
2.20.1
Powered by blists - more mailing lists