Message-Id: <20190522205353.140648-3-sdf@google.com>
Date: Wed, 22 May 2019 13:53:52 -0700
From: Stanislav Fomichev <sdf@...gle.com>
To: netdev@...r.kernel.org, bpf@...r.kernel.org
Cc: davem@...emloft.net, ast@...nel.org, daniel@...earbox.net,
Stanislav Fomichev <sdf@...gle.com>,
Roman Gushchin <guro@...com>
Subject: [PATCH bpf-next v2 3/4] bpf: cgroup: properly use bpf_prog_array api
Now that we don't have __rcu markers on the bpf_prog_array helpers,
let's use proper rcu_dereference_protected to obtain the array pointer
while holding cgroup_mutex.
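
For reference, this is the standard update-side pattern: because the
mutex serializes all writers, the pointer can be loaded without an RCU
read-side section, and lockdep verifies the lock is actually held. A
minimal sketch, with hypothetical struct foo/my_mutex/my_head names:

	struct foo *p;

	mutex_lock(&my_mutex);
	/* No rcu_read_lock() needed: my_mutex excludes all other
	 * writers, and lockdep checks that it is held at this point.
	 */
	p = rcu_dereference_protected(my_head, lockdep_is_held(&my_mutex));
	mutex_unlock(&my_mutex);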
We also don't need the __rcu annotation on cgroup_bpf.inactive since
it's not read or updated concurrently.
v2:
* replace xchg with rcu_swap_protected
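
For context, rcu_swap_protected(rcu_ptr, ptr, c) publishes ptr via
rcu_assign_pointer() and hands the previous value of rcu_ptr back
through ptr, so the caller ends up owning the old array. A minimal
sketch, with hypothetical my_head/new_array names:

	struct bpf_prog_array *p = new_array;

	rcu_swap_protected(my_head, p, lockdep_is_held(&cgroup_mutex));
	/* my_head now points at new_array; p holds the previous array,
	 * which may only be freed after an RCU grace period.
	 */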
Cc: Roman Gushchin <guro@...com>
Signed-off-by: Stanislav Fomichev <sdf@...gle.com>
---
 include/linux/bpf-cgroup.h |  2 +-
 kernel/bpf/cgroup.c        | 30 +++++++++++++++++++-----------
 2 files changed, 20 insertions(+), 12 deletions(-)

diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index cb3c6b3b89c8..94a7bca3a6c4 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -71,7 +71,7 @@ struct cgroup_bpf {
 	u32 flags[MAX_BPF_ATTACH_TYPE];
 
 	/* temp storage for effective prog array used by prog_attach/detach */
-	struct bpf_prog_array __rcu *inactive;
+	struct bpf_prog_array *inactive;
 };
 
 void cgroup_bpf_put(struct cgroup *cgrp);
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index fcde0f7b2585..67525683e982 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -22,6 +22,12 @@
 DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
 EXPORT_SYMBOL(cgroup_bpf_enabled_key);
 
+#define cgroup_rcu_dereference(p)					\
+	rcu_dereference_protected(p, lockdep_is_held(&cgroup_mutex))
+
+#define cgroup_rcu_swap(rcu_ptr, ptr)					\
+	rcu_swap_protected(rcu_ptr, ptr, lockdep_is_held(&cgroup_mutex))
+
 /**
  * cgroup_bpf_put() - put references of all bpf programs
  * @cgrp: the cgroup to modify
@@ -29,6 +35,7 @@ EXPORT_SYMBOL(cgroup_bpf_enabled_key);
 void cgroup_bpf_put(struct cgroup *cgrp)
 {
 	enum bpf_cgroup_storage_type stype;
+	struct bpf_prog_array *old_array;
 	unsigned int type;
 
 	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
@@ -45,7 +52,8 @@ void cgroup_bpf_put(struct cgroup *cgrp)
 			kfree(pl);
 			static_branch_dec(&cgroup_bpf_enabled_key);
 		}
-		bpf_prog_array_free(cgrp->bpf.effective[type]);
+		old_array = cgroup_rcu_dereference(cgrp->bpf.effective[type]);
+		bpf_prog_array_free(old_array);
 	}
 }
 
@@ -101,7 +109,7 @@ static bool hierarchy_allows_attach(struct cgroup *cgrp,
  */
 static int compute_effective_progs(struct cgroup *cgrp,
 				   enum bpf_attach_type type,
-				   struct bpf_prog_array __rcu **array)
+				   struct bpf_prog_array **array)
 {
 	enum bpf_cgroup_storage_type stype;
 	struct bpf_prog_array *progs;
@@ -139,17 +147,15 @@ static int compute_effective_progs(struct cgroup *cgrp,
 		}
 	} while ((p = cgroup_parent(p)));
 
-	rcu_assign_pointer(*array, progs);
+	*array = progs;
 	return 0;
 }
 
 static void activate_effective_progs(struct cgroup *cgrp,
 				     enum bpf_attach_type type,
-				     struct bpf_prog_array __rcu *array)
+				     struct bpf_prog_array *old_array)
 {
-	struct bpf_prog_array __rcu *old_array;
-
-	old_array = xchg(&cgrp->bpf.effective[type], array);
+	cgroup_rcu_swap(cgrp->bpf.effective[type], old_array);
 	/* free prog array after grace period, since __cgroup_bpf_run_*()
 	 * might be still walking the array
 	 */
@@ -166,7 +172,7 @@ int cgroup_bpf_inherit(struct cgroup *cgrp)
 	 * that array below is variable length
 	 */
 #define NR ARRAY_SIZE(cgrp->bpf.effective)
-	struct bpf_prog_array __rcu *arrays[NR] = {};
+	struct bpf_prog_array *arrays[NR] = {};
 	int i;
 
 	for (i = 0; i < NR; i++)
@@ -444,10 +450,13 @@ int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
 	enum bpf_attach_type type = attr->query.attach_type;
 	struct list_head *progs = &cgrp->bpf.progs[type];
 	u32 flags = cgrp->bpf.flags[type];
+	struct bpf_prog_array *effective;
 	int cnt, ret = 0, i;
 
+	effective = cgroup_rcu_dereference(cgrp->bpf.effective[type]);
+
 	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
-		cnt = bpf_prog_array_length(cgrp->bpf.effective[type]);
+		cnt = bpf_prog_array_length(effective);
 	else
 		cnt = prog_list_length(progs);
 
@@ -464,8 +473,7 @@ int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
 	}
 
 	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
-		return bpf_prog_array_copy_to_user(cgrp->bpf.effective[type],
-						   prog_ids, cnt);
+		return bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
 	} else {
 		struct bpf_prog_list *pl;
 		u32 id;
--
2.21.0.1020.gf2820cf01a-goog