Message-ID: <CAPhsuW7dyt6x58YYyxM2qqunoHXuF1mFcUzz74M28wT7E2tqzA@mail.gmail.com>
Date: Mon, 24 Sep 2018 23:05:48 -0700
From: Song Liu <liu.song.a23@...il.com>
To: guro@...com
Cc: Networking <netdev@...r.kernel.org>,
open list <linux-kernel@...r.kernel.org>, Kernel-team@...com,
Daniel Borkmann <daniel@...earbox.net>,
Alexei Starovoitov <ast@...nel.org>
Subject: Re: [PATCH bpf-next 2/9] bpf: rework cgroup storage pointer passing
On Fri, Sep 21, 2018 at 10:16 AM Roman Gushchin <guro@...com> wrote:
>
> To simplify the upcoming introduction of per-cpu cgroup storage,
> let's rework the mechanism of passing a pointer to a cgroup
> storage into bpf_get_local_storage(): save a pointer to the
> corresponding bpf_cgroup_storage structure instead of a pointer
> to the actual data buffer.
>
> This will help us handle per-cpu storage later, which accesses
> the actual data differently.
>
> Signed-off-by: Roman Gushchin <guro@...com>
> Cc: Daniel Borkmann <daniel@...earbox.net>
> Cc: Alexei Starovoitov <ast@...nel.org>
Acked-by: Song Liu <songliubraving@...com>
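
For readers following along, here is a minimal userspace sketch of the
indirection change, with stand-in types and function names (not the
kernel's): the per-CPU slot goes from caching the raw data pointer to
caching the bpf_cgroup_storage pointer, and the helper now does the
dereference on each call.

#include <stdio.h>

/* Stand-in types, loosely modeled on the kernel structures. */
struct bpf_storage_buffer { char data[64]; };
struct bpf_cgroup_storage { struct bpf_storage_buffer *buf; };

/* Stand-in for the per-CPU slot (one CPU, one storage type). */
static struct bpf_cgroup_storage *slot;

/* Before the patch, the set() step cached &storage->buf->data[0]
 * and the helper simply returned that cached pointer.  After the
 * patch, set() caches the bpf_cgroup_storage pointer itself ... */
static void storage_set(struct bpf_cgroup_storage *storage)
{
	slot = storage;
}

/* ... and the helper resolves the data buffer at call time, which
 * leaves room for a per-cpu flavor that resolves it differently. */
static void *get_local_storage(void)
{
	return &slot->buf->data[0];
}

int main(void)
{
	struct bpf_storage_buffer buf = { "hello" };
	struct bpf_cgroup_storage storage = { &buf };

	storage_set(&storage);
	printf("%s\n", (char *)get_local_storage());
	return 0;
}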
> ---
> include/linux/bpf-cgroup.h | 13 ++++---------
> kernel/bpf/helpers.c | 8 ++++++--
> kernel/bpf/local_storage.c | 3 ++-
> 3 files changed, 12 insertions(+), 12 deletions(-)
>
> diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
> index e9871b012dac..7e0c9a1d48b7 100644
> --- a/include/linux/bpf-cgroup.h
> +++ b/include/linux/bpf-cgroup.h
> @@ -23,7 +23,8 @@ struct bpf_cgroup_storage;
> extern struct static_key_false cgroup_bpf_enabled_key;
> #define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
>
> -DECLARE_PER_CPU(void*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
> +DECLARE_PER_CPU(struct bpf_cgroup_storage*,
> + bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
>
> #define for_each_cgroup_storage_type(stype) \
> for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
> @@ -115,15 +116,9 @@ static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage
> *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
> {
> enum bpf_cgroup_storage_type stype;
> - struct bpf_storage_buffer *buf;
> -
> - for_each_cgroup_storage_type(stype) {
> - if (!storage[stype])
> - continue;
>
> - buf = READ_ONCE(storage[stype]->buf);
> - this_cpu_write(bpf_cgroup_storage[stype], &buf->data[0]);
> - }
> + for_each_cgroup_storage_type(stype)
> + this_cpu_write(bpf_cgroup_storage[stype], storage[stype]);
> }
>
> struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
> diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
> index 9070b2ace6aa..e42f8789b7ea 100644
> --- a/kernel/bpf/helpers.c
> +++ b/kernel/bpf/helpers.c
> @@ -195,7 +195,8 @@ const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
> };
>
> #ifdef CONFIG_CGROUP_BPF
> -DECLARE_PER_CPU(void*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
> +DECLARE_PER_CPU(struct bpf_cgroup_storage*,
> + bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
>
> BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
> {
> @@ -204,8 +205,11 @@ BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
> * verifier checks that its value is correct.
> */
> enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
> + struct bpf_cgroup_storage *storage;
>
> - return (unsigned long) this_cpu_read(bpf_cgroup_storage[stype]);
> + storage = this_cpu_read(bpf_cgroup_storage[stype]);
> +
> + return (unsigned long)&READ_ONCE(storage->buf)->data[0];
> }
>
> const struct bpf_func_proto bpf_get_local_storage_proto = {
> diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
> index 0bd9f19fc557..6742292fb39e 100644
> --- a/kernel/bpf/local_storage.c
> +++ b/kernel/bpf/local_storage.c
> @@ -7,7 +7,8 @@
> #include <linux/rbtree.h>
> #include <linux/slab.h>
>
> -DEFINE_PER_CPU(void*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
> +DEFINE_PER_CPU(struct bpf_cgroup_storage*,
> + bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
>
> #ifdef CONFIG_CGROUP_BPF
>
> --
> 2.17.1
>
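
As an aside: from a BPF program's point of view the helper contract is
unchanged, since bpf_get_local_storage() still returns a pointer to the
data buffer. A rough caller-side example in the style of the era's
selftests (map, function and section names are made up here, and the
header path depends on your tree):

#include <linux/bpf.h>
#include "bpf_helpers.h"	/* tools/testing/selftests/bpf */

struct bpf_map_def SEC("maps") cgrp_storage = {
	.type		= BPF_MAP_TYPE_CGROUP_STORAGE,
	.key_size	= sizeof(struct bpf_cgroup_storage_key),
	.value_size	= sizeof(__u64),
};

SEC("cgroup_skb/egress")
int count_egress(struct __sk_buff *skb)
{
	__u64 *counter;

	/* Returns a pointer into this attachment's storage buffer;
	 * flags must be 0 for now. */
	counter = bpf_get_local_storage(&cgrp_storage, 0);
	__sync_fetch_and_add(counter, 1);

	return 1;
}

char _license[] SEC("license") = "GPL";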