[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20210610193326.p6x3t2c26jitsjob@kafai-mbp>
Date: Thu, 10 Jun 2021 12:33:26 -0700
From: Martin KaFai Lau <kafai@...com>
To: Toke Høiland-Jørgensen <toke@...hat.com>
CC: <bpf@...r.kernel.org>, <netdev@...r.kernel.org>,
Hangbin Liu <liuhangbin@...il.com>,
Jesper Dangaard Brouer <brouer@...hat.com>,
Magnus Karlsson <magnus.karlsson@...il.com>,
"Paul E . McKenney" <paulmck@...nel.org>
Subject: Re: [PATCH bpf-next 02/17] bpf: allow RCU-protected lookups to
happen from bh context
On Wed, Jun 09, 2021 at 12:33:11PM +0200, Toke Høiland-Jørgensen wrote:
> XDP programs are called from a NAPI poll context, which means the RCU
> reference liveness is ensured by local_bh_disable(). Add
> rcu_read_lock_bh_held() as a condition to the RCU checks for map lookups so
> lockdep understands that the dereferences are safe from inside *either* an
> rcu_read_lock() section *or* a local_bh_disable() section. This is done in
> preparation for removing the redundant rcu_read_lock()s from the drivers.
>
> Signed-off-by: Toke Høiland-Jørgensen <toke@...hat.com>
> ---
> kernel/bpf/hashtab.c | 21 ++++++++++++++-------
> kernel/bpf/helpers.c | 6 +++---
> kernel/bpf/lpm_trie.c | 6 ++++--
> 3 files changed, 21 insertions(+), 12 deletions(-)
>
> diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
> index 6f6681b07364..72c58cc516a3 100644
> --- a/kernel/bpf/hashtab.c
> +++ b/kernel/bpf/hashtab.c
> @@ -596,7 +596,8 @@ static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
> struct htab_elem *l;
> u32 hash, key_size;
>
> - WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
> + WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
> + !rcu_read_lock_bh_held());
>
> key_size = map->key_size;
>
> @@ -989,7 +990,8 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
> /* unknown flags */
> return -EINVAL;
>
> - WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
> + WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
> + !rcu_read_lock_bh_held());
>
> key_size = map->key_size;
>
> @@ -1082,7 +1084,8 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
> /* unknown flags */
> return -EINVAL;
>
> - WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
> + WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
> + !rcu_read_lock_bh_held());
>
> key_size = map->key_size;
>
> @@ -1148,7 +1151,8 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
> /* unknown flags */
> return -EINVAL;
>
> - WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
> + WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
> + !rcu_read_lock_bh_held());
>
> key_size = map->key_size;
>
> @@ -1202,7 +1206,8 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
> /* unknown flags */
> return -EINVAL;
>
> - WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
> + WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
> + !rcu_read_lock_bh_held());
>
> key_size = map->key_size;
>
> @@ -1276,7 +1281,8 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
> u32 hash, key_size;
> int ret;
>
> - WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
> + WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
> + !rcu_read_lock_bh_held());
>
> key_size = map->key_size;
>
> @@ -1311,7 +1317,8 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
> u32 hash, key_size;
> int ret;
>
> - WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
> + WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
> + !rcu_read_lock_bh_held());
>
> key_size = map->key_size;
>
> diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
> index 544773970dbc..e880f6bb6f28 100644
> --- a/kernel/bpf/helpers.c
> +++ b/kernel/bpf/helpers.c
> @@ -28,7 +28,7 @@
> */
> BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
> {
> - WARN_ON_ONCE(!rcu_read_lock_held());
> + WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
There is a discrepancy here regarding rcu_read_lock_trace_held(), but
I think the patch_map_ops_generic step in the verifier has skipped
these helper calls. It is unrelated and can be addressed later
if it becomes needed.
Acked-by: Martin KaFai Lau <kafai@...com>
> return (unsigned long) map->ops->map_lookup_elem(map, key);
> }
>
> @@ -44,7 +44,7 @@ const struct bpf_func_proto bpf_map_lookup_elem_proto = {
> BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
> void *, value, u64, flags)
> {
> - WARN_ON_ONCE(!rcu_read_lock_held());
> + WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
> return map->ops->map_update_elem(map, key, value, flags);
> }
>
> @@ -61,7 +61,7 @@ const struct bpf_func_proto bpf_map_update_elem_proto = {
>
> BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
> {
> - WARN_ON_ONCE(!rcu_read_lock_held());
> + WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
> return map->ops->map_delete_elem(map, key);
> }
Powered by blists - more mailing lists