Message-ID: <b20c1231-c8ef-4d66-97a9-120f2d77738e@linux.dev>
Date: Wed, 14 Jan 2026 10:44:27 -0800
From: Martin KaFai Lau <martin.lau@...ux.dev>
To: Leon Hwang <leon.hwang@...ux.dev>
Cc: Alexei Starovoitov <ast@...nel.org>,
 Daniel Borkmann <daniel@...earbox.net>, Andrii Nakryiko <andrii@...nel.org>,
 Eduard Zingerman <eddyz87@...il.com>, Song Liu <song@...nel.org>,
 Yonghong Song <yonghong.song@...ux.dev>,
 John Fastabend <john.fastabend@...il.com>, KP Singh <kpsingh@...nel.org>,
 Stanislav Fomichev <sdf@...ichev.me>, Hao Luo <haoluo@...gle.com>,
 Jiri Olsa <jolsa@...nel.org>, Shuah Khan <shuah@...nel.org>,
 Saket Kumar Bhaskar <skb99@...ux.ibm.com>,
 "David S . Miller" <davem@...emloft.net>, linux-kernel@...r.kernel.org,
 linux-kselftest@...r.kernel.org, kernel-patches-bot@...com,
 bpf@...r.kernel.org
Subject: Re: [PATCH bpf-next v3 1/5] bpf: lru: Tidy hash handling in LRU code



On 1/7/26 7:14 AM, Leon Hwang wrote:
> The hash field is not used by the LRU list itself.
> 
> Setting hash while manipulating the LRU list also obscures the intent
> of the code and makes it harder to follow.
> 
> Tidy this up by moving the hash assignment to prealloc_lru_pop(),
> where the element is prepared for insertion into the hash table.
> 
> Signed-off-by: Leon Hwang <leon.hwang@...ux.dev>
> ---
>   kernel/bpf/bpf_lru_list.c | 24 +++++++++---------------
>   kernel/bpf/bpf_lru_list.h |  5 ++---
>   kernel/bpf/hashtab.c      |  5 ++---
>   3 files changed, 13 insertions(+), 21 deletions(-)
> 
> diff --git a/kernel/bpf/bpf_lru_list.c b/kernel/bpf/bpf_lru_list.c
> index e7a2fc60523f..f4e183a9c28f 100644
> --- a/kernel/bpf/bpf_lru_list.c
> +++ b/kernel/bpf/bpf_lru_list.c
> @@ -344,10 +344,8 @@ static void bpf_lru_list_pop_free_to_local(struct bpf_lru *lru,
>   static void __local_list_add_pending(struct bpf_lru *lru,
>   				     struct bpf_lru_locallist *loc_l,
>   				     int cpu,
> -				     struct bpf_lru_node *node,
> -				     u32 hash)
> +				     struct bpf_lru_node *node)
>   {
> -	*(u32 *)((void *)node + lru->hash_offset) = hash;
>   	node->cpu = cpu;
>   	node->type = BPF_LRU_LOCAL_LIST_T_PENDING;
>   	bpf_lru_node_clear_ref(node);
> @@ -393,8 +391,7 @@ __local_list_pop_pending(struct bpf_lru *lru, struct bpf_lru_locallist *loc_l)
>   	return NULL;
>   }
>   
> -static struct bpf_lru_node *bpf_percpu_lru_pop_free(struct bpf_lru *lru,
> -						    u32 hash)
> +static struct bpf_lru_node *bpf_percpu_lru_pop_free(struct bpf_lru *lru)
>   {
>   	struct list_head *free_list;
>   	struct bpf_lru_node *node = NULL;
> @@ -415,7 +412,6 @@ static struct bpf_lru_node *bpf_percpu_lru_pop_free(struct bpf_lru *lru,
>   
>   	if (!list_empty(free_list)) {
>   		node = list_first_entry(free_list, struct bpf_lru_node, list);
> -		*(u32 *)((void *)node + lru->hash_offset) = hash;
>   		bpf_lru_node_clear_ref(node);
>   		__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);

Initializing the hash value later (after releasing l->lock) is not correct.
The node is on the inactive list, and the inactive list is one of the
rotation and _eviction_ candidates, meaning tgt_l->hash will be read in
htab_lru_map_delete_node(). In practice it does not matter much, since
htab_lru_map_delete_node() simply fails to find the node when it looks in
the wrong bucket. However, it still should not read an uninitialized value
to begin with.
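
To make the window concrete, here is a small userspace sketch (not the
kernel code; the struct and function names are simplified stand-ins for
htab_elem, bpf_lru_pop_free() and htab_lru_map_delete_node()) of how an
eviction that runs between the pop and prealloc_lru_pop() would pick a
bucket from a hash that was never written on this path:

#include <stdio.h>

#define NUM_BUCKETS 4

struct elem {
	unsigned int hash;   /* stand-in for htab_elem::hash */
	int on_inactive;     /* stand-in for sitting on the LRU inactive list */
};

/* Models the pop path after this patch: the element lands on the
 * inactive list, but ->hash is no longer written here. */
static void pop_free(struct elem *e)
{
	e->on_inactive = 1;
}

/* Models the eviction callback: the bucket is chosen purely from ->hash,
 * so it reads whatever stale value happens to be there. */
static unsigned int evict_select_bucket(const struct elem *e)
{
	return e->hash % NUM_BUCKETS;
}

/* Models prealloc_lru_pop(): only here does the new code set ->hash. */
static void finish_insert(struct elem *e, unsigned int hash)
{
	e->hash = hash;
}

int main(void)
{
	struct elem e = { .hash = 0xdeadbeef };	/* stale value from earlier use */

	pop_free(&e);
	/* eviction can run in this window, before finish_insert(): */
	printf("evictor scans bucket %u (from a stale hash)\n",
	       evict_select_bucket(&e));
	finish_insert(&e, 42);
	printf("after insert, the correct bucket is %u\n",
	       evict_select_bucket(&e));
	return 0;
}

The miss is harmless in the sense above (the evictor just does not find
the element in that bucket), but the read itself is of a value this path
never initialized.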

> index 441ff5bc54ac..c2d12db9036a 100644
> --- a/kernel/bpf/hashtab.c
> +++ b/kernel/bpf/hashtab.c
> @@ -296,12 +296,13 @@ static void htab_free_elems(struct bpf_htab *htab)
>   static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
>   					  u32 hash)
>   {
> -	struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
> +	struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru);
>   	struct htab_elem *l;
>   
>   	if (node) {
>   		bpf_map_inc_elem_count(&htab->map);
>   		l = container_of(node, struct htab_elem, lru_node);
> +		l->hash = hash;
>   		memcpy(l->key, key, htab->map.key_size);
>   		return l;
>   	}
