Message-ID: <20250529000348.upto3ztve36ccamv@gmail.com>
Date: Wed, 28 May 2025 17:04:05 -0700
From: John Fastabend <john.fastabend@...il.com>
To: Cong Wang <xiyou.wangcong@...il.com>
Cc: netdev@...r.kernel.org, bpf@...r.kernel.org, zhoufeng.zf@...edance.com,
	jakub@...udflare.com, zijianzhang@...edance.com,
	Cong Wang <cong.wang@...edance.com>
Subject: Re: [Patch bpf-next v3 2/4] skmsg: implement slab allocator cache
 for sk_msg

On 2025-05-19 13:36:26, Cong Wang wrote:
> From: Zijian Zhang <zijianzhang@...edance.com>
> 
> Optimizing redirect ingress performance requires frequent allocation and
> deallocation of sk_msg structures. Introduce a dedicated kmem_cache for
> sk_msg to reduce memory allocation overhead and improve performance.
> 
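Just to restate the idea for anyone following along, a minimal sketch of
what the cache side of this could look like in net/core/skmsg.c is below
(the zeroed allocation, the sg_init_marker() call and the initcall hook are
my assumptions here, not necessarily what the patch actually does):

/* Sketch only, not the patch itself. */
struct kmem_cache *sk_msg_cachep;

struct sk_msg *sk_msg_alloc(gfp_t gfp)
{
	/* zeroed object from the dedicated cache instead of kzalloc() */
	struct sk_msg *msg = kmem_cache_zalloc(sk_msg_cachep, gfp);

	if (unlikely(!msg))
		return NULL;
	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
	return msg;
}

static int __init sk_msg_cachep_init(void)
{
	sk_msg_cachep = kmem_cache_create("sk_msg_cachep",
					  sizeof(struct sk_msg), 0,
					  SLAB_HWCACHE_ALIGN, NULL);
	return sk_msg_cachep ? 0 : -ENOMEM;
}
core_initcall(sk_msg_cachep_init);

Callers that used kzalloc(sizeof(*msg), ...) would then switch to
sk_msg_alloc(), with frees going through kmem_cache_free() as in the
kfree_sk_msg() helper below.
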
> Reviewed-by: Cong Wang <cong.wang@...edance.com>
> Signed-off-by: Zijian Zhang <zijianzhang@...edance.com>
> ---
>  include/linux/skmsg.h | 21 ++++++++++++---------
>  net/core/skmsg.c      | 28 +++++++++++++++++++++-------
>  net/ipv4/tcp_bpf.c    |  5 ++---
>  3 files changed, 35 insertions(+), 19 deletions(-)
> 
> diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
> index d6f0a8cd73c4..bf28ce9b5fdb 100644
> --- a/include/linux/skmsg.h
> +++ b/include/linux/skmsg.h
> @@ -121,6 +121,7 @@ struct sk_psock {
>  	struct rcu_work			rwork;
>  };
>  
> +struct sk_msg *sk_msg_alloc(gfp_t gfp);
>  int sk_msg_expand(struct sock *sk, struct sk_msg *msg, int len,
>  		  int elem_first_coalesce);
>  int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
> @@ -143,6 +144,8 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
>  		   int len, int flags);
>  bool sk_msg_is_readable(struct sock *sk);
>  
> +extern struct kmem_cache *sk_msg_cachep;
> +
>  static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
>  {
>  	WARN_ON(i == msg->sg.end && bytes);
> @@ -319,6 +322,13 @@ static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
>  	kfree_skb(skb);
>  }
>  
> +static inline void kfree_sk_msg(struct sk_msg *msg)
> +{
> +	if (msg->skb)
> +		consume_skb(msg->skb);
> +	kmem_cache_free(sk_msg_cachep, msg);
> +}
> +
>  static inline bool sk_psock_queue_msg(struct sk_psock *psock,
>  				      struct sk_msg *msg)
>  {
> @@ -330,7 +340,7 @@ static inline bool sk_psock_queue_msg(struct sk_psock *psock,
>  		ret = true;
>  	} else {
>  		sk_msg_free(psock->sk, msg);
> -		kfree(msg);
> +		kfree_sk_msg(msg);

Isn't this a potential use-after-free on msg->skb? The sk_msg_free() on the
line above will already consume_skb() it if it exists, and msg->skb isn't
set to nil there, so we would consume_skb() the same skb again?
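
To spell out the sequence I'm worried about (just sketching the two calls
as they end up above):

	sk_msg_free(psock->sk, msg);   /* consume_skb(msg->skb) happens in here */
	kfree_sk_msg(msg);             /* if msg->skb is still non-NULL at this
					* point, this consume_skb()s it again */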

>  		ret = false;
>  	}
>  	spin_unlock_bh(&psock->ingress_lock);
> @@ -378,13 +388,6 @@ static inline bool sk_psock_queue_empty(const struct sk_psock *psock)
>  	return psock ? list_empty(&psock->ingress_msg) : true;
>  }
>  
> -static inline void kfree_sk_msg(struct sk_msg *msg)
> -{
> -	if (msg->skb)
> -		consume_skb(msg->skb);
> -	kfree(msg);
> -}
> -
>  static inline void sk_psock_report_error(struct sk_psock *psock, int err)
>  {
>  	struct sock *sk = psock->sk;
> @@ -441,7 +444,7 @@ static inline void sk_psock_cork_free(struct sk_psock *psock)
>  {
>  	if (psock->cork) {
>  		sk_msg_free(psock->sk, psock->cork);
> -		kfree(psock->cork);
> +		kfree_sk_msg(psock->cork);

Same here.

>  		psock->cork = NULL;
>  	}
>  }
