Message-ID: <878qng7i63.fsf@toke.dk>
Date: Thu, 01 May 2025 12:13:24 +0200
From: Toke Høiland-Jørgensen <toke@...hat.com>
To: Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
 netdev@...r.kernel.org, linux-rt-devel@...ts.linux.dev
Cc: "David S. Miller" <davem@...emloft.net>, Eric Dumazet
 <edumazet@...gle.com>, Jakub Kicinski <kuba@...nel.org>, Paolo Abeni
 <pabeni@...hat.com>, Simon Horman <horms@...nel.org>, Thomas Gleixner
 <tglx@...utronix.de>, Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
 Andrew Lunn <andrew+netdev@...n.ch>, Alexei Starovoitov <ast@...nel.org>,
 Daniel Borkmann <daniel@...earbox.net>, Jesper Dangaard Brouer
 <hawk@...nel.org>, John Fastabend <john.fastabend@...il.com>
Subject: Re: [PATCH net-next v3 05/18] xdp: Use nested-BH locking for
 system_page_pool

Sebastian Andrzej Siewior <bigeasy@...utronix.de> writes:

> system_page_pool is a per-CPU variable and relies on disabled BH for its
> locking. Without per-CPU locking in local_bh_disable() on PREEMPT_RT,
> this data structure requires explicit locking.
>
> Make a struct with a page_pool member (original system_page_pool) and a
> local_lock_t and use local_lock_nested_bh() for locking. This change
> adds only lockdep coverage and does not alter the functional behaviour
> for !PREEMPT_RT.
>
> Cc: Andrew Lunn <andrew+netdev@...n.ch>
> Cc: Alexei Starovoitov <ast@...nel.org>
> Cc: Daniel Borkmann <daniel@...earbox.net>
> Cc: Jesper Dangaard Brouer <hawk@...nel.org>
> Cc: John Fastabend <john.fastabend@...il.com>
> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
> ---
>  include/linux/netdevice.h |  7 ++++++-
>  net/core/dev.c            | 15 ++++++++++-----
>  net/core/xdp.c            | 11 +++++++++--
>  3 files changed, 25 insertions(+), 8 deletions(-)
>
> diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
> index 2d11d013cabed..2018e2432cb56 100644
> --- a/include/linux/netdevice.h
> +++ b/include/linux/netdevice.h
> @@ -3502,7 +3502,12 @@ struct softnet_data {
>  };
>  
>  DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
> -DECLARE_PER_CPU(struct page_pool *, system_page_pool);
> +
> +struct page_pool_bh {
> +	struct page_pool *pool;
> +	local_lock_t bh_lock;
> +};
> +DECLARE_PER_CPU(struct page_pool_bh, system_page_pool);
>  
>  #ifndef CONFIG_PREEMPT_RT
>  static inline int dev_recursion_level(void)
> diff --git a/net/core/dev.c b/net/core/dev.c
> index 1be7cb73a6024..b56becd070bc7 100644
> --- a/net/core/dev.c
> +++ b/net/core/dev.c
> @@ -462,7 +462,9 @@ EXPORT_PER_CPU_SYMBOL(softnet_data);
>   * PP consumers must pay attention to run APIs in the appropriate context
>   * (e.g. NAPI context).
>   */
> -DEFINE_PER_CPU(struct page_pool *, system_page_pool);
> +DEFINE_PER_CPU(struct page_pool_bh, system_page_pool) = {
> +	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
> +};

I'm a little fuzzy on how DEFINE_PER_CPU() works, but does this
initialisation automatically do the right thing for all of the per-CPU
instances?
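
My vague mental model is that the initialiser only populates the
template image in the .data..percpu section, which early boot then
replicates once per possible CPU, roughly:

	/* Rough sketch of my mental model, not the real implementation;
	 * per_cpu_area() is a made-up helper for illustration.
	 */
	for_each_possible_cpu(cpu)
		memcpy(per_cpu_area(cpu),		/* that CPU's replica */
		       __per_cpu_start,			/* template image */
		       __per_cpu_end - __per_cpu_start);

If that is right, every CPU's copy of bh_lock does start out
initialised, but it would be good to have that confirmed.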

>  #ifdef CONFIG_LOCKDEP
>  /*
> @@ -5238,7 +5240,10 @@ netif_skb_check_for_xdp(struct sk_buff **pskb, const struct bpf_prog *prog)
>  	struct sk_buff *skb = *pskb;
>  	int err, hroom, troom;
>  
> -	if (!skb_cow_data_for_xdp(this_cpu_read(system_page_pool), pskb, prog))
> +	local_lock_nested_bh(&system_page_pool.bh_lock);
> +	err = skb_cow_data_for_xdp(this_cpu_read(system_page_pool.pool), pskb, prog);
> +	local_unlock_nested_bh(&system_page_pool.bh_lock);
> +	if (!err)
>  		return 0;
>  
>  	/* In case we have to go down the path and also linearize,
> @@ -12629,7 +12634,7 @@ static int net_page_pool_create(int cpuid)
>  		return err;
>  	}
>  
> -	per_cpu(system_page_pool, cpuid) = pp_ptr;
> +	per_cpu(system_page_pool.pool, cpuid) = pp_ptr;
>  #endif
>  	return 0;
>  }
> @@ -12759,13 +12764,13 @@ static int __init net_dev_init(void)
>  		for_each_possible_cpu(i) {
>  			struct page_pool *pp_ptr;
>  
> -			pp_ptr = per_cpu(system_page_pool, i);
> +			pp_ptr = per_cpu(system_page_pool.pool, i);
>  			if (!pp_ptr)
>  				continue;
>  
>  			xdp_unreg_page_pool(pp_ptr);
>  			page_pool_destroy(pp_ptr);
> -			per_cpu(system_page_pool, i) = NULL;
> +			per_cpu(system_page_pool.pool, i) = NULL;
>  		}
>  	}
>  
> diff --git a/net/core/xdp.c b/net/core/xdp.c
> index f86eedad586a7..b2a5c934fe7b7 100644
> --- a/net/core/xdp.c
> +++ b/net/core/xdp.c
> @@ -737,10 +737,10 @@ static noinline bool xdp_copy_frags_from_zc(struct sk_buff *skb,
>   */
>  struct sk_buff *xdp_build_skb_from_zc(struct xdp_buff *xdp)
>  {
> -	struct page_pool *pp = this_cpu_read(system_page_pool);
>  	const struct xdp_rxq_info *rxq = xdp->rxq;
>  	u32 len = xdp->data_end - xdp->data_meta;
>  	u32 truesize = xdp->frame_sz;
> +	struct page_pool *pp;
>  	struct sk_buff *skb;
>  	int metalen;
>  	void *data;
> @@ -748,13 +748,18 @@ struct sk_buff *xdp_build_skb_from_zc(struct xdp_buff *xdp)
>  	if (!IS_ENABLED(CONFIG_PAGE_POOL))
>  		return NULL;
>  
> +	local_lock_nested_bh(&system_page_pool.bh_lock);
> +	pp = this_cpu_read(system_page_pool.pool);
>  	data = page_pool_dev_alloc_va(pp, &truesize);
> -	if (unlikely(!data))
> +	if (unlikely(!data)) {
> +		local_unlock_nested_bh(&system_page_pool.bh_lock);
>  		return NULL;
> +	}
>  
>  	skb = napi_build_skb(data, truesize);
>  	if (unlikely(!skb)) {
>  		page_pool_free_va(pp, data, true);
> +		local_unlock_nested_bh(&system_page_pool.bh_lock);
>  		return NULL;
>  	}
>  
> @@ -773,9 +778,11 @@ struct sk_buff *xdp_build_skb_from_zc(struct xdp_buff *xdp)
>  
>  	if (unlikely(xdp_buff_has_frags(xdp)) &&
>  	    unlikely(!xdp_copy_frags_from_zc(skb, xdp, pp))) {
> +		local_unlock_nested_bh(&system_page_pool.bh_lock);
>  		napi_consume_skb(skb, true);
>  		return NULL;
>  	}
> +	local_unlock_nested_bh(&system_page_pool.bh_lock);

Hmm, instead of having four separate unlock calls in this function, how
about initialising skb = NULL, and having the unlock call just above
'return skb' with an out: label?

Then the two 'return NULL's after the !data and !skb checks can
straightforwardly be replaced with 'goto out', while the one in the
frags-failure branch becomes 'skb = NULL; goto out;'. (The topmost
!IS_ENABLED() 'return NULL' has to stay as it is, since it runs before
the lock is taken.) I think that would be more readable than this
repetition.
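
I.e., something like this (completely untested sketch):

	struct sk_buff *xdp_build_skb_from_zc(struct xdp_buff *xdp)
	{
		const struct xdp_rxq_info *rxq = xdp->rxq;
		u32 len = xdp->data_end - xdp->data_meta;
		u32 truesize = xdp->frame_sz;
		struct sk_buff *skb = NULL;
		struct page_pool *pp;
		int metalen;
		void *data;

		if (!IS_ENABLED(CONFIG_PAGE_POOL))
			return NULL;	/* lock not taken yet, keep the plain return */

		local_lock_nested_bh(&system_page_pool.bh_lock);
		pp = this_cpu_read(system_page_pool.pool);
		data = page_pool_dev_alloc_va(pp, &truesize);
		if (unlikely(!data))
			goto out;	/* skb is still NULL here */

		skb = napi_build_skb(data, truesize);
		if (unlikely(!skb)) {
			page_pool_free_va(pp, data, true);
			goto out;
		}

		/* ... middle of the function unchanged ... */

		if (unlikely(xdp_buff_has_frags(xdp)) &&
		    unlikely(!xdp_copy_frags_from_zc(skb, xdp, pp))) {
			napi_consume_skb(skb, true);
			skb = NULL;
			goto out;
		}

		/* ... tail of the function unchanged ... */

	out:
		local_unlock_nested_bh(&system_page_pool.bh_lock);
		return skb;
	}

One difference from your version is that the lock is then held slightly
longer (e.g., across napi_consume_skb() in the frags-failure path); I
don't think that matters here, but worth double-checking.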

-Toke

