Message-ID: <64c9285b927f8_1c2791294e4@willemb.c.googlers.com.notmuch>
Date: Tue, 01 Aug 2023 11:44:27 -0400
From: Willem de Bruijn <willemdebruijn.kernel@...il.com>
To: Eric Dumazet <edumazet@...gle.com>, 
 "David S . Miller" <davem@...emloft.net>, 
 Jakub Kicinski <kuba@...nel.org>, 
 Paolo Abeni <pabeni@...hat.com>
Cc: Willem de Bruijn <willemb@...gle.com>, 
 Tahsin Erdogan <trdgn@...zon.com>, 
 netdev@...r.kernel.org, 
 eric.dumazet@...il.com, 
 Eric Dumazet <edumazet@...gle.com>
Subject: RE: [PATCH net-next 1/4] net: allow alloc_skb_with_frags() to
 allocate bigger packets

Eric Dumazet wrote:
> Refactor alloc_skb_with_frags() to allow bigger packets allocations.
> 
> Instead of assuming that only order-0 allocations will be attempted,
> use the caller supplied max order.
> 
> Signed-off-by: Eric Dumazet <edumazet@...gle.com>
> Cc: Tahsin Erdogan <trdgn@...zon.com>
> ---
>  net/core/skbuff.c | 56 +++++++++++++++++++++--------------------------
>  1 file changed, 25 insertions(+), 31 deletions(-)
> 
> diff --git a/net/core/skbuff.c b/net/core/skbuff.c
> index a298992060e6efdecb87c7ffc8290eafe330583f..0ac70a0144a7c1f4e7824ddc19980aee73e4c121 100644
> --- a/net/core/skbuff.c
> +++ b/net/core/skbuff.c
> @@ -6204,7 +6204,7 @@ EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl);
>   *
>   * @header_len: size of linear part
>   * @data_len: needed length in frags
> - * @max_page_order: max page order desired.
> + * @order: max page order desired.
>   * @errcode: pointer to error code if any
>   * @gfp_mask: allocation mask
>   *
> @@ -6212,21 +6212,17 @@ EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl);
>   */
>  struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
>  				     unsigned long data_len,
> -				     int max_page_order,
> +				     int order,
>  				     int *errcode,
>  				     gfp_t gfp_mask)
>  {
> -	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
>  	unsigned long chunk;
>  	struct sk_buff *skb;
>  	struct page *page;
> -	int i;
> +	int nr_frags = 0;
>  
>  	*errcode = -EMSGSIZE;
> -	/* Note this test could be relaxed, if we succeed to allocate
> -	 * high order pages...
> -	 */
> -	if (npages > MAX_SKB_FRAGS)
> +	if (unlikely(data_len > MAX_SKB_FRAGS * (PAGE_SIZE << order)))
>  		return NULL;
>  
>  	*errcode = -ENOBUFS;
> @@ -6234,34 +6230,32 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
>  	if (!skb)
>  		return NULL;
>  
> -	skb->truesize += npages << PAGE_SHIFT;
> -
> -	for (i = 0; npages > 0; i++) {
> -		int order = max_page_order;
> -
> -		while (order) {
> -			if (npages >= 1 << order) {
> -				page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
> -						   __GFP_COMP |
> -						   __GFP_NOWARN,
> -						   order);
> -				if (page)
> -					goto fill_page;
> -				/* Do not retry other high order allocations */


Is this heuristic of trying only one compound page size, and otherwise
falling back to regular order-0 pages, still relevant? I don't know
the story behind it.
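
To make sure I read it right, with order = 3 and, say, 8 pages of
data (an illustration of my understanding, not taken from the patch):

	old code:  alloc_pages(order=3) fails
	           -> max_page_order is cleared; the rest is filled with
	              8 x alloc_page() (order-0), order-2/order-1 are
	              never attempted

	new code:  alloc_pages(order=3) fails -> retry at order 2
	           alloc_pages(order=2) fails -> retry at order 1
	           alloc_pages(order=1) fails -> alloc_page() (order-0),
	           -ENOBUFS only if that also fails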

> -				order = 1;
> -				max_page_order = 0;
> -			}
> +	while (data_len) {
> +		if (nr_frags == MAX_SKB_FRAGS - 1)
> +			goto failure;
> +		while (order && data_len < (PAGE_SIZE << order))
>  			order--;

Why decrement order on every iteration through the loop, not just when
alloc_pages fails?
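
To make the question concrete, the shape I had in mind (rough sketch,
untested) keeps the failure-path decrement but drops the per-iteration
clamp. The trade-off is that a compound page larger than the remaining
data_len may then be allocated for the tail, if that is what the clamp
is protecting against.

	while (data_len) {
		if (nr_frags == MAX_SKB_FRAGS - 1)
			goto failure;

		if (order) {
			page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
					   __GFP_COMP |
					   __GFP_NOWARN,
					   order);
			if (!page) {
				/* step down only after a failed attempt */
				order--;
				continue;
			}
		} else {
			page = alloc_page(gfp_mask);
			if (!page)
				goto failure;
		}

		chunk = min_t(unsigned long, data_len,
			      PAGE_SIZE << order);
		skb_fill_page_desc(skb, nr_frags, page, 0, chunk);
		nr_frags++;
		skb->truesize += PAGE_SIZE << order;
		data_len -= chunk;
	}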

> +
> +		if (order) {
> +			page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
> +					   __GFP_COMP |
> +					   __GFP_NOWARN,
> +					   order);
> +			if (!page) {
> +				order--;
> +				continue;
> +			}
> +		} else {
> +			page = alloc_page(gfp_mask);
> +			if (!page)
> +				goto failure;
>  		}
> -		page = alloc_page(gfp_mask);
> -		if (!page)
> -			goto failure;
> -fill_page:
>  		chunk = min_t(unsigned long, data_len,
>  			      PAGE_SIZE << order);
> -		skb_fill_page_desc(skb, i, page, 0, chunk);
> +		skb_fill_page_desc(skb, nr_frags, page, 0, chunk);
> +		nr_frags++;
> +		skb->truesize += (PAGE_SIZE << order);
>  		data_len -= chunk;
> -		npages -= 1 << order;
>  	}
>  	return skb;
>  
> -- 
> 2.41.0.585.gd2178a4bd4-goog
> 