[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date: Thu, 2 Feb 2023 15:09:37 -0500
From: Soheil Hassas Yeganeh <soheil@...gle.com>
To: Eric Dumazet <edumazet@...gle.com>
Cc: "David S . Miller" <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>, netdev@...r.kernel.org,
eric.dumazet@...il.com, Alexander Duyck <alexanderduyck@...com>
Subject: Re: [PATCH net-next 3/4] net: factorize code in kmalloc_reserve()
On Thu, Feb 2, 2023 at 1:58 PM Eric Dumazet <edumazet@...gle.com> wrote:
>
> All kmalloc_reserve() callers have to make the same computation;
> we can factorize it, to prepare the following patch in the series.
>
> Signed-off-by: Eric Dumazet <edumazet@...gle.com>
Acked-by: Soheil Hassas Yeganeh <soheil@...gle.com>
> ---
> net/core/skbuff.c | 27 +++++++++++----------------
> 1 file changed, 11 insertions(+), 16 deletions(-)
>
> diff --git a/net/core/skbuff.c b/net/core/skbuff.c
> index a82df5289208d69716e60c5c1f201ec3ca50a258..ae0b2aa1f01e8060cc4fe69137e9bd98e44280cc 100644
> --- a/net/core/skbuff.c
> +++ b/net/core/skbuff.c
> @@ -478,17 +478,20 @@ EXPORT_SYMBOL(napi_build_skb);
> * may be used. Otherwise, the packet data may be discarded until enough
> * memory is free
> */
> -static void *kmalloc_reserve(size_t size, gfp_t flags, int node,
> +static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
> bool *pfmemalloc)
> {
> - void *obj;
> bool ret_pfmemalloc = false;
> + unsigned int obj_size;
> + void *obj;
>
> + obj_size = SKB_HEAD_ALIGN(*size);
> + *size = obj_size = kmalloc_size_roundup(obj_size);
> /*
> * Try a regular allocation, when that fails and we're not entitled
> * to the reserves, fail.
> */
> - obj = kmalloc_node_track_caller(size,
> + obj = kmalloc_node_track_caller(obj_size,
> flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
> node);
> if (obj || !(gfp_pfmemalloc_allowed(flags)))
> @@ -496,7 +499,7 @@ static void *kmalloc_reserve(size_t size, gfp_t flags, int node,
>
> /* Try again but now we are using pfmemalloc reserves */
> ret_pfmemalloc = true;
> - obj = kmalloc_node_track_caller(size, flags, node);
> + obj = kmalloc_node_track_caller(obj_size, flags, node);
>
> out:
> if (pfmemalloc)
> @@ -557,9 +560,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
> * aligned memory blocks, unless SLUB/SLAB debug is enabled.
> * Both skb->head and skb_shared_info are cache line aligned.
> */
> - size = SKB_HEAD_ALIGN(size);
> - size = kmalloc_size_roundup(size);
> - data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
> + data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc);
> if (unlikely(!data))
> goto nodata;
> /* kmalloc_size_roundup() might give us more room than requested.
> @@ -1931,9 +1932,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
> if (skb_pfmemalloc(skb))
> gfp_mask |= __GFP_MEMALLOC;
>
> - size = SKB_HEAD_ALIGN(size);
> - size = kmalloc_size_roundup(size);
> - data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
> + data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
> if (!data)
> goto nodata;
> size = SKB_WITH_OVERHEAD(size);
> @@ -6282,9 +6281,7 @@ static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
> if (skb_pfmemalloc(skb))
> gfp_mask |= __GFP_MEMALLOC;
>
> - size = SKB_HEAD_ALIGN(size);
> - size = kmalloc_size_roundup(size);
> - data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
> + data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
> if (!data)
> return -ENOMEM;
> size = SKB_WITH_OVERHEAD(size);
> @@ -6400,9 +6397,7 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
> if (skb_pfmemalloc(skb))
> gfp_mask |= __GFP_MEMALLOC;
>
> - size = SKB_HEAD_ALIGN(size);
> - size = kmalloc_size_roundup(size);
> - data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
> + data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
> if (!data)
> return -ENOMEM;
> size = SKB_WITH_OVERHEAD(size);
> --
> 2.39.1.456.gfc5497dd1b-goog
>
Powered by blists - more mailing lists