Message-ID: <20210111182801.12609-4-alobakin@pm.me>
Date: Mon, 11 Jan 2021 18:29:22 +0000
From: Alexander Lobakin <alobakin@...me>
To: "David S. Miller" <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>
Cc: Eric Dumazet <edumazet@...gle.com>,
Edward Cree <ecree@...arflare.com>,
Jonathan Lemon <jonathan.lemon@...il.com>,
Willem de Bruijn <willemb@...gle.com>,
Miaohe Lin <linmiaohe@...wei.com>,
Alexander Lobakin <alobakin@...me>,
Steffen Klassert <steffen.klassert@...unet.com>,
Guillaume Nault <gnault@...hat.com>,
Yadu Kishore <kyk.segfault@...il.com>,
Al Viro <viro@...iv.linux.org.uk>, netdev@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH net-next 4/5] skbuff: allocate skbuff_heads by bulks instead of one by one

Use the same napi_alloc_cache struct and the same approach as used
for bulk-freeing skbuff_heads to allocate them for new skbs.

The new skb_cache will store up to NAPI_SKB_CACHE_SIZE skbuff_heads
(currently 64, which equals NAPI_POLL_WEIGHT and is thus enough to
serve one polling cycle) and will be refilled in bulk either when it
becomes fully depleted or after network softirqs complete.

Signed-off-by: Alexander Lobakin <alobakin@...me>
---
 net/core/skbuff.c | 20 +++++++++++++++++++-
 1 file changed, 19 insertions(+), 1 deletion(-)
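
For illustration only, here is a minimal, self-contained userspace
sketch of the cache-and-refill pattern the patch implements. It is not
kernel code: alloc_cache, cache_get(), cache_refill() and
fake_alloc_bulk() are hypothetical names made up for this example, and
fake_alloc_bulk() merely stands in for kmem_cache_alloc_bulk().

/*
 * Illustrative userspace model of the skbuff_head cache above.
 * All names here are hypothetical and do not exist in the kernel tree.
 */
#include <stdio.h>
#include <stdlib.h>

#define CACHE_SIZE	64	/* models NAPI_SKB_CACHE_SIZE */
#define OBJ_SIZE	256	/* stand-in object size */

struct alloc_cache {
	unsigned int count;
	void *cache[CACHE_SIZE];
};

/* Pretend bulk allocator: fills 'list' with up to 'nr' objects and
 * returns how many it actually allocated, like kmem_cache_alloc_bulk().
 */
static unsigned int fake_alloc_bulk(unsigned int nr, void **list)
{
	unsigned int i;

	for (i = 0; i < nr; i++) {
		list[i] = malloc(OBJ_SIZE);
		if (!list[i])
			break;
	}

	return i;
}

/* Allocation path: refill the whole cache in bulk only when it is
 * empty, then hand out objects from the top one by one.
 */
static void *cache_get(struct alloc_cache *nc)
{
	if (!nc->count)
		nc->count = fake_alloc_bulk(CACHE_SIZE, nc->cache);
	if (!nc->count)
		return NULL;

	return nc->cache[--nc->count];
}

/* Post-softirq path: top the cache back up to CACHE_SIZE in one call. */
static void cache_refill(struct alloc_cache *nc)
{
	unsigned int num = CACHE_SIZE - nc->count;

	if (num)
		nc->count += fake_alloc_bulk(num, nc->cache + nc->count);
}

int main(void)
{
	struct alloc_cache nc = { .count = 0 };
	unsigned int i;

	for (i = 0; i < 100; i++)	/* drain across a "polling cycle" */
		free(cache_get(&nc));

	cache_refill(&nc);
	printf("cache holds %u objects after refill\n", nc.count);

	while (nc.count)		/* drop what is left before exiting */
		free(nc.cache[--nc.count]);

	return 0;
}

The kernel version differs mainly in that the cache is per-CPU
(this_cpu_ptr(&napi_alloc_cache)), the bulk allocation uses
kmem_cache_alloc_bulk() with GFP_ATOMIC, and the refill runs from
__kfree_skb_flush() after network softirq processing completes.
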
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 0e8c597ff6ce..57a7307689f3 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -367,6 +367,8 @@ EXPORT_SYMBOL(build_skb_around);
 
 struct napi_alloc_cache {
 	struct page_frag_cache page;
+	u32 skb_count;
+	void *skb_cache[NAPI_SKB_CACHE_SIZE];
 	u32 flush_skb_count;
 	void *flush_skb_cache[NAPI_SKB_CACHE_SIZE];
 };
@@ -490,7 +492,15 @@ static struct sk_buff *__napi_decache_skb(struct napi_alloc_cache *nc)
 	if (nc->flush_skb_count)
 		return nc->flush_skb_cache[--nc->flush_skb_count];
 
-	return kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+	if (unlikely(!nc->skb_count))
+		nc->skb_count = kmem_cache_alloc_bulk(skbuff_head_cache,
+						      GFP_ATOMIC,
+						      NAPI_SKB_CACHE_SIZE,
+						      nc->skb_cache);
+	if (unlikely(!nc->skb_count))
+		return NULL;
+
+	return nc->skb_cache[--nc->skb_count];
 }
 
 /**
@@ -870,6 +880,7 @@ void __consume_stateless_skb(struct sk_buff *skb)
 void __kfree_skb_flush(void)
 {
 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	u32 num;
 
 	/* flush flush_skb_cache if containing objects */
 	if (nc->flush_skb_count) {
@@ -877,6 +888,13 @@ void __kfree_skb_flush(void)
 				     nc->flush_skb_cache);
 		nc->flush_skb_count = 0;
 	}
+
+	num = NAPI_SKB_CACHE_SIZE - nc->skb_count;
+	if (num)
+		nc->skb_count += kmem_cache_alloc_bulk(skbuff_head_cache,
+						       GFP_ATOMIC, num,
+						       nc->skb_cache +
+						       nc->skb_count);
 }
 
 static inline void _kfree_skb_defer(struct sk_buff *skb)

--
2.30.0