Message-ID: <20210113133635.39402-2-alobakin@pm.me>
Date: Wed, 13 Jan 2021 13:37:13 +0000
From: Alexander Lobakin <alobakin@...me>
To: "David S. Miller" <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>
Cc: Eric Dumazet <edumazet@...gle.com>,
Edward Cree <ecree.xilinx@...il.com>,
Jonathan Lemon <jonathan.lemon@...il.com>,
Willem de Bruijn <willemb@...gle.com>,
Miaohe Lin <linmiaohe@...wei.com>,
Alexander Lobakin <alobakin@...me>,
Steffen Klassert <steffen.klassert@...unet.com>,
Guillaume Nault <gnault@...hat.com>,
Yadu Kishore <kyk.segfault@...il.com>,
Al Viro <viro@...iv.linux.org.uk>, netdev@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH v2 net-next 2/3] skbuff: (re)use NAPI skb cache on allocation path

Instead of calling kmem_cache_alloc() every time a NAPI skb is built,
(re)use skbuff_heads from napi_alloc_cache.skb_cache. Previously, this
cache was used only for bulk-freeing skbuff_heads consumed via
napi_consume_skb() or __kfree_skb_defer().

The typical path is:
- an skb is queued for freeing by the driver or the stack; its
  skbuff_head goes into the cache instead of being freed immediately;
- the driver or the stack requests a NAPI skb allocation; an
  skbuff_head is taken from the cache instead of being allocated anew.

Corner cases:
- if the cache is empty on skb allocation, bulk-allocate its first
  half;
- if the cache is full on skb consumption, bulk-free its second half
  (see the sketch right below).
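
For illustration only, not part of the patch: a minimal userspace
model of these two corner cases. A plain array stands in for
napi_alloc_cache.skb_cache, malloc()/free() stand in for the
kmem_cache bulk API, and the struct and function names here are
made up:

	#include <stdlib.h>

	#define NAPI_SKB_CACHE_SIZE	64
	#define NAPI_SKB_CACHE_HALF	(NAPI_SKB_CACHE_SIZE / 2)

	/* hypothetical stand-in for struct napi_alloc_cache */
	struct skb_cache {
		void	*slots[NAPI_SKB_CACHE_SIZE];
		size_t	count;
	};

	/* allocation path: refill the first half only when empty */
	static void *cache_get(struct skb_cache *c)
	{
		size_t i;

		if (!c->count) {
			/* kmem_cache_alloc_bulk() in the real code */
			for (i = 0; i < NAPI_SKB_CACHE_HALF; i++) {
				void *obj = malloc(256);

				if (!obj)
					break;
				c->slots[c->count++] = obj;
			}
		}

		return c->count ? c->slots[--c->count] : NULL;
	}

	/* freeing path: wipe the second half only when full */
	static void cache_put(struct skb_cache *c, void *obj)
	{
		size_t i;

		c->slots[c->count++] = obj;

		if (c->count == NAPI_SKB_CACHE_SIZE) {
			/* kmem_cache_free_bulk() in the real code */
			for (i = NAPI_SKB_CACHE_HALF;
			     i < NAPI_SKB_CACHE_SIZE; i++)
				free(c->slots[i]);
			c->count = NAPI_SKB_CACHE_HALF;
		}
	}

Working in halves lets the allocation and freeing paths amortise the
slab calls without immediately starving each other.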

Also try to balance the cache size back towards its half mark after
completing network softirqs (__kfree_skb_flush()); see the sketch
below.
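
Again for illustration only, the softirq-exit balancing can be
modelled on top of the sketch above (cache_flush() is a made-up name;
the real logic lives in __kfree_skb_flush() in the diff below):

	/* steer the fill level back to the half mark */
	static void cache_flush(struct skb_cache *c)
	{
		if (c->count == NAPI_SKB_CACHE_HALF)
			return;

		if (c->count > NAPI_SKB_CACHE_HALF) {
			/* too full: free everything above the half mark */
			while (c->count > NAPI_SKB_CACHE_HALF)
				free(c->slots[--c->count]);
		} else {
			/* too empty: top up to the half mark */
			while (c->count < NAPI_SKB_CACHE_HALF) {
				void *obj = malloc(256);

				if (!obj)
					break;
				c->slots[c->count++] = obj;
			}
		}
	}

Returning to the half mark keeps both the refill (get) and the wipe
(put) thresholds a full half-cache away from the current fill level.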

The prefetchw() under CONFIG_SLUB is dropped, since it no longer makes
sense with this scheme.

Suggested-by: Edward Cree <ecree.xilinx@...il.com>
Signed-off-by: Alexander Lobakin <alobakin@...me>
---
 net/core/skbuff.c | 54 ++++++++++++++++++++++++++++++-----------------
 1 file changed, 35 insertions(+), 19 deletions(-)

diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index dc3300dc2ac4..f42a3a04b918 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -364,6 +364,7 @@ struct sk_buff *build_skb_around(struct sk_buff *skb,
 EXPORT_SYMBOL(build_skb_around);
 
 #define NAPI_SKB_CACHE_SIZE	64
+#define NAPI_SKB_CACHE_HALF	(NAPI_SKB_CACHE_SIZE / 2)
 
 struct napi_alloc_cache {
 	struct page_frag_cache page;
@@ -487,7 +488,15 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
 
 static struct sk_buff *napi_skb_cache_get(struct napi_alloc_cache *nc)
 {
-	return kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+	if (unlikely(!nc->skb_count))
+		nc->skb_count = kmem_cache_alloc_bulk(skbuff_head_cache,
+						      GFP_ATOMIC,
+						      NAPI_SKB_CACHE_HALF,
+						      nc->skb_cache);
+	if (unlikely(!nc->skb_count))
+		return NULL;
+
+	return nc->skb_cache[--nc->skb_count];
 }
 
 /**
@@ -867,40 +876,47 @@ void __consume_stateless_skb(struct sk_buff *skb)
 void __kfree_skb_flush(void)
 {
 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	size_t count;
+	void **ptr;
+
+	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_HALF))
+		return;
+
+	if (nc->skb_count > NAPI_SKB_CACHE_HALF) {
+		count = nc->skb_count - NAPI_SKB_CACHE_HALF;
+		ptr = nc->skb_cache + NAPI_SKB_CACHE_HALF;
 
-	/* flush skb_cache if containing objects */
-	if (nc->skb_count) {
-		kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
-				     nc->skb_cache);
-		nc->skb_count = 0;
+		kmem_cache_free_bulk(skbuff_head_cache, count, ptr);
+		nc->skb_count = NAPI_SKB_CACHE_HALF;
+	} else {
+		count = NAPI_SKB_CACHE_HALF - nc->skb_count;
+		ptr = nc->skb_cache + nc->skb_count;
+
+		nc->skb_count += kmem_cache_alloc_bulk(skbuff_head_cache,
+						       GFP_ATOMIC, count,
+						       ptr);
 	}
 }
 
-static inline void _kfree_skb_defer(struct sk_buff *skb)
+static void napi_skb_cache_put(struct sk_buff *skb)
 {
 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 
 	/* drop skb->head and call any destructors for packet */
 	skb_release_all(skb);
 
-	/* record skb to CPU local list */
 	nc->skb_cache[nc->skb_count++] = skb;
 
-#ifdef CONFIG_SLUB
-	/* SLUB writes into objects when freeing */
-	prefetchw(skb);
-#endif
-
-	/* flush skb_cache if it is filled */
 	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
-		kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
-				     nc->skb_cache);
-		nc->skb_count = 0;
+		kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_HALF,
+				     nc->skb_cache + NAPI_SKB_CACHE_HALF);
+		nc->skb_count = NAPI_SKB_CACHE_HALF;
 	}
 }
+
 void __kfree_skb_defer(struct sk_buff *skb)
 {
-	_kfree_skb_defer(skb);
+	napi_skb_cache_put(skb);
 }
 
 void napi_consume_skb(struct sk_buff *skb, int budget)
@@ -925,7 +941,7 @@ void napi_consume_skb(struct sk_buff *skb, int budget)
 		return;
 	}
 
-	_kfree_skb_defer(skb);
+	napi_skb_cache_put(skb);
 }
 EXPORT_SYMBOL(napi_consume_skb);
--
2.30.0