[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20210111182801.12609-1-alobakin@pm.me>
Date: Mon, 11 Jan 2021 18:28:21 +0000
From: Alexander Lobakin <alobakin@...me>
To: "David S. Miller" <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>
Cc: Eric Dumazet <edumazet@...gle.com>,
Edward Cree <ecree@...arflare.com>,
Jonathan Lemon <jonathan.lemon@...il.com>,
Willem de Bruijn <willemb@...gle.com>,
Miaohe Lin <linmiaohe@...wei.com>,
Alexander Lobakin <alobakin@...me>,
Steffen Klassert <steffen.klassert@...unet.com>,
Guillaume Nault <gnault@...hat.com>,
Yadu Kishore <kyk.segfault@...il.com>,
Al Viro <viro@...iv.linux.org.uk>, netdev@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH net-next 1/5] skbuff: rename fields of struct napi_alloc_cache to be more intuitive
skb_cache and skb_count fields are used to store skbuff_heads queued
for freeing to flush them in bulk, and aren't related to the allocation
path. Give them more obvious names to improve code understanding and
allow expanding this struct with more allocation-related elements.
Misc: indent the struct napi_alloc_cache declaration for better readability.
Signed-off-by: Alexander Lobakin <alobakin@...me>
---
net/core/skbuff.c | 26 +++++++++++++-------------
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 7626a33cce59..17ae5e90f103 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -366,9 +366,9 @@ EXPORT_SYMBOL(build_skb_around);
#define NAPI_SKB_CACHE_SIZE 64
struct napi_alloc_cache {
- struct page_frag_cache page;
- unsigned int skb_count;
- void *skb_cache[NAPI_SKB_CACHE_SIZE];
+ struct page_frag_cache page;
+ u32 flush_skb_count;
+ void *flush_skb_cache[NAPI_SKB_CACHE_SIZE];
};
static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
@@ -860,11 +860,11 @@ void __kfree_skb_flush(void)
{
struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
- /* flush skb_cache if containing objects */
- if (nc->skb_count) {
- kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
- nc->skb_cache);
- nc->skb_count = 0;
+ /* flush flush_skb_cache if containing objects */
+ if (nc->flush_skb_count) {
+ kmem_cache_free_bulk(skbuff_head_cache, nc->flush_skb_count,
+ nc->flush_skb_cache);
+ nc->flush_skb_count = 0;
}
}
@@ -876,18 +876,18 @@ static inline void _kfree_skb_defer(struct sk_buff *skb)
skb_release_all(skb);
/* record skb to CPU local list */
- nc->skb_cache[nc->skb_count++] = skb;
+ nc->flush_skb_cache[nc->flush_skb_count++] = skb;
#ifdef CONFIG_SLUB
/* SLUB writes into objects when freeing */
prefetchw(skb);
#endif
- /* flush skb_cache if it is filled */
- if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
+ /* flush flush_skb_cache if it is filled */
+ if (unlikely(nc->flush_skb_count == NAPI_SKB_CACHE_SIZE)) {
kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
- nc->skb_cache);
- nc->skb_count = 0;
+ nc->flush_skb_cache);
+ nc->flush_skb_count = 0;
}
}
void __kfree_skb_defer(struct sk_buff *skb)
--
2.30.0
Powered by blists - more mailing lists