Message-ID: <20210213141021.87840-8-alobakin@pm.me>
Date: Sat, 13 Feb 2021 14:12:13 +0000
From: Alexander Lobakin <alobakin@...me>
To: "David S. Miller" <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>
Cc: Jonathan Lemon <jonathan.lemon@...il.com>,
Eric Dumazet <edumazet@...gle.com>,
Dmitry Vyukov <dvyukov@...gle.com>,
Willem de Bruijn <willemb@...gle.com>,
Alexander Lobakin <alobakin@...me>,
Randy Dunlap <rdunlap@...radead.org>,
Kevin Hao <haokexin@...il.com>,
Pablo Neira Ayuso <pablo@...filter.org>,
Jakub Sitnicki <jakub@...udflare.com>,
Marco Elver <elver@...gle.com>,
Dexuan Cui <decui@...rosoft.com>,
Paolo Abeni <pabeni@...hat.com>,
Jesper Dangaard Brouer <brouer@...hat.com>,
Alexander Duyck <alexanderduyck@...com>,
Alexander Duyck <alexander.duyck@...il.com>,
Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Andrii Nakryiko <andriin@...com>,
Taehee Yoo <ap420073@...il.com>, Wei Wang <weiwan@...gle.com>,
Cong Wang <xiyou.wangcong@...il.com>,
Björn Töpel <bjorn@...nel.org>,
Miaohe Lin <linmiaohe@...wei.com>,
Guillaume Nault <gnault@...hat.com>,
Florian Westphal <fw@...len.de>,
Edward Cree <ecree.xilinx@...il.com>,
linux-kernel@...r.kernel.org, netdev@...r.kernel.org
Subject: [PATCH v6 net-next 07/11] skbuff: move NAPI cache declarations higher in the file

NAPI cache structures will be used for allocating skbuff_heads, so
move their declarations a bit higher in the file.

Signed-off-by: Alexander Lobakin <alobakin@...me>
---
net/core/skbuff.c | 90 +++++++++++++++++++++++------------------------
1 file changed, 45 insertions(+), 45 deletions(-)
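
For context, not part of this change: the skb_cache array being moved
here is intended to feed skbuff_head allocations. A minimal sketch of
such a consumer, with the helper name and refill count purely
illustrative of where the series is headed:

	static struct sk_buff *napi_skb_cache_get(void)
	{
		struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

		/* Refill the per-CPU cache with one bulk call when empty */
		if (unlikely(!nc->skb_count))
			nc->skb_count = kmem_cache_alloc_bulk(skbuff_head_cache,
							      GFP_ATOMIC,
							      NAPI_SKB_CACHE_SIZE,
							      nc->skb_cache);
		if (unlikely(!nc->skb_count))
			return NULL;

		return nc->skb_cache[--nc->skb_count];
	}
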
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4be2bb969535..860a9d4f752f 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -119,6 +119,51 @@ static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
skb_panic(skb, sz, addr, __func__);
}
+#define NAPI_SKB_CACHE_SIZE 64
+
+struct napi_alloc_cache {
+ struct page_frag_cache page;
+ unsigned int skb_count;
+ void *skb_cache[NAPI_SKB_CACHE_SIZE];
+};
+
+static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
+static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
+
+static void *__alloc_frag_align(unsigned int fragsz, gfp_t gfp_mask,
+ unsigned int align_mask)
+{
+ struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+
+ return page_frag_alloc_align(&nc->page, fragsz, gfp_mask, align_mask);
+}
+
+void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
+{
+ fragsz = SKB_DATA_ALIGN(fragsz);
+
+ return __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
+}
+EXPORT_SYMBOL(__napi_alloc_frag_align);
+
+void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
+{
+ struct page_frag_cache *nc;
+ void *data;
+
+ fragsz = SKB_DATA_ALIGN(fragsz);
+ if (in_irq() || irqs_disabled()) {
+ nc = this_cpu_ptr(&netdev_alloc_cache);
+ data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask);
+ } else {
+ local_bh_disable();
+ data = __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
+ local_bh_enable();
+ }
+ return data;
+}
+EXPORT_SYMBOL(__netdev_alloc_frag_align);
+
/* Caller must provide SKB that is memset cleared */
static void __build_skb_around(struct sk_buff *skb, void *data,
unsigned int frag_size)
@@ -220,51 +265,6 @@ struct sk_buff *build_skb_around(struct sk_buff *skb,
}
EXPORT_SYMBOL(build_skb_around);
-#define NAPI_SKB_CACHE_SIZE 64
-
-struct napi_alloc_cache {
- struct page_frag_cache page;
- unsigned int skb_count;
- void *skb_cache[NAPI_SKB_CACHE_SIZE];
-};
-
-static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
-static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
-
-static void *__alloc_frag_align(unsigned int fragsz, gfp_t gfp_mask,
- unsigned int align_mask)
-{
- struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
-
- return page_frag_alloc_align(&nc->page, fragsz, gfp_mask, align_mask);
-}
-
-void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
-{
- fragsz = SKB_DATA_ALIGN(fragsz);
-
- return __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
-}
-EXPORT_SYMBOL(__napi_alloc_frag_align);
-
-void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
-{
- struct page_frag_cache *nc;
- void *data;
-
- fragsz = SKB_DATA_ALIGN(fragsz);
- if (in_irq() || irqs_disabled()) {
- nc = this_cpu_ptr(&netdev_alloc_cache);
- data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask);
- } else {
- local_bh_disable();
- data = __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
- local_bh_enable();
- }
- return data;
-}
-EXPORT_SYMBOL(__netdev_alloc_frag_align);
-
/*
* kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
* the caller if emergency pfmemalloc reserves are being used. If it is and
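
A usage note, illustrative and not part of the patch: the
in_irq() || irqs_disabled() branch above exists because
local_bh_disable() must not be called from hard-IRQ or IRQs-off
context, so such callers get the dedicated netdev_alloc_cache instead
of the BH-protected NAPI cache. A hypothetical caller:

	/* Hypothetical driver helper: allocate a 64-byte-aligned RX
	 * buffer.  Safe even from hard-IRQ context, since
	 * __netdev_alloc_frag_align() then falls back to the dedicated
	 * netdev_alloc_cache rather than touching the BH-only NAPI
	 * cache.
	 */
	static void *dummy_rx_frag_alloc(unsigned int size)
	{
		return netdev_alloc_frag_align(size, 64);
	}
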
--
2.30.1