Message-Id: <20230524153311.3625329-8-dhowells@redhat.com>
Date: Wed, 24 May 2023 16:33:06 +0100
From: David Howells <dhowells@...hat.com>
To: netdev@...r.kernel.org
Cc: David Howells <dhowells@...hat.com>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Willem de Bruijn <willemdebruijn.kernel@...il.com>,
David Ahern <dsahern@...nel.org>,
Matthew Wilcox <willy@...radead.org>,
Jens Axboe <axboe@...nel.dk>,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: [PATCH net-next 07/12] net: Clean up users of netdev_alloc_cache and napi_frag_cache

The users of netdev_alloc_cache and napi_frag_cache no longer need to
disable bottom halves around accesses to these fragment caches, as the
per-CPU handling is now done inside page_frag_alloc_align().
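
To illustrate the calling-convention change, here is a sketch distilled
from the hunks below (not extra code to apply):

	/* Before: callers had to pin the per-CPU cache themselves. */
	local_bh_disable();
	data = page_frag_alloc_align(&napi_frag_cache, fragsz,
				     GFP_ATOMIC, align);
	local_bh_enable();

	/* After: page_frag_alloc_align() selects the current CPU's
	 * cache and handles the BH exclusion internally, so the
	 * caller makes a plain call.
	 */
	data = page_frag_alloc_align(&napi_frag_cache, fragsz,
				     GFP_ATOMIC, align);
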
Signed-off-by: David Howells <dhowells@...hat.com>
cc: "David S. Miller" <davem@...emloft.net>
cc: Eric Dumazet <edumazet@...gle.com>
cc: Jakub Kicinski <kuba@...nel.org>
cc: Paolo Abeni <pabeni@...hat.com>
cc: linux-mm@...ck.org
---
 include/linux/skbuff.h |  3 ++-
 net/core/skbuff.c      | 29 +++++++++--------------------
 2 files changed, 11 insertions(+), 21 deletions(-)

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 41b63e72c6c3..e11a765fe7fa 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -252,7 +252,8 @@
 /* Maximum value in skb->csum_level */
 #define SKB_MAX_CSUM_LEVEL	3
 
-#define SKB_DATA_ALIGN(X)	ALIGN(X, SMP_CACHE_BYTES)
+#define SKB_DATA_ALIGNMENT	SMP_CACHE_BYTES
+#define SKB_DATA_ALIGN(X)	ALIGN(X, SKB_DATA_ALIGNMENT)
 #define SKB_WITH_OVERHEAD(X)	\
 	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 
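For reference, ALIGN() rounds its first argument up to the next
multiple of its second, so, taking SMP_CACHE_BYTES == 64 as an example
value (it varies by config):

	SKB_DATA_ALIGN(100) == ALIGN(100, 64) == 128
	SKB_DATA_ALIGN(128) == 128
	SKB_DATA_ALIGN(1)   == 64
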
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 225a16f3713f..c2840b0dcad9 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -291,27 +291,20 @@ void napi_get_frags_check(struct napi_struct *napi)
 
 void *napi_alloc_frag_align(unsigned int fragsz, unsigned int align)
 {
-	fragsz = SKB_DATA_ALIGN(fragsz);
-
+	align = min_t(unsigned int, align, SKB_DATA_ALIGNMENT);
 	return page_frag_alloc_align(&napi_frag_cache, fragsz, GFP_ATOMIC, align);
 }
 EXPORT_SYMBOL(napi_alloc_frag_align);
 
 void *netdev_alloc_frag_align(unsigned int fragsz, unsigned int align)
 {
-	void *data;
-
-	fragsz = SKB_DATA_ALIGN(fragsz);
-	if (in_hardirq() || irqs_disabled()) {
-		data = page_frag_alloc_align(&netdev_alloc_cache,
+	align = min_t(unsigned int, align, SKB_DATA_ALIGNMENT);
+	if (in_hardirq() || irqs_disabled())
+		return page_frag_alloc_align(&netdev_alloc_cache,
 					     fragsz, GFP_ATOMIC, align);
-	} else {
-		local_bh_disable();
-		data = page_frag_alloc_align(&napi_frag_cache,
+	else
+		return page_frag_alloc_align(&napi_frag_cache,
 					     fragsz, GFP_ATOMIC, align);
-		local_bh_enable();
-	}
-	return data;
 }
 EXPORT_SYMBOL(netdev_alloc_frag_align);
 
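The clamp means a caller cannot request alignment beyond what
SKB_DATA_ALIGN() itself guarantees.  Again taking SMP_CACHE_BYTES == 64
as an assumed example value:

	align = min_t(unsigned int, align, SKB_DATA_ALIGNMENT);

	align == 8   ->  8	(small alignments pass through unchanged)
	align == 256 -> 64	(requests above a cache line are capped)

Since page_frag_alloc_align() expects a power-of-two alignment, the
min_t() clamp preserves that property whenever the caller passes one
(the minimum of two powers of two is itself a power of two).
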
@@ -709,15 +702,11 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
 	if (sk_memalloc_socks())
 		gfp_mask |= __GFP_MEMALLOC;
 
-	if (in_hardirq() || irqs_disabled()) {
+	if (in_hardirq() || irqs_disabled())
 		data = page_frag_alloc(&netdev_alloc_cache, len, gfp_mask);
-		pfmemalloc = folio_is_pfmemalloc(virt_to_folio(data));
-	} else {
-		local_bh_disable();
+	else
 		data = page_frag_alloc(&napi_frag_cache, len, gfp_mask);
-		pfmemalloc = folio_is_pfmemalloc(virt_to_folio(data));
-		local_bh_enable();
-	}
 
 	if (unlikely(!data))
 		return NULL;
+	pfmemalloc = folio_is_pfmemalloc(virt_to_folio(data));
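
Note that the NULL check has to run before the folio lookup, since
virt_to_folio() is not valid on a NULL pointer.  For context, the
pfmemalloc flag computed here is consumed further down
__netdev_alloc_skb() (presumably untouched by this patch), where it is
propagated to the new skb:

	if (pfmemalloc)
		skb->pfmemalloc = 1;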