Message-ID: <20260206153048.17570-5-fw@strlen.de>
Date: Fri, 6 Feb 2026 16:30:41 +0100
From: Florian Westphal <fw@...len.de>
To: <netdev@...r.kernel.org>
Cc: Paolo Abeni <pabeni@...hat.com>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
<netfilter-devel@...r.kernel.org>,
pablo@...filter.org
Subject: [PATCH v2 net-next 04/11] netfilter: flowtable: dedicated slab for flow entry
From: Qingfang Deng <dqfext@...il.com>
The size of `struct flow_offload` has grown beyond 256 bytes on 64-bit
kernels (currently 280 bytes) because of the recently added
`flow_offload_tunnel` member. As a result, kmalloc() serves these
allocations from the kmalloc-512 slab, wasting more than 200 bytes per
entry.

Introduce a dedicated slab cache for flow entries to reduce the memory
footprint. This shrinks each entry from 512 bytes to 320 bytes on
x86_64.
Signed-off-by: Qingfang Deng <dqfext@...il.com>
Signed-off-by: Florian Westphal <fw@...len.de>
---
net/netfilter/nf_flow_table_core.c | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
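
For reference only (not part of the patch): a minimal, self-contained
kernel module sketch of the same idea, showing how a kmalloc() of a
~280-byte object lands in the kmalloc-512 size class while a dedicated
KMEM_CACHE with SLAB_HWCACHE_ALIGN packs it into roughly 320-byte
objects. The names "demo_entry" and "demo_cachep" and the module
boilerplate are made up for illustration; only the slab API calls match
what the patch uses.

/*
 * Illustration only: compare kmalloc() size-class rounding with a
 * dedicated slab cache for a ~280-byte object.
 */
#include <linux/module.h>
#include <linux/slab.h>

struct demo_entry {
	u8 payload[280];	/* roughly sizeof(struct flow_offload) */
};

static struct kmem_cache *demo_cachep __read_mostly;

static int __init demo_init(void)
{
	void *km_obj, *cache_obj;

	/* kmalloc() rounds up to the next size class: 280 -> kmalloc-512 */
	km_obj = kmalloc(sizeof(struct demo_entry), GFP_KERNEL);
	if (!km_obj)
		return -ENOMEM;
	pr_info("kmalloc: requested %zu bytes, got %zu\n",
		sizeof(struct demo_entry), ksize(km_obj));
	kfree(km_obj);

	/* A dedicated cache only pads up to cacheline alignment (~320). */
	demo_cachep = KMEM_CACHE(demo_entry, SLAB_HWCACHE_ALIGN);
	if (!demo_cachep)
		return -ENOMEM;

	cache_obj = kmem_cache_zalloc(demo_cachep, GFP_KERNEL);
	if (!cache_obj) {
		kmem_cache_destroy(demo_cachep);
		return -ENOMEM;
	}
	kmem_cache_free(demo_cachep, cache_obj);
	return 0;
}

static void __exit demo_exit(void)
{
	kmem_cache_destroy(demo_cachep);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

After loading such a module, the per-object sizes can be checked via
/proc/slabinfo or /sys/kernel/slab/.
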
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 06e8251a6644..2c4140e6f53c 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -16,6 +16,7 @@
static DEFINE_MUTEX(flowtable_lock);
static LIST_HEAD(flowtables);
+static __read_mostly struct kmem_cache *flow_offload_cachep;
static void
flow_offload_fill_dir(struct flow_offload *flow,
@@ -56,7 +57,7 @@ struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
if (unlikely(nf_ct_is_dying(ct)))
return NULL;
- flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
+ flow = kmem_cache_zalloc(flow_offload_cachep, GFP_ATOMIC);
if (!flow)
return NULL;
@@ -812,9 +813,13 @@ static int __init nf_flow_table_module_init(void)
{
int ret;
+ flow_offload_cachep = KMEM_CACHE(flow_offload, SLAB_HWCACHE_ALIGN);
+ if (!flow_offload_cachep)
+ return -ENOMEM;
+
ret = register_pernet_subsys(&nf_flow_table_net_ops);
if (ret < 0)
- return ret;
+ goto out_pernet;
ret = nf_flow_table_offload_init();
if (ret)
@@ -830,6 +835,8 @@ static int __init nf_flow_table_module_init(void)
nf_flow_table_offload_exit();
out_offload:
unregister_pernet_subsys(&nf_flow_table_net_ops);
+out_pernet:
+ kmem_cache_destroy(flow_offload_cachep);
return ret;
}
@@ -837,6 +844,7 @@ static void __exit nf_flow_table_module_exit(void)
{
nf_flow_table_offload_exit();
unregister_pernet_subsys(&nf_flow_table_net_ops);
+ kmem_cache_destroy(flow_offload_cachep);
}
module_init(nf_flow_table_module_init);
--
2.52.0